sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
marimo-team/marimo:tests/_runtime/test_runtime_datasets.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._messaging.notification import (
DataSourceConnectionsNotification,
SQLMetadata,
SQLTableListPreviewNotification,
SQLTablePreviewNotification,
ValidateSQLResultNotification,
)
from marimo._runtime.commands import (
ExecuteCellCommand,
ListDataSourceConnectionCommand,
ListSQLTablesCommand,
PreviewSQLTableCommand,
ValidateSQLCommand,
)
from marimo._sql.engines.duckdb import INTERNAL_DUCKDB_ENGINE
from marimo._sql.parse import SqlCatalogCheckResult, SqlParseResult
from marimo._types.ids import CellId_t, RequestId
from tests.conftest import MockedKernel
# SQL support in these tests needs both duckdb and polars available.
HAS_SQL = DependencyManager.duckdb.has() and DependencyManager.polars.has()
# Variable names the fixture cells assign their connections to; the tests
# address engines by these names.
DUCKDB_CONN = "duckdb_conn"
SQLITE_CONN = "sqlite_conn"
@pytest.fixture
async def connection_requests() -> list[ExecuteCellCommand]:
    """Cells that create an in-memory DuckDB and SQLite connection.

    Running these through the kernel registers one catalog-capable engine
    (duckdb) and one query-only engine (sqlite).
    """
    cell_specs: list[tuple[str, str]] = [
        ("0", "import duckdb"),
        ("1", f"{DUCKDB_CONN} = duckdb.connect(':memory:')"),
        ("2", "import sqlite3"),
        ("3", f"{SQLITE_CONN} = sqlite3.connect(':memory:')"),
    ]
    return [
        ExecuteCellCommand(cell_id=CellId_t(cell_id), code=code)
        for cell_id, code in cell_specs
    ]
# @pytest.mark.skipif(not HAS_SQL, reason="SQL deps not available")
# class TestGetSQLConnection:
# async def test_non_existent_engine(
# self, mocked_kernel: MockedKernel
# ) -> None:
# k = mocked_kernel.k
# # Non-existent engine
# k.get_sql_connection(DUCKDB_CONN)
# assert k.get_sql_connection(DUCKDB_CONN) == (None, "Engine not found")
# async def test_created_engine(
# self,
# mocked_kernel: MockedKernel,
# connection_requests: list[ExecuteCellCommand],
# ) -> None:
# k = mocked_kernel.k
# # Create duckdb and sqlite connections
# await k.run(connection_requests)
# connection, error = k.get_sql_connection("duckdb_conn")
# assert connection is not None
# assert error is None
# # Test with SQLite engine (which is a QueryEngine, but not a EngineCatalog)
# connection, error = k.get_sql_connection(SQLITE_CONN)
# assert connection is not None
# assert error is None
@pytest.mark.skipif(not HAS_SQL, reason="SQL deps not available")
class TestPreviewSQLTable:
    """Exercise the PreviewSQLTableCommand handler for each engine kind."""

    @staticmethod
    def _make_request(engine: str) -> PreviewSQLTableCommand:
        # Every test previews the same (database, schema, table) triple;
        # only the engine under test varies.
        return PreviewSQLTableCommand(
            request_id=RequestId("0"),
            engine=engine,
            database="test",
            schema="test",
            table_name="t1",
        )

    @staticmethod
    def _preview_notifications(stream) -> list[SQLTablePreviewNotification]:
        # Keep only the table-preview notifications emitted on the stream.
        return [
            op
            for op in stream.operations
            if isinstance(op, SQLTablePreviewNotification)
        ]

    async def test_non_existent_engine(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """Previewing against an unknown engine reports 'Engine not found'."""
        kernel = mocked_kernel.k
        await kernel.handle_message(self._make_request(DUCKDB_CONN))
        assert self._preview_notifications(mocked_kernel.stream) == [
            SQLTablePreviewNotification(
                request_id=RequestId("0"),
                table=None,
                error="Engine not found",
                metadata=SQLMetadata(
                    connection=DUCKDB_CONN, database="test", schema="test"
                ),
            )
        ]

    async def test_catalog_engine(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """A catalog-capable engine (duckdb) previews without error."""
        kernel = mocked_kernel.k
        await kernel.run(connection_requests)
        await kernel.handle_message(self._make_request(DUCKDB_CONN))
        assert self._preview_notifications(mocked_kernel.stream) == [
            SQLTablePreviewNotification(
                request_id=RequestId("0"),
                table=None,
                error=None,
                metadata=SQLMetadata(
                    connection=DUCKDB_CONN, database="test", schema="test"
                ),
            )
        ]

    async def test_query_engine(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """A query-only engine (sqlite) cannot serve catalog previews."""
        kernel = mocked_kernel.k
        await kernel.run(connection_requests)
        await kernel.handle_message(self._make_request(SQLITE_CONN))
        assert self._preview_notifications(mocked_kernel.stream) == [
            SQLTablePreviewNotification(
                request_id=RequestId("0"),
                table=None,
                error="Connection does not support catalog operations",
                metadata=SQLMetadata(
                    connection=SQLITE_CONN, database="test", schema="test"
                ),
            )
        ]
@pytest.mark.skipif(not HAS_SQL, reason="SQL deps not available")
class TestPreviewSQLTableList:
    """Exercise the ListSQLTablesCommand handler for each engine kind."""

    @staticmethod
    def _make_request(engine: str) -> ListSQLTablesCommand:
        # All tests list tables in the same database/schema; only the
        # engine under test varies.
        return ListSQLTablesCommand(
            request_id=RequestId("0"),
            engine=engine,
            database="test",
            schema="test",
        )

    @staticmethod
    def _list_notifications(stream) -> list[SQLTableListPreviewNotification]:
        # Keep only the table-list notifications emitted on the stream.
        return [
            op
            for op in stream.operations
            if isinstance(op, SQLTableListPreviewNotification)
        ]

    async def test_non_existent_engine(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """Listing tables on an unknown engine reports 'Engine not found'."""
        kernel = mocked_kernel.k
        await kernel.handle_message(self._make_request(DUCKDB_CONN))
        assert self._list_notifications(mocked_kernel.stream) == [
            SQLTableListPreviewNotification(
                request_id=RequestId("0"),
                tables=[],
                error="Engine not found",
                metadata=SQLMetadata(
                    connection=DUCKDB_CONN, database="test", schema="test"
                ),
            )
        ]

    async def test_catalog_engine(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """A catalog-capable engine (duckdb) lists tables without error."""
        kernel = mocked_kernel.k
        await kernel.run(connection_requests)
        await kernel.handle_message(self._make_request(DUCKDB_CONN))
        assert self._list_notifications(mocked_kernel.stream) == [
            SQLTableListPreviewNotification(
                request_id=RequestId("0"),
                tables=[],
                error=None,
                metadata=SQLMetadata(
                    connection=DUCKDB_CONN, database="test", schema="test"
                ),
            )
        ]

    async def test_query_engine(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """A query-only engine (sqlite) cannot serve catalog listings."""
        kernel = mocked_kernel.k
        await kernel.run(connection_requests)
        await kernel.handle_message(self._make_request(SQLITE_CONN))
        assert self._list_notifications(mocked_kernel.stream) == [
            SQLTableListPreviewNotification(
                request_id=RequestId("0"),
                tables=[],
                error="Connection does not support catalog operations",
                metadata=SQLMetadata(
                    connection=SQLITE_CONN, database="test", schema="test"
                ),
            )
        ]
class TestPreviewDatasourceConnection:
    """Tests for the ListDataSourceConnectionCommand kernel handler."""

    async def test_non_existent_engine(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """An unknown engine produces no connection notifications at all."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        preview_datasource_connection_request = (
            ListDataSourceConnectionCommand(engine=DUCKDB_CONN)
        )
        await k.handle_message(preview_datasource_connection_request)
        preview_datasource_connection_results = [
            op
            for op in stream.operations
            if isinstance(op, DataSourceConnectionsNotification)
        ]
        assert preview_datasource_connection_results == []

    # Guard on SQL deps like the sibling test classes: the fixture cells run
    # `import duckdb`, and without this guard a missing dependency would be
    # silently absorbed by the xfail marker below.
    @pytest.mark.skipif(not HAS_SQL, reason="SQL deps not available")
    @pytest.mark.xfail(
        reason="Should have only 2 connections (duckdb and sqlite)"
    )
    async def test_engines(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """Creating two connections should yield exactly two notifications."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(connection_requests)
        preview_datasource_connection_request = (
            ListDataSourceConnectionCommand(engine=DUCKDB_CONN)
        )
        await k.handle_message(preview_datasource_connection_request)
        preview_datasource_connection_results = [
            op
            for op in stream.operations
            if isinstance(op, DataSourceConnectionsNotification)
        ]
        assert len(preview_datasource_connection_results) == 2
@pytest.mark.skipif(not HAS_SQL, reason="SQL deps not available")
class TestSQLValidate:
    """Tests for the ValidateSQLCommand kernel handler."""

    async def test_non_existent_engine(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """An unknown engine yields an error with no parse/validate results."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        # Non-existent engine
        validate_sql_request = ValidateSQLCommand(
            request_id=RequestId("0"),
            engine=DUCKDB_CONN,
            query="SELECT * from t1",
            only_parse=False,
        )
        await k.handle_message(validate_sql_request)
        validate_sql_results = [
            op
            for op in stream.operations
            if isinstance(op, ValidateSQLResultNotification)
        ]
        assert validate_sql_results == [
            ValidateSQLResultNotification(
                request_id=RequestId("0"),
                parse_result=None,
                validate_result=None,
                error="Failed to get engine duckdb_conn",
            )
        ]

    async def test_internal_engine_and_valid_query(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """The internal duckdb engine parses and catalog-checks a valid query."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        # Internal engine and valid query
        validate_sql_request = ValidateSQLCommand(
            request_id=RequestId("1"),
            engine=INTERNAL_DUCKDB_ENGINE,
            query="SELECT 1, 2",
            only_parse=False,
        )
        await k.handle_message(validate_sql_request)
        validate_sql_results = [
            op
            for op in stream.operations
            if isinstance(op, ValidateSQLResultNotification)
        ]
        # Only the latest notification matters; earlier operations may exist.
        assert validate_sql_results[-1] == ValidateSQLResultNotification(
            request_id=RequestId("1"),
            parse_result=SqlParseResult(success=True, errors=[]),
            validate_result=SqlCatalogCheckResult(
                success=True, error_message=None
            ),
            error=None,
        )

    async def test_internal_engine_and_invalid_query(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """A syntactically valid query over a missing table fails the catalog check."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        # Internal engine and invalid query
        validate_sql_request = ValidateSQLCommand(
            request_id=RequestId("2"),
            engine=INTERNAL_DUCKDB_ENGINE,
            query="SELECT * FROM t1",
            only_parse=False,
        )
        await k.handle_message(validate_sql_request)
        validate_sql_results = [
            op
            for op in stream.operations
            if isinstance(op, ValidateSQLResultNotification)
        ]
        latest_validate_sql_result = validate_sql_results[-1]
        assert latest_validate_sql_result.request_id == RequestId("2")
        assert latest_validate_sql_result.parse_result is not None
        # query is syntactically valid
        assert latest_validate_sql_result.parse_result.success is True
        assert len(latest_validate_sql_result.parse_result.errors) == 0
        # ...but the catalog check fails because table t1 does not exist.
        assert latest_validate_sql_result.validate_result is not None
        assert latest_validate_sql_result.validate_result.success is False
        assert (
            latest_validate_sql_result.validate_result.error_message
            is not None
        )
        assert latest_validate_sql_result.error is None
        # NOTE(review): each test gets a fresh mocked_kernel, so this clear
        # appears to be leftover cleanup with no effect on other tests —
        # confirm before removing.
        stream.operations.clear()

    async def test_other_engine_and_valid_query(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """Non-duckdb engines validate against the catalog but skip parsing."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        # Handle other engines
        await k.run(connection_requests)
        validate_sql_request = ValidateSQLCommand(
            request_id=RequestId("3"),
            engine=SQLITE_CONN,
            query="SELECT 1, 2",
            only_parse=False,
        )
        await k.handle_message(validate_sql_request)
        validate_sql_results = [
            op
            for op in stream.operations
            if isinstance(op, ValidateSQLResultNotification)
        ]
        assert (
            validate_sql_results[-1]
            == ValidateSQLResultNotification(
                request_id=RequestId("3"),
                parse_result=None,  # Currently does not support parse errors for non-duckdb engines
                validate_result=SqlCatalogCheckResult(
                    success=True, error_message=None
                ),
                error=None,
            )
        )

    async def test_only_parse_with_no_dialect(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """only_parse requires an explicit dialect; an engine alone is not enough."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(connection_requests)
        validate_sql_request = ValidateSQLCommand(
            request_id=RequestId("4"),
            engine=SQLITE_CONN,
            query="SELECT 1, 2",
            only_parse=True,
        )
        await k.handle_message(validate_sql_request)
        validate_sql_results = [
            op
            for op in stream.operations
            if isinstance(op, ValidateSQLResultNotification)
        ]
        assert validate_sql_results[-1] == ValidateSQLResultNotification(
            request_id=RequestId("4"),
            parse_result=None,
            validate_result=None,
            error="Dialect is required when only parsing",
        )

    async def test_only_parse_unsupported_dialect(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """Parsing is only implemented for duckdb; other dialects error out."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(connection_requests)
        validate_sql_request = ValidateSQLCommand(
            request_id=RequestId("5"),
            dialect="sqlite",
            query="SELECT 1, 2",
            only_parse=True,
        )
        await k.handle_message(validate_sql_request)
        validate_sql_results = [
            op
            for op in stream.operations
            if isinstance(op, ValidateSQLResultNotification)
        ]
        assert validate_sql_results[-1] == ValidateSQLResultNotification(
            request_id=RequestId("5"),
            parse_result=None,
            validate_result=None,
            error="Unsupported dialect: sqlite",
        )

    async def test_only_parse_duckdb(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """only_parse with the duckdb dialect returns a parse result and no catalog check."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(connection_requests)
        validate_sql_request = ValidateSQLCommand(
            request_id=RequestId("6"),
            dialect="duckdb",
            query="SELECT 1, 2",
            only_parse=True,
        )
        await k.handle_message(validate_sql_request)
        validate_sql_results = [
            op
            for op in stream.operations
            if isinstance(op, ValidateSQLResultNotification)
        ]
        assert validate_sql_results[-1] == ValidateSQLResultNotification(
            request_id=RequestId("6"),
            parse_result=SqlParseResult(success=True, errors=[]),
            validate_result=None,
            error=None,
        )

    async def test_validate_but_no_engine(
        self,
        mocked_kernel: MockedKernel,
        connection_requests: list[ExecuteCellCommand],
    ) -> None:
        """Full validation (only_parse=False) without an engine is an error."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(connection_requests)
        validate_sql_request = ValidateSQLCommand(
            request_id=RequestId("7"),
            query="SELECT 1, 2",
            only_parse=False,
        )
        await k.handle_message(validate_sql_request)
        validate_sql_results = [
            op
            for op in stream.operations
            if isinstance(op, ValidateSQLResultNotification)
        ]
        assert validate_sql_results[-1] == ValidateSQLResultNotification(
            request_id=RequestId("7"),
            parse_result=None,
            validate_result=None,
            error="Engine is required for validating catalog",
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/test_runtime_datasets.py",
"license": "Apache License 2.0",
"lines": 502,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_sql/test_sql_parse.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from textwrap import dedent
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._sql.parse import (
SqlParseError,
SqlParseResult,
format_query_with_globals,
parse_sql,
replace_brackets_with_quotes,
)
HAS_DUCKDB = DependencyManager.duckdb.has()
def test_sql_parse_result_with_errors():
    """A failed SqlParseResult carries its SqlParseError entries through."""
    parse_error = SqlParseError(
        message="Syntax error", line=1, column=5, severity="error"
    )
    parse_result = SqlParseResult(success=False, errors=[parse_error])
    assert parse_result.success is False
    assert len(parse_result.errors) == 1
    assert parse_result.errors[0].message == "Syntax error"
class TestUnsupportedDialects:
    """Test handling of unsupported SQL dialects."""

    @pytest.mark.parametrize(
        "dialect",
        [
            "postgresql",
            "mysql",
            "sqlite",
            "oracle",
            "sqlserver",
            "bigquery",
            "snowflake",
            "redshift",
            "",
            "unknown_dialect",
        ],
    )
    def test_unsupported_dialects_return_none(self, dialect: str):
        """Every non-duckdb dialect yields no result plus an explanatory error."""
        parse_result, parse_error = parse_sql("SELECT * FROM table", dialect)
        assert parse_result is None
        assert parse_error == f"Unsupported dialect: {dialect}"

    def test_dialect_with_whitespace(self):
        """Dialect names are stripped before the unsupported-dialect check."""
        parse_result, parse_error = parse_sql("SELECT 1", " postgresql ")
        assert parse_result is None
        assert parse_error == "Unsupported dialect: postgresql"

    @pytest.mark.requires("duckdb")
    def test_duckdb_dialect_with_whitespace(self):
        """A whitespace-padded 'duckdb' dialect is still recognized."""
        parse_result, parse_error = parse_sql("SELECT 1", " duckdb ")
        assert parse_result is not None
        assert parse_error is None
@pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
class TestDuckDBValidQueries:
    """Test parsing of valid SQL queries with DuckDB."""

    @pytest.mark.parametrize(
        "query",
        [
            "SELECT 1",
            "SELECT 1 as value",
            "SELECT 'hello' as greeting",
            "SELECT 1, 2, 3",
            "SELECT 1 as a, 2 as b, 3 as c",
            "SELECT 1 UNION SELECT 2",
            "SELECT CASE WHEN 1 > 0 THEN 'positive' ELSE 'zero or negative' END",
            "SELECT COUNT(*)",
            "SELECT SUM(1), AVG(1), MAX(1), MIN(1)",
            "SELECT 1 ORDER BY 1",
            "SELECT 1 LIMIT 10",
        ],
    )
    def test_valid_queries_return_success(self, query: str):
        """Test that valid SQL queries return successful parse results."""
        result, error = parse_sql(query, "duckdb")
        assert result is not None
        assert error is None
        assert isinstance(result, SqlParseResult)
        assert result.success is True
        assert result.errors == []

    @pytest.mark.parametrize(
        "dialect",
        [
            "duckdb",
            "DUCKDB",
            "DuckDB",
            " duckdb ",
        ],
    )
    def test_duckdb_dialect_variations(self, dialect: str):
        """Test that various DuckDB dialect strings work."""
        # The query is deliberately incomplete ("SELECT * FROM"): a parse
        # result with success=False proves the dialect string was accepted
        # and parsing actually ran, rather than erroring on the dialect.
        result, error = parse_sql("SELECT * FROM", dialect)
        assert result is not None
        assert error is None
        assert result.success is False

    def test_multiline_valid_query(self):
        """Test parsing multiline valid queries."""
        query = """
        SELECT
        1 as id,
        'test' as name,
        true as active
        ORDER BY id DESC
        """
        result, error = parse_sql(query, "duckdb")
        assert result is not None
        assert error is None
        assert result.success is True
        assert result.errors == []

    def test_query_with_comments(self):
        """Test parsing queries with SQL comments."""
        query = """
        -- This is a comment
        SELECT 1 as test_column; -- End line comment
        """
        result, error = parse_sql(query, "duckdb")
        assert result is not None
        assert error is None
        assert result.success is True
        assert result.errors == []

    def test_complex_valid_query(self):
        """Test parsing a complex but valid query."""
        query = """
        WITH summary AS (
        SELECT
        1 as id,
        100 as total_quantity,
        50.5 as avg_price
        )
        SELECT
        id,
        total_quantity,
        avg_price,
        CASE
        WHEN total_quantity > 100 THEN 'High Volume'
        WHEN total_quantity > 50 THEN 'Medium Volume'
        ELSE 'Low Volume'
        END as volume_category
        FROM summary
        ORDER BY total_quantity DESC
        """
        result, error = parse_sql(query, "duckdb")
        assert result is not None
        assert error is None
        assert result.success is True
        assert result.errors == []
@pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
class TestDuckDBInvalidQueries:
    """Test parsing of invalid SQL queries with DuckDB."""

    @pytest.mark.parametrize(
        ("query", "expected_error_keywords"),
        [
            ("SELECT * FRM table", ["syntax", "error"]),
            ("SELECT (", ["syntax", "error"]),
            ("SELECT * FROM", ["syntax", "error"]),
            ("SELECT ,", ["syntax", "error"]),
            ("SELEC * FROM table", ["syntax", "error"]),
            ("SELECT * FROM table WHERE", ["syntax", "error"]),
            ("SELECT * FROM table ORDER", ["syntax", "error"]),
            ("INSERT INTO", ["syntax", "error"]),
            ("UPDATE", ["syntax", "error"]),
            ("DELETE", ["syntax", "error"]),
            # Unclosed parentheses
            ("SELECT COUNT(*", ["syntax", "error"]),
            # Unclosed quotes
            # ("SELECT 'unclosed string", ["syntax", "error"]), # DuckDB does not raise errors for this case
            # # Invalid function call
            ("SELECT COUNT(DISTINCT)", ["syntax", "error"]),
            # # Malformed CASE statement
            ("SELECT CASE WHEN FROM table", ["syntax", "error"]),
            # # Incomplete JOIN
            ("SELECT * FROM table1 JOIN", ["syntax", "error"]),
        ],
    )
    def test_invalid_queries_return_errors(
        self, query: str, expected_error_keywords: list[str]
    ):
        """Test that invalid SQL queries return error results.

        A syntax error is reported inside the parse result; the top-level
        error slot of parse_sql is reserved for unusable dialects or a
        missing duckdb installation, so it must be None here.
        """
        # Use distinct names so the tuple's error is not shadowed by the
        # per-entry SqlParseError examined below (the original reused
        # `error` for both and never checked the tuple member).
        result, parse_error = parse_sql(query, "duckdb")
        assert parse_error is None
        assert isinstance(result, SqlParseResult)
        assert result.success is False
        assert len(result.errors) > 0
        first_error = result.errors[0]
        assert isinstance(first_error, SqlParseError)
        assert first_error.severity == "error"
        assert first_error.line > 0
        assert first_error.column >= 0
        # Check that error message contains expected keywords
        error_msg_lower = first_error.message.lower()
        assert any(
            keyword in error_msg_lower for keyword in expected_error_keywords
        )
@pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
class TestErrorPositionCalculation:
    """Test that error positions (line and column) are calculated correctly."""

    def assert_line_column(
        self,
        result: SqlParseResult | None,
        parse_error: str | None,
        expected_line: int,
        expected_column: int,
    ):
        # Shared assertion: the parse ran (no top-level error), failed, and
        # reported exactly one error at the expected position.
        assert result is not None
        assert parse_error is None
        assert result.success is False
        assert len(result.errors) == 1
        error = result.errors[0]
        assert error.line == expected_line
        assert error.column == expected_column

    def test_single_line_error_position(self):
        """Test position calculation for single-line queries."""
        query = "SELECT * FRM table"  # Error at position of "FRM"
        result, error = parse_sql(query, "duckdb")
        self.assert_line_column(result, error, 1, 13)

    @pytest.mark.xfail(reason="DuckDB does not raise errors for this case")
    def test_multiline_error_position(self):
        """Test position calculation for multiline queries."""
        query = """SELECT name,
        email,
        FRM users"""  # Error on line 3
        result, error = parse_sql(query, "duckdb")
        self.assert_line_column(result, error, 3, 0)

    def test_error_at_beginning_of_line(self):
        """Test position calculation when error is at beginning of line."""
        query = """SELECT *
    FRM table"""
        result, error = parse_sql(query, "duckdb")
        self.assert_line_column(result, error, 2, 4)

    def test_error_with_leading_whitespace(self):
        """Test position calculation with leading whitespace."""
        query = """    SELECT *
    FRM table"""
        result, error = parse_sql(query, "duckdb")
        assert result is not None
        assert result.success is False
        assert len(result.errors) == 1
        error = result.errors[0]
        assert error.line == 2

    @pytest.mark.xfail(
        reason="DuckDB does not raise errors for invalid syntax"
    )
    def test_error_position_after_newlines(self):
        """Test position calculation with multiple newlines."""
        query = """
        SELECT name
        FROM users
        WHERE invalid_syntax"""
        result, error = parse_sql(query, "duckdb")
        assert result is not None
        assert error is None
        assert result.success is False
        assert len(result.errors) == 1
        error = result.errors[0]
        assert error.line == 3
        assert error.column >= 0

    def test_error_position_after_offset(self):
        """Columns shift right by 2 for each bracketed variable quoted before the error."""
        query = """SELECT id FRM users"""
        result, error = parse_sql(query, "duckdb")
        expected_line = 1
        expected_column = 14
        self.assert_line_column(result, error, expected_line, expected_column)
        query_with_brackets = "SELECT {id} FRM users"
        result, error = parse_sql(query_with_brackets, "duckdb")
        # Add 2 for the brackets
        self.assert_line_column(
            result, error, expected_line, expected_column + 2
        )
        # Multiple brackets
        query_with_multiple_vars = "SELECT id, name FRM users"
        result, error = parse_sql(query_with_multiple_vars, "duckdb")
        expected_line = 1
        expected_column = 20
        self.assert_line_column(result, error, expected_line, expected_column)
        query_multiple_vars_brackets = "SELECT {id}, {name} FRM users"
        result, error = parse_sql(query_multiple_vars_brackets, "duckdb")
        self.assert_line_column(
            result,
            error,
            expected_line,
            expected_column + (2 * 2),  # 2 for each bracket
        )

    def test_offset_after_position(self):
        """Brackets located after the error position do not shift the column."""
        query = "SELECT * FRM users WHERE id = id"
        result, error = parse_sql(query, "duckdb")
        expected_column = 13
        expected_line = 1
        self.assert_line_column(result, error, expected_line, expected_column)
        query_with_brackets = "SELECT * FRM users WHERE id = {id}"
        result, error = parse_sql(query_with_brackets, "duckdb")
        # No change since the error is before the brackets
        self.assert_line_column(result, error, expected_line, expected_column)
        # Multiple variables with brackets
        query_multiple_vars_brackets = (
            "SELECT * FRM users WHERE id = id and name = name"
        )
        result, error = parse_sql(query_multiple_vars_brackets, "duckdb")
        self.assert_line_column(result, error, expected_line, expected_column)
        query_multiple_vars_brackets_with_brackets = (
            "SELECT * FRM users WHERE id = {id} and name = {name}"
        )
        result, error = parse_sql(
            query_multiple_vars_brackets_with_brackets, "duckdb"
        )
        self.assert_line_column(result, error, expected_line, expected_column)

    def test_mixed_position_brackets(self):
        """Only brackets before the error contribute to the column offset."""
        query = "SELECT id FRM users WHERE name = name"
        result, error = parse_sql(query, "duckdb")
        expected_line = 1
        expected_column = 14
        self.assert_line_column(result, error, expected_line, expected_column)
        query_with_brackets = "SELECT {id} FRM users WHERE name = {name}"
        result, error = parse_sql(query_with_brackets, "duckdb")
        self.assert_line_column(
            result,
            error,
            expected_line,
            # Only accounts for brackets before the errors
            expected_column + 2,
        )

    def test_multiline_brackets_before_error(self):
        """Brackets on an earlier line do not shift a later line's column."""
        query = """SELECT id
    FRM users"""
        result, error = parse_sql(query, "duckdb")
        expected_line = 2
        expected_column = 4
        self.assert_line_column(result, error, expected_line, expected_column)
        query_with_brackets = """SELECT {id}
    FRM users"""
        result, error = parse_sql(query_with_brackets, "duckdb")
        self.assert_line_column(result, error, expected_line, expected_column)

    @pytest.mark.xfail(
        reason="There is an incorrect calculation for column position"
    )
    def test_brackets_on_error_line(self):
        """Brackets preceding the error on the same line should shift the column."""
        # Brackets on error line
        query_error_line = """SELECT name,
    id FRM users"""
        result, error = parse_sql(query_error_line, "duckdb")
        expected_line = 2
        expected_column = 7
        self.assert_line_column(result, error, expected_line, expected_column)
        query_error_line_with_brackets = """SELECT {name},
    {id} FRM users"""
        result, error = parse_sql(query_error_line_with_brackets, "duckdb")
        self.assert_line_column(
            result, error, expected_line, expected_column + 2
        )

    def test_multiline_brackets_after_error(self):
        """Brackets after the error in a multiline query do not shift the column."""
        query = """SELECT *
    FRM users WHERE name = name"""
        result, error = parse_sql(query, "duckdb")
        expected_line = 2
        expected_column = 4
        self.assert_line_column(result, error, expected_line, expected_column)
        query_with_brackets = """SELECT *
    FRM users WHERE name = {name}"""
        result, error = parse_sql(query_with_brackets, "duckdb")
        # No change since the error is after the brackets
        self.assert_line_column(result, error, expected_line, expected_column)
@pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
class TestEdgeCases:
    """Boundary conditions and unusual inputs for parse_sql with duckdb."""

    def test_empty_query(self):
        """An empty string parses successfully."""
        parse_result, parse_error = parse_sql("", "duckdb")
        assert parse_result is not None
        assert parse_error is None
        assert isinstance(parse_result, SqlParseResult)
        assert parse_result.success is True

    def test_whitespace_only_query(self):
        """Whitespace-only input parses successfully with no errors."""
        parse_result, parse_error = parse_sql(" \n \t ", "duckdb")
        assert parse_result is not None
        assert parse_error is None
        assert isinstance(parse_result, SqlParseResult)
        assert parse_result.success is True
        assert parse_result.errors == []

    def test_query_with_semicolon(self):
        """A trailing semicolon does not break parsing."""
        parse_result, parse_error = parse_sql("SELECT 1;", "duckdb")
        assert parse_result is not None
        assert parse_result.success is True
        assert parse_result.errors == []

    def test_multiple_statements(self):
        """Several semicolon-separated statements still yield a result."""
        statements = "SELECT 1; SELECT 2;"
        parse_result, parse_error = parse_sql(statements, "duckdb")
        # DuckDB might handle this differently, just ensure we get a result
        assert isinstance(parse_result, SqlParseResult)

    def test_very_long_query(self):
        """A query with a hundred projected columns parses cleanly."""
        projection = ", ".join(f"{i} as col_{i}" for i in range(100))
        parse_result, parse_error = parse_sql(f"SELECT {projection}", "duckdb")
        assert parse_error is None
        assert parse_result is not None
        assert parse_result.success is True
        assert parse_result.errors == []

    def test_query_with_unicode(self):
        """Unicode characters inside string literals are accepted."""
        parse_result, parse_error = parse_sql(
            "SELECT 'Hello 世界' as greeting", "duckdb"
        )
        assert parse_error is None
        assert parse_result is not None
        assert parse_result.success is True
        assert parse_result.errors == []

    def test_query_with_special_characters(self):
        """Punctuation-heavy string literals are accepted."""
        parse_result, parse_error = parse_sql(
            "SELECT 'test@#$%^&*()' as special_chars", "duckdb"
        )
        assert parse_error is None
        assert parse_result is not None
        assert parse_result.success is True
        assert parse_result.errors == []

    def test_deeply_nested_query(self):
        """Several levels of nested subqueries parse cleanly."""
        nested = """
        SELECT * FROM (
        SELECT * FROM (
        SELECT * FROM (
        SELECT 1 as nested_value
        ) inner_query
        ) middle_query
        ) outer_query
        """
        parse_result, parse_error = parse_sql(nested, "duckdb")
        assert parse_error is None
        assert parse_result is not None
        assert parse_result.success is True
        assert parse_result.errors == []

    def test_just_comments(self):
        """Inputs that are entirely (or mostly) SQL comments parse cleanly."""
        comment_queries = [
            """
            -- This is a comment
            /* This is a comment */
            """,
            """-- SELECT 1""",
            """/* SELECT 1 */""",
            """
            -- This is a comment
            SELECT 1 as test_column; -- End line comment
            """,
            """
            /* This is a comment */
            SELECT 1 as test_column; /* End line comment */
            """,
        ]
        for sql in comment_queries:
            parse_result, parse_error = parse_sql(sql, "duckdb")
            assert parse_result is not None
            assert parse_result.success is True
            assert parse_result.errors == []
@pytest.mark.skipif(HAS_DUCKDB, reason="DuckDB is installed")
def test_fails_gracefully_no_duckdb():
    """Without DuckDB installed, parse_sql reports an error instead of raising."""
    parse_result, parse_error = parse_sql("SELECT 1", "duckdb")
    assert parse_result is None
    assert parse_error is not None
class TestReplaceBracketsWithQuotes:
"""Test the replace_brackets_with_quotes function."""
def test_basic_replacement(self):
"""Test basic bracket replacement."""
sql = "SELECT {id} FROM users"
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert result_sql == "SELECT '{id}' FROM users"
assert offset_record == {7: 2}
def test_already_quoted_single(self):
"""Test that already single-quoted brackets are not modified."""
sql = "SELECT {id}, '{name}' FROM users"
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert result_sql == "SELECT '{id}', '{name}' FROM users"
assert offset_record == {7: 2}
def test_already_quoted_double(self):
"""Test that already double-quoted brackets are not modified."""
sql = 'SELECT {id}, "{name}" FROM users'
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert result_sql == "SELECT '{id}', \"{name}\" FROM users"
assert offset_record == {7: 2}
def test_multiple_brackets(self):
"""Test multiple unquoted brackets."""
sql = "SELECT {id}, {name}, {age} FROM users"
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert result_sql == "SELECT '{id}', '{name}', '{age}' FROM users"
assert offset_record == {7: 2, 13: 2, 21: 2}
def test_mixed_quoted_and_unquoted(self):
"""Test mix of quoted and unquoted brackets."""
sql = "SELECT {id}, '{name}', {age}, \"{city}\" FROM users"
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert (
result_sql
== "SELECT '{id}', '{name}', '{age}', \"{city}\" FROM users"
)
assert offset_record == {7: 2, 23: 2}
def test_no_brackets(self):
"""Test SQL with no brackets."""
sql = "SELECT id, name FROM users"
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert result_sql == "SELECT id, name FROM users"
assert offset_record == {}
def test_empty_brackets(self):
"""Test SQL with empty brackets."""
sql = "SELECT {} FROM users"
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert result_sql == "SELECT '{}' FROM users"
assert offset_record == {7: 2}
def test_multiple_bracket_in_quotes(self):
sql = "SELECT '{id} {name}' FROM users"
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert result_sql == "SELECT '{id} {name}' FROM users"
assert offset_record == {}
def test_escaped_quotes_in_strings(self):
"""Test that escaped quotes in strings are handled correctly."""
sql = "SELECT 'O\\'Brien', {id} FROM users"
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert result_sql == "SELECT 'O\\'Brien', '{id}' FROM users"
assert offset_record == {19: 2}
def test_complex_nested_quotes(self):
"""Test complex nested quote scenarios."""
sql = "SELECT '{id}', \"{name}\", {status} FROM users WHERE name = 'John\\'s'"
result_sql, offset_record = replace_brackets_with_quotes(sql)
assert (
result_sql
== "SELECT '{id}', \"{name}\", '{status}' FROM users WHERE name = 'John\\'s'"
)
assert offset_record == {25: 2}
    def test_multiline(self):
        """Test multiline query."""
        # Offsets are absolute indices into the string: the leading newline
        # and the 8-space indentation of each line count toward them.
        sql = """
        SELECT
        {id}, {name}, {age}
        FROM users
        WHERE name = 'John\\'s'
        """
        result_sql, offset_record = replace_brackets_with_quotes(sql)
        assert (
            result_sql
            == """
        SELECT
        '{id}', '{name}', '{age}'
        FROM users
        WHERE name = 'John\\'s'
        """
        )
        assert offset_record == {24: 2, 30: 2, 38: 2}
        # Continuation lines starting at column 0: offset 7 is right after
        # "SELECT\n".
        sql = """SELECT
{id}
FROM users"""
        result_sql, offset_record = replace_brackets_with_quotes(sql)
        assert (
            result_sql
            == """SELECT
'{id}'
FROM users"""
        )
        assert offset_record == {7: 2}
        # dedent() strips the common margin before offsets are computed.
        sql = dedent("""
            SELECT
                {id}
            FROM users
        """)
        result_sql, offset_record = replace_brackets_with_quotes(sql)
        assert result_sql == dedent("""
            SELECT
                '{id}'
            FROM users
        """)
        assert offset_record == {12: 2}
def test_insert_json(self):
query = "INSERT INTO users VALUES (1, '{\"id\": 1}')"
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == "INSERT INTO users VALUES (1, '{\"id\": 1}')"
assert offset_record == {}
def test_brackets_inside_quotes(self):
# Brackets inside single quotes should not be replaced
query = "SELECT '{id}' FROM users"
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == "SELECT '{id}' FROM users"
assert offset_record == {}
# Brackets inside double quotes should not be replaced
query = 'SELECT "{id}" FROM users'
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == 'SELECT "{id}" FROM users'
assert offset_record == {}
def test_multiple_brackets_on_same_line(self):
query = "SELECT {id}, {name}, {age} FROM users"
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == "SELECT '{id}', '{name}', '{age}' FROM users"
assert offset_record == {7: 2, 13: 2, 21: 2}
@pytest.mark.xfail(reason="Nested brackets are not supported")
def test_nested_brackets(self):
query = "SELECT {id_{nested}} FROM users"
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == "SELECT '{id_{nested}}' FROM users"
assert offset_record == {7: 2}
def test_brackets_with_escaped_quotes(self):
# Brackets inside a quoted string with escaped quotes
query = "SELECT '{id}\\'s' FROM users"
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == "SELECT '{id}\\'s' FROM users"
assert offset_record == {}
def test_brackets_at_start_and_end(self):
query = "{id} FROM users WHERE name = {name}"
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == "'{id}' FROM users WHERE name = '{name}'"
assert offset_record == {0: 2, 29: 2}
def test_brackets_with_special_characters(self):
query = "SELECT {id_1$-foo} FROM users"
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == "SELECT '{id_1$-foo}' FROM users"
assert offset_record == {7: 2}
def test_brackets_in_comment(self):
# Brackets in SQL comments should be replaced, as comments are not parsed
query = "SELECT id -- {comment}\nFROM users"
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == "SELECT id -- '{comment}'\nFROM users"
assert offset_record == {13: 2}
def test_adjacent_brackets(self):
query = "SELECT {id}{name}{age} FROM users"
result_sql, offset_record = replace_brackets_with_quotes(query)
assert result_sql == "SELECT '{id}''{name}''{age}' FROM users"
assert offset_record == {7: 2, 11: 2, 17: 2}
class TestFormatQueryWithGlobals:
    """Tests for the format_query_with_globals function."""

    def test_basic_substitution(self):
        """A known variable is substituted directly."""
        template = "SELECT {column} FROM users"
        rendered = format_query_with_globals(template, {"column": "id"})
        assert rendered == "SELECT id FROM users"

    def test_multiple_substitutions(self):
        """Several variables are substituted in one pass."""
        template = "SELECT {col1}, {col2} FROM {table}"
        rendered = format_query_with_globals(
            template, {"col1": "id", "col2": "name", "table": "users"}
        )
        assert rendered == "SELECT id, name FROM users"

    def test_missing_variable_quoted(self):
        """Unknown variables fall back to their quoted name."""
        template = "SELECT {missing} FROM users"
        rendered = format_query_with_globals(template, {})
        assert rendered == "SELECT 'missing' FROM users"

    def test_mixed_present_and_missing(self):
        """Known and unknown variables can coexist in one query."""
        template = "SELECT {present}, {missing} FROM users"
        rendered = format_query_with_globals(template, {"present": "id"})
        assert rendered == "SELECT id, 'missing' FROM users"

    def test_no_brackets_returns_unchanged(self):
        """A bracket-free query is returned verbatim."""
        template = "SELECT * FROM users"
        rendered = format_query_with_globals(template, {"unused": "value"})
        assert rendered == "SELECT * FROM users"

    def test_empty_query(self):
        """An empty query stays empty."""
        rendered = format_query_with_globals("", {})
        assert rendered == ""

    def test_empty_globals(self):
        """With no globals, every key is treated as missing and quoted."""
        template = "SELECT {var} FROM users"
        rendered = format_query_with_globals(template, {})
        assert rendered == "SELECT 'var' FROM users"

    def test_quoted_brackets_ignored(self):
        """Brackets inside single quotes are NOT substituted."""
        template = "SELECT '{id}' FROM users"
        rendered = format_query_with_globals(template, {"id": "REPLACED"})
        assert rendered == "SELECT '{id}' FROM users"

    def test_double_quoted_brackets_substituted(self):
        """Brackets inside double quotes (identifiers) ARE substituted."""
        template = 'SELECT "{col}" FROM users'
        rendered = format_query_with_globals(template, {"col": "my_column"})
        assert rendered == 'SELECT "my_column" FROM users'

    def test_double_quoted_missing_key(self):
        """A missing key inside double quotes is quoted in place."""
        template = 'SELECT "{col}" FROM users'
        rendered = format_query_with_globals(template, {})
        assert rendered == "SELECT \"'col'\" FROM users"

    def test_mixed_quoted_and_unquoted(self):
        """Single-quoted brackets are kept; bare ones are substituted."""
        template = "SELECT '{literal}', {variable} FROM users"
        rendered = format_query_with_globals(template, {"variable": "id"})
        assert rendered == "SELECT '{literal}', id FROM users"

    def test_numeric_value(self):
        """Numeric values interpolate via their string form."""
        template = "SELECT * FROM users WHERE id = {id}"
        rendered = format_query_with_globals(template, {"id": 42})
        assert rendered == "SELECT * FROM users WHERE id = 42"

    def test_string_value_not_auto_quoted(self):
        """String values are inserted as-is; the caller handles quoting."""
        template = "SELECT * FROM users WHERE name = {name}"
        rendered = format_query_with_globals(template, {"name": "'Alice'"})
        assert rendered == "SELECT * FROM users WHERE name = 'Alice'"

    def test_escaped_quotes_in_strings(self):
        """Substitution still works next to escaped quotes."""
        template = "SELECT 'O\\'Brien', {id} FROM users"
        rendered = format_query_with_globals(template, {"id": "123"})
        assert rendered == "SELECT 'O\\'Brien', 123 FROM users"

    def test_json_in_quoted_string(self):
        """JSON-like braces inside quoted strings are preserved."""
        template = "INSERT INTO t VALUES ('{\"id\": 1}', {value})"
        rendered = format_query_with_globals(template, {"value": "42"})
        assert rendered == "INSERT INTO t VALUES ('{\"id\": 1}', 42)"

    def test_empty_brackets(self):
        """An empty expression resolves through the empty-string key."""
        template = "SELECT {} FROM users"
        rendered = format_query_with_globals(template, {"": "value"})
        assert rendered == "SELECT value FROM users"

    def test_empty_brackets_missing_key(self):
        """An empty expression with no matching key yields quoted empty."""
        template = "SELECT {} FROM users"
        rendered = format_query_with_globals(template, {})
        assert rendered == "SELECT '' FROM users"

    def test_adjacent_brackets(self):
        """Back-to-back expressions are substituted independently."""
        template = "SELECT {a}{b} FROM users"
        rendered = format_query_with_globals(template, {"a": "1", "b": "2"})
        assert rendered == "SELECT 12 FROM users"

    def test_multiline_query(self):
        """Substitution spans multiple lines."""
        template = dedent("""
            SELECT {col}
            FROM {table}
            WHERE id = {id}
        """)
        rendered = format_query_with_globals(
            template, {"col": "name", "table": "users", "id": "1"}
        )
        expected = dedent("""
            SELECT name
            FROM users
            WHERE id = 1
        """)
        assert rendered == expected

    def test_missing_key_handler(self):
        """A custom handler decides how missing keys are rendered."""
        template = "SELECT {col} FROM {table}"
        rendered = format_query_with_globals(
            template, {"col": "id"}, lambda key: f'"{key}"'
        )
        assert rendered == 'SELECT id FROM "table"'

        rendered = format_query_with_globals(
            template, {"col": "id"}, lambda key: key.upper()
        )
        assert rendered == "SELECT id FROM TABLE"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_sql/test_sql_parse.py",
"license": "Apache License 2.0",
"lines": 745,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/anywidget_smoke_tests/uchimata_example.py | # /// script
# requires-python = ">=3.12"
# dependencies = [
# "anywidget==0.9.18",
# "numpy==2.3.3",
# "polars==1.33.1",
# "traitlets==5.14.3",
# "uchimata==0.3.0",
# ]
# ///
import marimo

# Version of marimo that serialized this notebook.
__generated_with = "0.19.7"
app = marimo.App(width="medium")


@app.cell
def _():
    # Minimal anywidget round-trip: bytes set on the Python side are decoded
    # and rendered as text by the front-end module below.
    import anywidget
    import traitlets

    class Widget(anywidget.AnyWidget):
        _esm = """
        export default {
          render({ model, el }) {
            const dataView = model.get("data");
            const bytes = new Uint8Array(dataView.buffer)
            const decoded = new TextDecoder().decode(bytes);
            el.innerText = decoded;
          }
        }
        """
        # Synced to the front end; the JS above reads it via model.get("data").
        data = traitlets.Any().tag(sync=True)

    # Should display "hello"
    Widget(data=b"hello")
    return


@app.cell
def _():
    import uchimata as uchi
    import numpy as np

    BINS_NUM = 1000

    # Step 1: Generate random structure, returns a 2D numpy array:
    def make_random_3D_chromatin_structure(n):
        # Random walk on the integer lattice: n unit steps from the origin,
        # returning n + 1 positions (the origin included).
        position = np.array([0.0, 0.0, 0.0])
        positions = [position.copy()]
        for _ in range(n):
            step = np.random.choice(
                [-1.0, 0.0, 1.0], size=3
            )  # Randomly choose to move left, right, up, down, forward, or backward
            position += step
            positions.append(position.copy())
        return np.array(positions)

    random_structure = make_random_3D_chromatin_structure(BINS_NUM)

    # Step 2: Display the structure in an uchimata widget
    # One color value per position (the walk yields BINS_NUM + 1 points).
    numbers = list(range(0, BINS_NUM + 1))
    vc = {
        "color": {
            "values": numbers,
            "min": 0,
            "max": BINS_NUM,
            "colorScale": "Spectral",
        },
        "scale": 0.01,
        "links": True,
        "mark": "sphere",
    }
    uchi.Widget(random_structure, vc)
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/anywidget_smoke_tests/uchimata_example.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_output/md_extensions/breakless_lists.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import re
from typing import TYPE_CHECKING
from markdown import ( # type: ignore
Extension,
Markdown,
preprocessors,
treeprocessors,
)
if TYPE_CHECKING:
from xml.etree.ElementTree import Element
class BreaklessListsPreprocessor(preprocessors.Preprocessor):  # type: ignore[misc]
    """
    Enables CommonMark-style list interruption of paragraphs.

    In CommonMark, lists can interrupt paragraphs without requiring a blank
    line. Python-Markdown requires blank lines, so this preprocessor inserts
    one automatically whenever a non-blank line is immediately followed by a
    line that starts a list item.
    """

    # Pattern to match lines that start list items (ordered or unordered)
    LIST_START_PATTERN = re.compile(r"^(\s*)([*+-]|\d+\.)(\s+)", re.MULTILINE)

    def __init__(self, md: Markdown) -> None:
        super().__init__(md)

    def run(self, lines: list[str]) -> list[str]:
        """Return ``lines`` with a blank line inserted before any list item
        that directly follows a non-blank line.

        Fix: the original nested ``if current_line.strip():`` re-tested a
        condition already guaranteed by the enclosing check, so the dead
        branch has been removed.
        """
        if not lines:
            return lines

        result_lines: list[str] = []
        for i, current_line in enumerate(lines):
            result_lines.append(current_line)
            # Insert a separator when the current line has content and the
            # next line begins a list item.
            if (
                i + 1 < len(lines)
                and current_line.strip()
                and self.LIST_START_PATTERN.match(lines[i + 1])
            ):
                result_lines.append("")
        return result_lines
class BreaklessListsTreeProcessor(treeprocessors.Treeprocessor):  # type: ignore[misc]
    """
    Unwraps lone paragraph tags inside list items to produce compact lists.

    Removing the <p> wrapper within an <li> makes the rendered list compact.
    """

    def run(self, root: Element) -> None:
        for item in root.iter(tag="li"):
            for para in item.findall(".//p"):
                # Only unwrap attribute-free paragraphs that are the sole
                # child of the list item.
                if para.attrib or not (len(item) == 1 and item[0] is para):
                    continue
                # Hoist the paragraph's text, tail and children onto the
                # list item, then drop the paragraph element itself.
                item.text = para.text
                item.tail = para.tail
                for child in list(para):
                    item.append(child)
                item.remove(para)
class BreaklessListsExtension(Extension):  # type: ignore[misc]
    """
    Extension enabling CommonMark-style list interruption of paragraphs.

    Lists may follow a paragraph without a separating blank line (matching
    the CommonMark specification), and list items are rendered compactly by
    stripping the paragraph tags inside them.
    """

    def extendMarkdown(self, md: Markdown) -> None:
        # Preprocessor at priority 30: runs early, before other processors.
        md.preprocessors.register(
            BreaklessListsPreprocessor(md), "breakless_lists_preproc", 30
        )
        # Tree processor at priority 10: after lists are parsed but before
        # paragraph cleanup.
        md.treeprocessors.register(
            BreaklessListsTreeProcessor(md), "breakless_lists_tree", 10
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_output/md_extensions/breakless_lists.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_output/md_extensions/flexible_indent.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import re
from markdown import Extension, Markdown, preprocessors # type: ignore
class FlexibleIndentPreprocessor(preprocessors.Preprocessor):  # type: ignore[misc]
    """
    Preprocessor to standardize list indentation to specific levels.

    Normalizes inconsistent indentation to match the allowed levels.
    """

    # Pattern to match lines that start list items (ordered or unordered)
    # Captures: (indentation, list_marker, trailing_space, content)
    LIST_PATTERN = re.compile(r"^(\s*)([*+-]|\d+\.)(\s+)(.*)$", re.MULTILINE)

    # NOTE(review): INDENT_LEVELS is not read by any method in this class;
    # kept in case external code relies on it — confirm before removing.
    INDENT_LEVELS = [2, 4]
    BASE_INDENT_SIZE = 4
    FOUR_SPACES = "    "

    def __init__(self, md: Markdown) -> None:
        super().__init__(md)

    def _detect_base_indent(self, lines: list[str]) -> int:
        """
        Detect the base indentation level used in the document.

        Returns 2 for 2-space indentation or 4 for 4-space indentation.
        """
        indents: list[int] = []
        for line in lines:
            match = self.LIST_PATTERN.match(line)
            if match:
                indent_str = match.group(1)
                if indent_str:  # Skip non-indented items
                    indent_count = len(
                        indent_str.replace("\t", self.FOUR_SPACES)
                    )
                    indents.append(indent_count)

        if not indents:
            return self.BASE_INDENT_SIZE

        # Find the smallest non-zero indent - this is likely our base level
        min_indent = min(indents)

        # Choose the closest allowed indent level
        if min_indent <= 2:
            return 2
        else:
            return self.BASE_INDENT_SIZE

    def _normalize_indentation(self, indent_str: str, base_level: int) -> str:
        """
        Normalize indentation to consistent 4-space increments.

        Both 2-space and 4-space indentation patterns map to the same
        nesting depth, which is then emitted using 4-space increments
        (the markdown spec's requirement). The docstring previously claimed
        2-space output, contradicting the implementation; it now matches.

        Args:
            indent_str: The original indentation string
            base_level: The detected base indentation level (2 or 4)

        Returns:
            Normalized indentation string using 4-space increments
        """
        # Convert tabs to spaces (assuming 1 tab = 4 spaces)
        normalized = indent_str.replace("\t", self.FOUR_SPACES)
        indent_count = len(normalized)

        if indent_count == 0:
            return ""

        # Calculate the intended nesting level based on the base level
        nesting_level = max(1, round(indent_count / base_level))

        # Always output using 4-space increments since that is what the
        # markdown spec requires (BASE_INDENT_SIZE == 4).
        return " " * (self.BASE_INDENT_SIZE * nesting_level)

    def _get_list_depth(self, indent_str: str, base_level: int = 2) -> int:
        """Calculate the nesting depth of a list item."""
        normalized = indent_str.replace("\t", self.FOUR_SPACES)
        indent_count = len(normalized)

        if indent_count == 0:
            return 0

        # Calculate depth based on the base level
        return max(1, round(indent_count / base_level))

    def run(self, lines: list[str]) -> list[str]:
        """Process the lines and normalize list indentation."""
        if not lines:
            return lines

        # Detect the base indentation level used in this document
        base_level = self._detect_base_indent(lines)

        result_lines: list[str] = []
        for line in lines:
            match = self.LIST_PATTERN.match(line)
            if match:
                indent, marker, space, content = match.groups()

                # Normalize the indentation based on detected base level
                normalized_indent = self._normalize_indentation(
                    indent, base_level
                )

                # Reconstruct the line with normalized indentation
                normalized_line = (
                    f"{normalized_indent}{marker}{space}{content}"
                )
                result_lines.append(normalized_line)
            else:
                result_lines.append(line)

        return result_lines
class FlexibleIndentExtension(Extension):  # type: ignore[misc]
    """
    Extension to provide flexible list indentation support.
    """

    def extendMarkdown(self, md: Markdown) -> None:
        """Register the indentation-normalizing preprocessor."""
        # Priority 35: runs before breakless_lists and other list processing.
        md.preprocessors.register(
            FlexibleIndentPreprocessor(md), "flexible_indent", 35
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_output/md_extensions/flexible_indent.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/api/endpoints/sql.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from starlette.authentication import requires
from marimo._server.api.utils import dispatch_control_request
from marimo._server.models.models import BaseResponse, ValidateSQLRequest
from marimo._server.router import APIRouter
if TYPE_CHECKING:
from starlette.requests import Request
# Router collecting the SQL endpoints defined in this module.
router = APIRouter()
@router.post("/validate")
@requires("edit")
async def validate_sql(request: Request) -> BaseResponse:
    """
    parameters:
        - in: header
          name: Marimo-Session-Id
          schema:
            type: string
          required: true
    requestBody:
        content:
            application/json:
                schema:
                    $ref: "#/components/schemas/ValidateSQLRequest"
    responses:
        200:
            description: Validate an SQL query
            content:
                application/json:
                    schema:
                        $ref: "#/components/schemas/SuccessResponse"
    """
    # Deserialize the body as a ValidateSQLRequest and hand it to the
    # session's control-request dispatcher.
    return await dispatch_control_request(request, ValidateSQLRequest)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/sql.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:tests/_server/api/endpoints/test_sql_endpoints.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from tests._server.mocks import token_header, with_read_session, with_session
if TYPE_CHECKING:
from starlette.testclient import TestClient
SESSION_ID = "session-123"
# Headers every request needs: the session id plus an auth token header.
HEADERS = {
    "Marimo-Session-Id": SESSION_ID,
    **token_header("fake-token"),
}
@with_session(SESSION_ID)
def test_validate_sql(client: TestClient) -> None:
    """A validate request in an edit session returns 200."""
    payload = {
        "requestId": "test_request_id",
        "engine": "test_engine",
        "query": "SELECT * FROM test",
        "onlyParse": False,
    }
    response = client.post("/api/sql/validate", headers=HEADERS, json=payload)
    assert response.status_code == 200, response.text
@with_read_session(SESSION_ID)
def test_fails_in_read_mode(client: TestClient) -> None:
    """The validate endpoint requires edit permissions (401 in read mode)."""
    payload = {
        "requestId": "test_request_id",
        "engine": "test_engine",
        "query": "SELECT * FROM test",
    }
    response = client.post("/api/sql/validate", headers=HEADERS, json=payload)
    assert response.status_code == 401
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/api/endpoints/test_sql_endpoints.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/sql_error_handling.py | # /// script
# requires-python = ">=3.11"
# dependencies = [
# "duckdb",
# "marimo",
# ]
# ///
# Copyright 2026 Marimo. All rights reserved.
# NOTE(review): almost every SQL cell below is intentionally invalid — this
# notebook exercises marimo's SQL error UI, so the broken statements must be
# preserved as-is.
import marimo

__generated_with = "0.16.1"
app = marimo.App(width="medium")

with app.setup(hide_code=True):
    import marimo as mo
    import duckdb


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    # SQL Error Handling Smoke Test

    This notebook demonstrates marimo's SQL error handling capabilities across different types of SQL errors.
    Each section shows how marimo gracefully handles SQL errors with helpful error messages.
    """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## Setup Test Data

    First, let's create some test data to work with.
    """
    )
    return


@app.cell
def _():
    # Create test data for our error examples
    test_setup = mo.sql(
        f"""
        CREATE OR REPLACE TABLE users (
            id INTEGER,
            name TEXT,
            email TEXT,
            age INTEGER
        )
        """
    )
    # NOTE(review): `users` is the SQL-created table tracked by marimo's
    # codegen, not a Python local — confirm this matches marimo's format.
    return (users,)


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        INSERT INTO users VALUES
        (1, 'Alice', 'alice@example.com', 25),
        (2, 'Bob', 'bob@example.com', 30),
        (3, 'Charlie', 'charlie@example.com', 35)
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 1. Table Not Found Errors

    DuckDB provides helpful suggestions when table names don't exist.
    """
    )
    return


@app.cell
def _(user):
    _df = mo.sql(
        f"""
        -- This will show "Table with name 'user' does not exist! Did you mean 'users'?"
        select * from user
        """
    )
    return


@app.cell
def _(user_table):
    _df = mo.sql(
        f"""
        -- Another table typo example - completely wrong name
        SELECT * FROM user_table
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 2. Column Not Found Errors

    Column reference errors with candidate suggestions.
    """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        -- This will show column not found with candidates
        -- Should be 'name' instead of 'user_name'
        SELECT user_name FROM users
        """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        -- Another column error
        -- Should be 'name' instead of 'fullname'
        SELECT id, fullname, email FROM users
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 3. SQL Syntax Errors

    Various syntax errors with position information.
    """
    )
    return


@app.cell
def _():
    _df = mo.sql(
        f"""
        -- Missing FROM keyword (FRM instead of FROM)
        SELECT * FRM users
        """
    )
    return


@app.cell
def _():
    _df = mo.sql(
        f"""
        -- Malformed parentheses
        SELECT ( FROM users
        """
    )
    return


@app.cell
def _():
    _df = mo.sql(
        f"""
        -- Invalid WHERE clause - missing condition
        SELECT * FROM users WHERE
        """
    )
    return


@app.cell
def _():
    _df = mo.sql(
        f"""
        -- Multiple FROM clauses - invalid syntax
        SELECT * FROM users FROM users
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 4. Data Type Errors

    Type mismatch and conversion errors.
    """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        -- Type conversion error - can't divide number by string
        SELECT age / 'invalid_string' FROM users
        """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        -- String comparison with number - type mismatch
        SELECT * FROM users WHERE name > 123
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 5. Function Errors

    Function call errors and argument mismatches.
    """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        -- Invalid function name
        SELECT INVALID_FUNCTION(name) FROM users
        """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        -- Wrong number of arguments for SUBSTRING
        -- Missing start position and length parameters
        SELECT SUBSTRING(name) FROM users
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 6. Aggregate Function Errors

    GROUP BY and aggregate function errors.
    """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        -- Missing GROUP BY for non-aggregated column
        -- 'name' should be in GROUP BY when using COUNT(*)
        SELECT name, COUNT(*) FROM users
        """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        -- Invalid HAVING without GROUP BY
        SELECT * FROM users HAVING COUNT(*) > 1
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 7. Complex Query Errors

    More complex SQL errors that might occur in real scenarios.
    """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        -- Subquery error - invalid_column doesn't exist
        SELECT * FROM users
        WHERE id IN (SELECT invalid_column FROM users)
        """
    )
    return


@app.cell
def _(nonexistent_table, users):
    _df = mo.sql(
        f"""
        -- JOIN error - nonexistent_table doesn't exist
        SELECT u.name, p.title
        FROM users u
        JOIN nonexistent_table p ON u.id = p.user_id
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 8. Very Long SQL Statements

    Testing error handling with long SQL that gets truncated.
    """
    )
    return


@app.cell
def _(nonexistent_table):
    _df = mo.sql(
        f"""
        -- Very long SELECT with many columns that don't exist
        -- This will show error message truncation
        SELECT {", ".join([f"col_{i}" for i in range(50)])}
        FROM nonexistent_table
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 9. SQL with Special Characters

    Testing error handling with special characters and edge cases.
    """
    )
    return


@app.cell
def _():
    _df = mo.sql(
        f"""
        -- SQL with quotes and special characters in table name
        SELECT * FROM 'table with spaces and quotes'
        """
    )
    return


@app.cell
def _(用户表):
    _df = mo.sql(
        f"""
        -- SQL with unicode characters in table name
        SELECT * FROM 用户表
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        r"""
    ## 11. Multiple Statements

    Test sql cells with multiple queries
    """
    )
    return


@app.cell
def _(users):
    _df = mo.sql(
        f"""
        SELECT * FROM users;
        SELECT names FROM users
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## 12. Successful Query for Comparison

    Here's a working query to show the contrast with error handling.
    """
    )
    return


@app.cell
def _(users):
    # This should work perfectly
    successful_query = mo.sql(
        f"""
        SELECT name, age, email
        FROM users
        WHERE age > 25
        ORDER BY age DESC
        """
    )
    return


@app.cell(hide_code=True)
def _():
    mo.md(
        """
    ## Summary

    This notebook demonstrates marimo's comprehensive SQL error handling:

    - **Clear Error Messages**: Specific, actionable error descriptions displayed in marimo's error UI
    - **Helpful Suggestions**: DuckDB's friendly error messages with "Did you mean?" suggestions
    - **Position Information**: Line and column details when available
    - **Statement Context**: Shows the problematic SQL statement in the structured error display
    - **Graceful Degradation**: Errors don't crash the notebook, they display as structured errors
    - **Truncation**: Long SQL statements are truncated for readability in error messages

    Each SQL cell above will display a structured error in marimo's error UI, showing how
    different types of SQL errors are handled gracefully with actionable feedback.
    """
    )
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/sql_error_handling.py",
"license": "Apache License 2.0",
"lines": 377,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_sql/error_utils.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Callable, Optional, TypedDict
from marimo._dependencies.dependencies import DependencyManager
if TYPE_CHECKING:
import ast
from marimo._messaging.errors import MarimoSQLError
from marimo import _loggers

# Shared module-level logger for the SQL error utilities.
LOGGER = _loggers.marimo_logger()
class MarimoSQLException(Exception):
    """Raised for SQL-related errors inside marimo.

    Carries the offending statement and, when available, the error position
    plus an optional human-readable hint.
    """

    def __init__(
        self,
        message: str,
        sql_statement: str = "",
        sql_line: Optional[int] = None,
        sql_col: Optional[int] = None,
        hint: Optional[str] = None,
    ):
        super().__init__(message)
        # Stash the SQL context so error reporters can render rich output.
        self.sql_statement, self.sql_line = sql_statement, sql_line
        self.sql_col, self.hint = sql_col, hint
class SQLErrorMetadata(TypedDict):
    """Structured metadata for SQL parsing errors."""

    # Identifier of the lint rule reporting the error.
    lint_rule: str
    # Exception class name (type(exc).__name__).
    error_type: str
    # First line of the exception message.
    clean_message: str
    # Remaining message lines joined with "\n" (e.g. driver suggestions),
    # or None when the message is a single line.
    hint: Optional[str]
    # Position of the originating AST node; 0 when no node was provided.
    node_lineno: int
    node_col_offset: int
    # The SQL text, truncated to ~200 characters.
    sql_statement: str
    # 0-based error position inside the SQL, when the driver reported one.
    sql_line: Optional[int]
    sql_col: Optional[int]
    context: str
def is_sql_parse_error(exception: BaseException) -> bool:
    """Return True when ``exception`` is a recognized SQL parsing error.

    Probes each optional SQL backend only if it has already been imported.
    """
    # DuckDB exceptions first (most common).
    if DependencyManager.duckdb.imported():
        try:
            import duckdb

            # Broad buckets that cover all meaningful SQL issues; note that
            # Binder/CatalogException fall under ProgrammingError.
            # https://github.com/duckdb/duckdb-python/blob/0ee500cfa35fc07bf81ed02e8ab6984ea1f665fd/duckdb/__init__.pyi#L82
            duckdb_types = (
                duckdb.ParserException,
                duckdb.ProgrammingError,
                duckdb.IOException,
                duckdb.OperationalError,
                duckdb.IntegrityError,
                duckdb.DataError,
            )
            if isinstance(exception, duckdb_types):
                return True
        except ImportError:
            pass

    # SQLGlot parse errors: https://sqlglot.com/sqlglot/errors.html
    if DependencyManager.sqlglot.imported():
        try:
            from sqlglot.errors import ParseError

            if isinstance(exception, ParseError):
                return True
        except ImportError:
            pass

    # SQLAlchemy: https://docs.sqlalchemy.org/en/20/core/exceptions.html
    if DependencyManager.sqlalchemy.imported():
        try:
            from sqlalchemy.exc import ProgrammingError, SQLAlchemyError

            if isinstance(exception, (SQLAlchemyError, ProgrammingError)):
                return True
        except ImportError:
            pass

    # Finally, marimo's own SQL exception type.
    return isinstance(exception, MarimoSQLException)
def _extract_sql_position(
exception_msg: str,
) -> tuple[Optional[int], Optional[int]]:
"""Extract line and column position from SQL exception message."""
# SqlGlot format: "Line 1, Col: 15"
line_col_match = re.search(r"Line (\d+), Col: (\d+)", exception_msg)
if line_col_match:
return (
int(line_col_match.group(1)) - 1, # Convert to 0-based
int(line_col_match.group(2)) - 1,
)
# DuckDB format: "LINE 4:" (line only)
line_only_match = re.search(r"LINE (\d+):", exception_msg)
if line_only_match:
return (
int(line_only_match.group(1)) - 1, # Convert to 0-based
None, # No column information
)
# SQLGlot format variations
sqlglot_match = re.search(
r"line (\d+), col (\d+)", exception_msg, re.IGNORECASE
)
if sqlglot_match:
return (
int(sqlglot_match.group(1)) - 1,
int(sqlglot_match.group(2)) - 1,
)
return None, None
def create_sql_error_metadata(
    exception: BaseException,
    *,
    rule_code: str,
    node: Optional[ast.expr] = None,
    sql_content: str = "",
    context: str = "",
) -> SQLErrorMetadata:
    """Build structured SQL error metadata from an exception.

    The single source of truth for parsing SQL exceptions into metadata.
    """
    raw_message = str(exception)
    sql_line, sql_col = _extract_sql_position(raw_message)

    # Cap very long SQL so logs and messages stay readable.
    truncated_sql = sql_content
    if sql_content and len(sql_content) > 200:
        truncated_sql = sql_content[:200] + "..."

    # First line is the clean message; any trailing lines become the hint
    # (e.g. multiline DuckDB suggestions), stripped of surrounding space.
    message_lines = raw_message.split("\n")
    clean_message = message_lines[0]
    trailing = [line.strip() for line in message_lines[1:]]
    hint = "\n".join(trailing) if trailing else None

    return SQLErrorMetadata(
        lint_rule=rule_code,
        error_type=type(exception).__name__,
        clean_message=clean_message,
        hint=hint,
        node_lineno=node.lineno if node is not None else 0,
        node_col_offset=node.col_offset if node is not None else 0,
        sql_statement=truncated_sql,
        sql_line=sql_line,
        sql_col=sql_col,
        context=context,
    )
def metadata_to_sql_error(metadata: SQLErrorMetadata) -> MarimoSQLError:
    """Convert SQLErrorMetadata to MarimoSQLError for frontend messaging."""
    # Imported locally, matching the module's existing deferred-import style.
    from marimo._messaging.errors import MarimoSQLError

    # Map constructor argument -> metadata key (only `msg` differs).
    field_map = {
        "msg": "clean_message",
        "sql_statement": "sql_statement",
        "hint": "hint",
        "sql_line": "sql_line",
        "sql_col": "sql_col",
        "node_lineno": "node_lineno",
        "node_col_offset": "node_col_offset",
    }
    return MarimoSQLError(
        **{arg: metadata[key] for arg, key in field_map.items()}
    )
def log_sql_error(
    logger_func: Callable[..., None],
    *,
    message: str,
    exception: BaseException,
    rule_code: str,
    node: Optional[ast.expr] = None,
    sql_content: str = "",
    context: str = "",
) -> None:
    """Log SQL-related errors with structured metadata.

    The metadata is passed via ``extra`` so log handlers can recover the
    structured fields from the record.
    """
    # Use centralized metadata creation
    metadata = create_sql_error_metadata(
        exception,
        rule_code=rule_code,
        node=node,
        sql_content=sql_content,
        context=context,
    )

    # Compose a clean, trace-free log line: headline, optional position,
    # then the (possibly truncated) SQL statement on its own line.
    headline = message or metadata["clean_message"]
    line, col = metadata["sql_line"], metadata["sql_col"]
    if line is not None and col is not None:
        # Positions are stored 0-based; render them 1-based for humans.
        headline += f" (Line {line + 1}, Col {col + 1})"

    parts = [headline]
    if metadata["sql_statement"]:
        parts.append(f"SQL: {metadata['sql_statement']}")
    logger_func("\n".join(parts), extra=metadata)
def create_sql_error_from_exception(
    exception: BaseException, cell: object
) -> MarimoSQLError:
    """Create a MarimoSQLError from a SQL parsing exception.

    Args:
        exception: The raised exception; if it is a ``MarimoSQLException``
            carrying a hint, its structured fields are used directly.
        cell: An object that may expose a ``sqls`` list of SQL statements;
            the first statement is attached to the error when present.

    Returns:
        A ``MarimoSQLError`` suitable for frontend messaging. ``msg`` is
        ``str(exception)`` verbatim (no cleaning is applied here).
    """
    # Hoisted: the original imported MarimoSQLError separately in each
    # branch; one deferred import suffices.
    from marimo._messaging.errors import MarimoSQLError

    # Get SQL statement from cell (tolerate cells without a `sqls` attr)
    sql_statement = ""
    sqls = getattr(cell, "sqls", None)
    if sqls:
        sql_statement = str(sqls[0])

    # Prefer structured hint data when the exception carries it
    if isinstance(exception, MarimoSQLException) and exception.hint:
        return MarimoSQLError(
            msg=str(exception),
            sql_statement=exception.sql_statement,
            hint=exception.hint,
            sql_line=exception.sql_line,
            sql_col=exception.sql_col,
        )

    return MarimoSQLError(
        msg=str(exception),
        sql_statement=sql_statement,
        hint=None,
        sql_line=None,
        sql_col=None,
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_sql/error_utils.py",
"license": "Apache License 2.0",
"lines": 213,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_sql/test_sql_error_handling.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._messaging.errors import MarimoSQLError
from marimo._sql.error_utils import (
MarimoSQLException,
_extract_sql_position,
create_sql_error_from_exception,
is_sql_parse_error,
)
from marimo._sql.sql import sql
HAS_DUCKDB = DependencyManager.duckdb.has()
HAS_SQLGLOT = DependencyManager.sqlglot.has()
HAS_PANDAS = DependencyManager.pandas.has()
HAS_POLARS = DependencyManager.polars.has()
class TestDuckDBRuntimeErrors:
    """Test DuckDB errors that occur during SQL execution."""

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_table_not_found_error(self):
        """Test error when referencing a non-existent table."""
        with pytest.raises(MarimoSQLException) as exc_info:
            sql("SELECT * FROM nonexistent_table")

        error = exc_info.value
        assert "nonexistent_table" in str(error).lower()
        assert "does not exist" in str(error).lower()
        # The offending statement is preserved verbatim on the exception.
        assert error.sql_statement == "SELECT * FROM nonexistent_table"

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_column_not_found_error(self):
        """Test error when referencing a non-existent column."""
        # Create a test table first
        sql("CREATE OR REPLACE TABLE test_error_table (id INTEGER, name TEXT)")

        with pytest.raises(MarimoSQLException) as exc_info:
            sql("SELECT invalid_column FROM test_error_table")

        error = exc_info.value
        assert "invalid_column" in str(error)
        assert "test_error_table" in error.sql_statement

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_syntax_error_missing_from(self):
        """Test syntax error when FROM keyword is misspelled."""
        with pytest.raises(MarimoSQLException) as exc_info:
            sql("SELECT * FRM test_table")

        error = exc_info.value
        assert "syntax error" in str(error).lower()
        assert "SELECT * FRM test_table" in error.sql_statement

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_syntax_error_malformed_expression(self):
        """Test syntax error with malformed SQL expression."""
        with pytest.raises(MarimoSQLException) as exc_info:
            sql("SELECT ( FROM table")

        error = exc_info.value
        # DuckDB wording differs across versions; accept either phrasing.
        assert (
            "syntax error" in str(error).lower()
            or "parser error" in str(error).lower()
        )

    @pytest.mark.skipif(
        not HAS_DUCKDB or not (HAS_PANDAS or HAS_POLARS),
        reason="DuckDB/Pandas not installed",
    )
    def test_data_type_error(self):
        """Test data type conversion errors."""
        sql("CREATE OR REPLACE TABLE test_type_table (id INTEGER)")
        sql("INSERT INTO test_type_table VALUES (1)")

        with pytest.raises(MarimoSQLException) as exc_info:
            sql("SELECT id / 'invalid_string' FROM test_type_table")

        error = exc_info.value
        # Error message varies by DuckDB version, just ensure we caught it
        assert len(str(error)) > 0

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_long_sql_statement_truncation(self):
        """Test that long SQL statements are kept in full on the exception.

        The 200-char truncation applies only to logged metadata
        (see create_sql_error_metadata), not to the raised exception.
        """
        long_query = (
            "SELECT "
            + ", ".join([f"col_{i}" for i in range(100)])
            + " FROM nonexistent_table"
        )

        with pytest.raises(MarimoSQLException) as exc_info:
            sql(long_query)

        error = exc_info.value
        # Full statement is preserved — no truncation here.
        assert len(error.sql_statement) == len(long_query)
class TestSQLGlotParseErrors:
    """Test SQLGlot parsing errors during static analysis."""

    @pytest.mark.skipif(not HAS_SQLGLOT, reason="SQLGlot not installed")
    def test_malformed_case_statement(self):
        """Test ParseError with malformed CASE statement."""
        from sqlglot import parse_one
        from sqlglot.errors import ParseError

        # CASE with no WHEN/THEN arms is invalid.
        with pytest.raises(ParseError):
            parse_one("SELECT CASE FROM table")

    @pytest.mark.skipif(not HAS_SQLGLOT, reason="SQLGlot not installed")
    def test_incomplete_cte(self):
        """Test ParseError with incomplete CTE."""
        from sqlglot import parse_one
        from sqlglot.errors import ParseError

        # A WITH clause with no following SELECT is incomplete.
        with pytest.raises(ParseError):
            parse_one("WITH cte AS (SELECT * FROM x)")

    @pytest.mark.skipif(not HAS_SQLGLOT, reason="SQLGlot not installed")
    def test_function_argument_errors(self):
        """Test ParseError with incorrect function arguments."""
        from sqlglot import parse_one
        from sqlglot.errors import ParseError

        # Too few arguments for IF function
        with pytest.raises(ParseError):
            parse_one("SELECT IF(a > 0)")

    @pytest.mark.skipif(not HAS_SQLGLOT, reason="SQLGlot not installed")
    def test_empty_query_parse(self):
        """Test ParseError with empty query."""
        from sqlglot import parse_one
        from sqlglot.errors import ParseError

        with pytest.raises(ParseError):
            parse_one("")

    @pytest.mark.skipif(not HAS_SQLGLOT, reason="SQLGlot not installed")
    def test_invalid_select_syntax(self):
        """Test ParseError with invalid SELECT syntax."""
        from sqlglot import parse_one
        from sqlglot.errors import ParseError

        with pytest.raises(ParseError):
            parse_one("SELECT * * FROM table")
class TestErrorUtilityFunctions:
    """Test marimo's SQL error handling utility functions."""

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_is_sql_parse_error_duckdb(self):
        """Test detection of DuckDB parsing errors."""
        import duckdb

        # Missing table → binder error
        with pytest.raises(Exception) as exc_info:
            duckdb.sql("SELECT * FROM nonexistent_table")
        assert is_sql_parse_error(exc_info.value) is True

        # Misspelled keyword → parser error
        with pytest.raises(Exception) as exc_info:
            duckdb.sql("SELECT * FRM invalid_syntax")
        assert is_sql_parse_error(exc_info.value) is True

    @pytest.mark.skipif(not HAS_SQLGLOT, reason="SQLGlot not installed")
    def test_is_sql_parse_error_sqlglot(self):
        """Test detection of SQLGlot parsing errors."""
        from sqlglot import parse_one
        from sqlglot.errors import ParseError

        with pytest.raises(ParseError) as exc_info:
            parse_one("SELECT CASE FROM table")
        assert is_sql_parse_error(exc_info.value) is True

    def test_is_sql_parse_error_non_sql_exception(self):
        """Test that non-SQL exceptions are not detected as SQL errors."""
        regular_exception = ValueError("This is not a SQL error")
        assert is_sql_parse_error(regular_exception) is False

    def test_is_sql_parse_error_marimo_sql_exception(self):
        """Test that MarimoSQLException is detected as SQL error."""
        marimo_sql_exception = MarimoSQLException("SQL error message")
        assert is_sql_parse_error(marimo_sql_exception) is True

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_create_sql_error_from_exception(self):
        """Test conversion of raw exception to MarimoSQLError."""
        import duckdb

        # Minimal stand-in for a cell exposing a `sqls` list.
        class MockCell:
            def __init__(self, sql_statement: str):
                self.sqls = [sql_statement]

        try:
            duckdb.sql("SELECT * FROM nonexistent_table")
        except Exception as e:
            mock_cell = MockCell("SELECT * FROM nonexistent_table")
            error = create_sql_error_from_exception(e, mock_cell)

            assert isinstance(error, MarimoSQLError)
            assert "nonexistent_table" in error.sql_statement
            assert len(error.msg) > 0
            assert "nonexistent_table" in error.msg
            # Hint field should exist (may be None for this error)
            assert hasattr(error, "hint")

    @pytest.mark.requires("duckdb")
    def test_create_sql_error_long_statement(self):
        """Test that long statements survive error creation untruncated."""
        import duckdb

        long_statement = (
            "SELECT "
            + ", ".join([f"col_{i}" for i in range(100)])
            + " FROM test"
        )

        class MockCell:
            def __init__(self, sql_statement: str):
                self.sqls = [sql_statement]

        try:
            duckdb.sql(long_statement)
        except Exception as e:
            mock_cell = MockCell(long_statement)
            error = create_sql_error_from_exception(e, mock_cell)
            # The statement is carried through in full.
            assert len(error.sql_statement) == len(long_statement)
class TestErrorMessageQuality:
    """Test that error messages are actionable and well-formatted."""

    def test_extract_sql_position_duckdb_format(self):
        """Test position extraction from "Line X, Col: Y" style messages."""
        # NOTE(review): error_utils labels the "Line 1, Col: 15" pattern as
        # sqlglot-style while this test calls it DuckDB — confirm which
        # engine actually emits it; the extraction behavior is the same.
        duckdb_msg = "Parser Error: syntax error at Line 1, Col: 15"
        line, col = _extract_sql_position(duckdb_msg)
        assert line == 0  # 0-based
        assert col == 14  # 0-based

    def test_extract_sql_position_sqlglot_format(self):
        """Test position extraction from SQLGlot error messages."""
        # SQLGlot format variations ("line N, col M", matched
        # case-insensitively)
        sqlglot_msg = "Parse error at line 2, col 10"
        line, col = _extract_sql_position(sqlglot_msg)
        assert line == 1  # 0-based
        assert col == 9  # 0-based

    def test_extract_sql_position_no_position(self):
        """Test position extraction when no position info available."""
        no_position_msg = "Some generic SQL error without position"
        line, col = _extract_sql_position(no_position_msg)
        assert line is None
        assert col is None

    def test_error_message_cleaning(self):
        """Test msg content when the exception text contains a traceback."""

        class MockException(Exception):
            def __str__(self):
                return "SQL error message\nTraceback (most recent call last):\n File..."

        class MockCell:
            sqls = ["SELECT * FROM test"]

        error = create_sql_error_from_exception(MockException(), MockCell())
        # create_sql_error_from_exception uses str(exception) verbatim, so
        # the traceback text is preserved in msg — no first-line cleaning
        # happens on this path.
        assert "Traceback" in error.msg
class TestIntegrationAndEdgeCases:
    """Test complete error flow and edge cases."""

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_sql_function_error_flow(self):
        """Test complete error flow through mo.sql() function."""
        with pytest.raises(MarimoSQLException) as exc_info:
            sql("SELECT * FROM definitely_nonexistent_table_12345")

        error = exc_info.value
        assert isinstance(error, MarimoSQLException)
        assert error.sql_statement is not None
        assert len(error.sql_statement) > 0

    def test_empty_sql_statement_error_handling(self):
        """Test error handling with empty SQL statements."""

        # Cell with an empty `sqls` list → statement defaults to "".
        class MockCell:
            sqls = []

        mock_exception = Exception("Test error")
        error = create_sql_error_from_exception(mock_exception, MockCell())
        assert error.sql_statement == ""

    def test_cell_without_sqls_attribute(self):
        """Test error handling when cell doesn't have sqls attribute."""

        class MockCellNoSqls:
            pass

        mock_exception = Exception("Test error")
        error = create_sql_error_from_exception(
            mock_exception, MockCellNoSqls()
        )
        assert error.sql_statement == ""

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_multiple_errors_in_sequence(self):
        """Test handling multiple SQL errors in sequence."""
        # First error
        with pytest.raises(MarimoSQLException):
            sql("SELECT * FROM table1_nonexistent")

        # Second error should still work
        with pytest.raises(MarimoSQLException):
            sql("SELECT * FROM table2_nonexistent")

    @pytest.mark.requires("duckdb")
    def test_error_with_special_characters(self):
        """Test error handling with SQL containing special characters."""
        with pytest.raises(MarimoSQLException):
            sql("SELECT * FROM 'table with spaces and quotes'")

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_duckdb_hints_preserved(self):
        """Test that DuckDB hints like 'Did you mean?' are preserved in error messages."""
        import duckdb

        # Create a table to generate "Did you mean?" suggestions
        duckdb.sql(
            "CREATE OR REPLACE TABLE test_hints_table (id INT, name TEXT)"
        )

        with pytest.raises(MarimoSQLException) as exc_info:
            sql("SELECT * FROM test_hint")  # Missing 's' in table name

        error = exc_info.value
        error_msg = str(error)
        # Check that the main error message is present
        assert "does not exist" in error_msg
        # On this path the hint field is NOT populated; any hint text stays
        # inside the exception message itself.
        assert error.hint is None

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_column_candidates_preserved(self):
        """Test that column candidate hints are preserved in error messages."""
        import duckdb

        # Create a table to generate candidate binding suggestions
        duckdb.sql(
            "CREATE OR REPLACE TABLE test_columns (id INT, user_name TEXT, email TEXT)"
        )

        with pytest.raises(MarimoSQLException) as exc_info:
            sql("SELECT fullname FROM test_columns")  # Wrong column name

        error = exc_info.value
        error_msg = str(error)
        # Check that the main error message is present
        assert "not found" in error_msg
        # Hint field is expected to stay None here (see above).
        assert error.hint is None

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_hint_field_in_sql_error_struct(self):
        """Test that MarimoSQLError struct properly includes hint field."""
        import duckdb

        # Create table for hint generation
        duckdb.sql(
            "CREATE OR REPLACE TABLE hint_test_table (id INT, name TEXT)"
        )

        try:
            duckdb.sql("SELECT * FROM hint_test")  # Missing letters
        except Exception as e:

            class MockCell:
                sqls = ["SELECT * FROM hint_test"]

            error_struct = create_sql_error_from_exception(e, MockCell())

            # The struct exposes the hint field, but plain exceptions carry
            # no structured hint, so it is None.
            assert hasattr(error_struct, "hint")
            assert error_struct.hint is None

    @pytest.mark.skipif(not HAS_DUCKDB, reason="DuckDB not installed")
    def test_multiline_hints_preserved(self):
        """Test that multiline hints like function candidates are fully captured."""
        import duckdb

        # Create table for multiline hint generation
        duckdb.sql(
            "CREATE OR REPLACE TABLE hint_multiline_table (id INT, name TEXT)"
        )

        try:
            duckdb.sql(
                "SELECT SUBSTRING(name) FROM hint_multiline_table"
            )  # Wrong args
        except Exception as e:

            class MockCell:
                sqls = ["SELECT SUBSTRING(name) FROM hint_multiline_table"]

            error_struct = create_sql_error_from_exception(e, MockCell())

            # As above: the field exists but is not populated for plain
            # exceptions; candidate lists remain in the message text.
            assert hasattr(error_struct, "hint")
            assert error_struct.hint is None
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_sql/test_sql_error_handling.py",
"license": "Apache License 2.0",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_files/sql_parsing_errors.py | # Copyright 2026 Marimo. All rights reserved.
"""Test file with SQL parsing errors to test log rules positioning."""
import marimo
__generated_with = "0.8.0"
app = marimo.App()
@app.cell
def _():
import marimo as mo
return mo,
@app.cell
def _(mo):
# This should trigger an MF005 SQL parsing error due to trailing comma
result = mo.sql(f"""
WITH ranked_stories AS (
SELECT
title,
score,
type,
descendants,
YEAR(timestamp) AS year,
MONTH(timestamp) AS month,
ROW_NUMBER()
OVER (PARTITION BY YEAR(timestamp), MONTH(timestamp) ORDER BY score DESC)
AS rn
FROM sample_data.hn.hacker_news
WHERE
type = 'story'
AND
MONTH(timestamp) in (null)
AND
descendants NOT NULL
)
SELECT
month,
score,
type,
title,
hn_url,
descendants as nb_comments,
year,
FROM ranked_stories
WHERE rn = 1
ORDER BY year, month;
""")
return result,
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_files/sql_parsing_errors.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_log_rules.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import logging
from marimo._lint.context import LintContext, RuleContext
from marimo._lint.rules.formatting.parsing import (
MiscLogRule,
SqlParseRule,
)
from marimo._schemas.serialization import NotebookSerialization
class TestLogRules:
    """Test log message rules and grouping."""

    def test_log_grouping_async_safe(self):
        """Test that log grouping is async-safe and works correctly."""
        # Create mock log records with different rule targets
        record1 = logging.LogRecord(
            name="marimo",
            level=logging.ERROR,
            pathname="/test.py",
            lineno=10,
            msg="SQL parse error",
            args=(),
            exc_info=None,
        )
        # Tag the record for the SQL-parse rule (MF005).
        record1.__dict__["lint_rule"] = "MF005"

        record2 = logging.LogRecord(
            name="marimo",
            level=logging.WARNING,
            pathname="/test.py",
            lineno=30,
            msg="General warning",
            args=(),
            exc_info=None,
        )
        # No lint_rule specified - should go to MF006

        # Create minimal notebook
        notebook = NotebookSerialization(
            filename="test.py", cells=[], app=None
        )

        # Test with initial logs
        ctx = LintContext(notebook, logs=[record1, record2])
        ctx._group_initial_logs()

        # Verify logs are correctly grouped
        assert len(ctx._logs_by_rule.get("MF005", [])) == 1
        assert len(ctx._logs_by_rule.get("MF006", [])) == 1
        assert ctx._logs_by_rule["MF005"][0].getMessage() == "SQL parse error"
        assert ctx._logs_by_rule["MF006"][0].getMessage() == "General warning"

    async def test_sql_parse_rule(self):
        """Test SqlParseRule processes MF005 logs correctly."""
        record = logging.LogRecord(
            name="marimo",
            level=logging.ERROR,
            pathname="/test.py",
            lineno=20,
            msg="SQL parsing error",
            args=(),
            exc_info=None,
        )
        record.__dict__["lint_rule"] = "MF005"

        notebook = NotebookSerialization(
            filename="test.py", cells=[], app=None
        )

        ctx = LintContext(notebook, logs=[record])
        ctx._group_initial_logs()

        rule = SqlParseRule()
        rule_ctx = RuleContext(ctx, rule)
        await rule.check(rule_ctx)

        # The MF005 log becomes exactly one MF005 diagnostic.
        diagnostics = await ctx.get_diagnostics()
        assert len(diagnostics) == 1
        assert diagnostics[0].code == "MF005"
        assert diagnostics[0].message == "SQL parsing error"

    async def test_misc_log_rule(self):
        """Test MiscLogRule processes unspecified logs correctly."""
        # WARNING level - should be processed
        record1 = logging.LogRecord(
            name="marimo",
            level=logging.WARNING,
            pathname="/test.py",
            lineno=10,
            msg="General warning",
            args=(),
            exc_info=None,
        )

        # DEBUG level - should be ignored
        record2 = logging.LogRecord(
            name="marimo",
            level=logging.DEBUG,
            pathname="/test.py",
            lineno=20,
            msg="Debug message",
            args=(),
            exc_info=None,
        )

        notebook = NotebookSerialization(
            filename="test.py", cells=[], app=None
        )

        ctx = LintContext(notebook, logs=[record1, record2])
        ctx._group_initial_logs()

        rule = MiscLogRule()
        rule_ctx = RuleContext(ctx, rule)
        await rule.check(rule_ctx)

        diagnostics = await ctx.get_diagnostics()
        # Only WARNING should create a diagnostic
        assert len(diagnostics) == 1
        assert diagnostics[0].code == "MF006"
        assert diagnostics[0].message == "General warning"

    def test_rule_context_get_logs(self):
        """Test RuleContext.get_logs method."""
        record1 = logging.LogRecord(
            name="marimo",
            level=logging.ERROR,
            pathname="/test.py",
            lineno=10,
            msg="SQL parse error",
            args=(),
            exc_info=None,
        )
        record1.__dict__["lint_rule"] = "MF005"

        notebook = NotebookSerialization(
            filename="test.py", cells=[], app=None
        )

        ctx = LintContext(notebook, logs=[record1])
        ctx._group_initial_logs()

        rule = SqlParseRule()
        rule_ctx = RuleContext(ctx, rule)

        # Test getting specific rule logs
        mf005_logs = rule_ctx.get_logs("MF005")
        assert len(mf005_logs) == 1
        assert mf005_logs[0].getMessage() == "SQL parse error"

        # Test getting non-existent rule logs
        empty_logs = rule_ctx.get_logs("MF999")
        assert len(empty_logs) == 0

        # Test getting all logs
        all_logs = rule_ctx.get_logs(None)
        assert len(all_logs) == 1
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_log_rules.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_sql_log_rules_snapshot.py | # Copyright 2026 Marimo. All rights reserved.
"""Snapshot tests for SQL log message lint rules."""
import pytest
from marimo._ast.parse import parse_notebook
from tests._lint.utils import lint_notebook
from tests.mocks import snapshotter
snapshot = snapshotter(__file__)
# Only run this in 3.12 since the formatting may differ in other versions.
# NOTE: sys.version_info is a 5-component tuple, so it never equals (3, 12)
# directly — compare only (major, minor), otherwise the test is always
# skipped on every interpreter.
@pytest.mark.skipif("sys.version_info[:2] != (3, 12)")
def test_sql_parsing_errors_snapshot():
    """Test snapshot for SQL parsing log errors with positioning."""
    file = "tests/_lint/test_files/sql_parsing_errors.py"
    with open(file) as f:
        code = f.read()

    notebook = parse_notebook(code, filepath=file)
    errors = lint_notebook(notebook)
    log_errors = [
        error for error in errors if error.code in ("MF005", "MF006")
    ]

    # Format errors for snapshot
    error_output = [error.format() for error in log_errors]
    if not error_output:
        error_output = ["No SQL log errors found"]

    snapshot("sql_parsing_errors.txt", "\n".join(error_output))
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_sql_log_rules_snapshot.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_utils/test_strings.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._utils.platform import is_windows
from marimo._utils.strings import (
_mslex_quote,
_quote_for_cmd,
_wrap_in_quotes,
cmd_quote,
standardize_annotation_quotes,
)
class TestCmdQuote:
    """Test the cmd_quote function for cross-platform command line quoting."""

    @pytest.mark.skipif(is_windows(), reason="POSIX-specific test")
    def test_posix_simple_string(self):
        """Test simple strings on POSIX systems."""
        # Strings with no shell metacharacters pass through unquoted.
        assert cmd_quote("hello") == "hello"
        assert cmd_quote("path/to/file") == "path/to/file"

    @pytest.mark.skipif(is_windows(), reason="POSIX-specific test")
    def test_posix_strings_with_spaces(self):
        """Test strings with spaces on POSIX systems."""
        assert cmd_quote("hello world") == "'hello world'"
        assert cmd_quote("path with spaces") == "'path with spaces'"

    @pytest.mark.skipif(is_windows(), reason="POSIX-specific test")
    def test_posix_strings_with_special_chars(self):
        """Test strings with special characters on POSIX systems."""
        # Embedded single quote forces the '"'"' escape dance.
        assert cmd_quote("hello'world") == "'hello'\"'\"'world'"
        assert cmd_quote('hello"world') == "'hello\"world'"
        assert cmd_quote("hello$world") == "'hello$world'"

    @pytest.mark.skipif(not is_windows(), reason="Windows-specific test")
    def test_windows_simple_string(self):
        """Test simple strings on Windows."""
        assert cmd_quote("hello") == "hello"
        assert cmd_quote("path\\to\\file") == "path\\to\\file"

    @pytest.mark.skipif(not is_windows(), reason="Windows-specific test")
    def test_windows_empty_string(self):
        """Test empty string on Windows."""
        assert cmd_quote("") == '""'

    @pytest.mark.skipif(not is_windows(), reason="Windows-specific test")
    def test_windows_strings_with_spaces(self):
        """Test strings with spaces on Windows."""
        assert cmd_quote("hello world") == '"hello world"'
        assert (
            cmd_quote("C:\\Program Files\\app") == '"C:\\Program Files\\app"'
        )

    @pytest.mark.skipif(not is_windows(), reason="Windows-specific test")
    def test_windows_strings_with_special_chars(self):
        """Test strings with Windows special characters."""
        # Test % character
        assert cmd_quote("hello%world") == "hello^%world"
        # Test ! character
        assert cmd_quote("hello!world") == "hello^!world"
        # Test with quotes
        assert cmd_quote('hello"world') == 'hello\\^"world'
class TestWrapInQuotes:
    """Test the _wrap_in_quotes helper function."""

    def test_simple_string(self):
        """Test wrapping simple strings."""
        assert _wrap_in_quotes("hello") == '"hello"'
        assert _wrap_in_quotes("world") == '"world"'

    def test_string_with_trailing_backslash(self):
        """Test strings with trailing backslashes."""
        # Trailing backslashes are doubled so they don't escape the
        # closing quote.
        assert _wrap_in_quotes("path\\") == '"path\\\\"'
        assert _wrap_in_quotes("path\\\\") == '"path\\\\\\\\"'

    def test_string_without_trailing_backslash(self):
        """Test strings without trailing backslashes."""
        # Interior backslashes are left untouched.
        assert _wrap_in_quotes("path\\file") == '"path\\file"'
        assert _wrap_in_quotes("no\\backslash") == '"no\\backslash"'

    def test_empty_string(self):
        """Test empty string."""
        assert _wrap_in_quotes("") == '""'
class TestQuoteForCmd:
    """Test the _quote_for_cmd helper function."""

    def test_simple_string(self):
        """Test quoting simple strings."""
        assert _quote_for_cmd("hello") == "hello"

    def test_string_with_percent(self):
        """Test strings with % character."""
        # cmd.exe expands %...%; escape with caret.
        assert _quote_for_cmd("hello%world") == "hello^%world"

    def test_string_with_exclamation(self):
        """Test strings with ! character."""
        # ! triggers delayed expansion in cmd.exe; escape with caret.
        assert _quote_for_cmd("hello!world") == "hello^!world"

    def test_string_with_quotes(self):
        """Test strings with quote characters."""
        assert _quote_for_cmd('hello"world') == 'hello\\^"world'

    def test_string_with_spaces(self):
        """Test strings requiring quoting due to spaces."""
        result = _quote_for_cmd("hello world")
        assert result == '"hello world"'

    def test_complex_string(self):
        """Test complex strings with multiple special characters."""
        result = _quote_for_cmd("hello%!world")
        assert result == "hello^%^!world"
class TestMslexQuote:
    """Test the _mslex_quote function."""

    def test_empty_string(self):
        """Test empty string returns double quotes."""
        assert _mslex_quote("") == '""'

    def test_simple_string(self):
        """Test simple strings that don't need quoting."""
        assert _mslex_quote("hello") == "hello"
        assert _mslex_quote("path\\to\\file") == "path\\to\\file"

    def test_string_with_spaces(self):
        """Test strings with spaces."""
        assert _mslex_quote("hello world") == '"hello world"'

    def test_string_with_special_chars(self):
        """Test strings with Windows cmd special characters."""
        assert _mslex_quote("hello%world") == "hello^%world"
        assert _mslex_quote("hello!world") == "hello^!world"

    def test_string_with_quotes(self):
        """Test strings with quote characters."""
        # Exact output is implementation-defined; assert structural
        # properties only.
        result = _mslex_quote('hello"world')
        assert '"' in result  # Should be quoted
        assert "\\" in result  # Should have escaping

    def test_optimization_shorter_alt(self):
        """Test that shorter alternative quoting is used when available."""
        # This tests the optimization where a shorter alternative is preferred
        result = _mslex_quote("x!")
        assert result == "x^!"  # Shorter than "x\\"^!""
class TestStandardizeAnnotationQuotes:
    """Test the standardize_annotation_quotes function."""

    def test_no_quotes(self):
        """Test annotations without quotes."""
        # Annotations with no string literals are returned unchanged.
        assert standardize_annotation_quotes("int") == "int"
        assert standardize_annotation_quotes("List[str]") == "List[str]"

    def test_single_quotes_to_double(self):
        """Test converting single quotes to double quotes."""
        assert (
            standardize_annotation_quotes("Literal['foo']") == 'Literal["foo"]'
        )
        assert (
            standardize_annotation_quotes("Literal['foo', 'bar']")
            == 'Literal["foo", "bar"]'
        )

    def test_already_double_quotes(self):
        """Test that double quotes are preserved."""
        assert (
            standardize_annotation_quotes('Literal["foo"]') == 'Literal["foo"]'
        )
        assert (
            standardize_annotation_quotes('Literal["foo", "bar"]')
            == 'Literal["foo", "bar"]'
        )

    def test_mixed_quotes_with_internal_double_quotes(self):
        """Test that single quotes are preserved when they contain unescaped double quotes."""
        # This should preserve single quotes due to internal double quotes
        result = standardize_annotation_quotes("Literal['say \"hello\"']")
        assert result == "Literal['say \"hello\"']"

    def test_escaped_quotes_in_single_quotes(self):
        """Test handling of escaped quotes within single-quoted strings."""
        # The escaped \' becomes a plain apostrophe inside double quotes.
        result = standardize_annotation_quotes("Literal['it\\'s']")
        assert result == 'Literal["it\'s"]'

    def test_complex_annotation(self):
        """Test complex type annotations."""
        input_annotation = "Union[Literal['foo', 'bar'], Optional['baz']]"
        expected = 'Union[Literal["foo", "bar"], Optional["baz"]]'
        assert standardize_annotation_quotes(input_annotation) == expected

    def test_nested_quotes(self):
        """Test nested quote scenarios."""
        # Test escaped double quotes in single-quoted strings
        result = standardize_annotation_quotes("Literal['test\\\"value']")
        assert result == 'Literal["test\\\\"value"]'

    def test_empty_string_literal(self):
        """Test empty string literals."""
        assert standardize_annotation_quotes("Literal['']") == 'Literal[""]'
        assert standardize_annotation_quotes('Literal[""]') == 'Literal[""]'

    def test_multiple_string_literals(self):
        """Test multiple string literals in one annotation."""
        input_annotation = (
            "Dict[Literal['key1', 'key2'], Literal['val1', 'val2']]"
        )
        expected = 'Dict[Literal["key1", "key2"], Literal["val1", "val2"]]'
        assert standardize_annotation_quotes(input_annotation) == expected
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_strings.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_ignore_scripts.py | # Copyright 2026 Marimo. All rights reserved.
from marimo._lint import run_check
def test_ignore_scripts_flag(tmp_path):
    """Test that --ignore-scripts suppresses errors for non-marimo files."""
    # Create a temporary non-marimo Python file
    temp_file = tmp_path / "test_script.py"
    temp_file.write_text("""#!/usr/bin/env python3
# This is a regular Python script, not a marimo notebook

import os
import sys


def main():
    print("Hello, world!")
    return 0


if __name__ == "__main__":
    sys.exit(main())
""")

    # Test without ignore_scripts flag - should error
    linter_with_error = run_check((str(temp_file),), ignore_scripts=False)
    assert linter_with_error.errored is True
    assert len(linter_with_error.files) == 1
    assert linter_with_error.files[0].failed is True
    assert "not a valid notebook" in linter_with_error.files[0].message

    # Test with ignore_scripts flag - should not error; file is reported
    # as skipped rather than failed.
    linter_ignore = run_check((str(temp_file),), ignore_scripts=True)
    assert linter_ignore.errored is False
    assert len(linter_ignore.files) == 1
    assert linter_ignore.files[0].skipped is True
    assert "not a marimo notebook" in linter_ignore.files[0].message
def test_ignore_scripts_still_processes_marimo_files(tmp_path):
    """Test that --ignore-scripts still processes valid marimo files."""
    # Create a temporary marimo file
    temp_file = tmp_path / "test_notebook.py"
    temp_file.write_text("""import marimo

__generated_with = "0.15.5"
app = marimo.App()


@app.cell
def _():
    import marimo as mo
    return (mo,)


if __name__ == "__main__":
    app.run()
""")

    # Test with ignore_scripts flag - should still process marimo files
    # (neither failed nor skipped).
    linter = run_check((str(temp_file),), ignore_scripts=True)
    assert linter.errored is False
    assert len(linter.files) == 1
    assert linter.files[0].failed is False
    assert linter.files[0].skipped is False
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_ignore_scripts.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/slides_examples/centered_slides.py | import marimo
__generated_with = "0.16.0"
app = marimo.App(
width="medium",
layout_file="layouts/centered_slides.slides.json",
)
@app.cell
def _():
import marimo as mo
mo.iframe("https://marimo.io/")
return (mo,)
@app.cell
def _(mo):
mo.iframe("https://marimo.io/", height="600px")
return
@app.cell
def _(mo):
mo.hstack(
[mo.iframe("https://marimo.io/"), mo.iframe("https://marimo.io/")],
widths="equal",
)
return
@app.cell
def _(mo):
mo.vstack([mo.iframe("https://marimo.io/"), mo.iframe("https://marimo.io/")])
return
@app.cell
def _():
import altair as alt
import polars as pl
df = pl.read_parquet(
"https://github.com/uwdata/mosaic/raw/main/data/athletes.parquet"
)
df
df.plot.bar("sport", "count()", color="sex").properties(height=400)
return
@app.cell
def _(mo):
import plotly.express as px
_df = px.data.gapminder().query("country=='Germany'")
fig = px.line(_df, x="year", y="lifeExp", title="Life expectancy in Germany")
mo.ui.plotly(fig)
return (fig,)
@app.cell
def _(fig, mo):
mo.vstack([mo.md("## Chart with a title"), mo.ui.plotly(fig)])
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/slides_examples/centered_slides.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_ai/_tools/tools/datasource.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Optional
from marimo import _loggers
from marimo._ai._tools.base import ToolBase
from marimo._ai._tools.types import SuccessResult, ToolGuidelines
from marimo._ai._tools.utils.exceptions import ToolExecutionError
from marimo._data.models import DataTable
from marimo._sql.engines.duckdb import INTERNAL_DUCKDB_ENGINE
from marimo._types.ids import SessionId
from marimo._utils.fuzzy_match import compile_regex, is_fuzzy_match
LOGGER = _loggers.marimo_logger()
if TYPE_CHECKING:
from marimo._session import Session
@dataclass
class GetDatabaseTablesArgs:
    # Input arguments for the GetDatabaseTables tool.
    session_id: SessionId
    # Optional fuzzy/regex filter; None means "return every table".
    query: Optional[str] = None


@dataclass
class TableDetails:
    # One matched table plus enough context to address it in SQL.
    connection: str
    database: str
    schema: str
    table: DataTable
    # Ready-to-paste `mo.sql(...)` snippet targeting this table.
    sample_query: str


@dataclass
class GetDatabaseTablesOutput(SuccessResult):
    # Tool result: all tables that matched the query (possibly empty).
    tables: list[TableDetails] = field(default_factory=list)
class GetDatabaseTables(
    ToolBase[GetDatabaseTablesArgs, GetDatabaseTablesOutput]
):
    """
    Get information about tables in a database. Use the query parameter to search by name. You can use regex.

    Args:
        session_id: The session id.
        query (optional): The query to match the database, schemas, and tables.
            If a query is provided, it will fuzzy match the query to the database, schemas, and tables available. If no query is provided, all tables are returned.
    """

    guidelines = ToolGuidelines(
        when_to_use=[
            "When exploring database tables from external connections (SQL databases)",
            "Before writing SQL queries to understand schema structure",
        ],
        prerequisites=[
            "You must have a valid session id from an active notebook",
        ],
        avoid_if=[
            "You have already been given the schema view, you can refer to the given information",
            "The user is asking about in-memory DataFrames, use the get_tables_and_variables tool instead",
        ],
        additional_info="For best results, don't provide a query since you may miss some tables. Alternatively, provide loose queries using regex that can match uppercase/lowercase and plural or singular forms.",
    )

    def handle(self, args: GetDatabaseTablesArgs) -> GetDatabaseTablesOutput:
        """Entry point: resolve the session, then collect matching tables."""
        session_id = args.session_id
        session = self.context.get_session(session_id)
        return self._get_tables(session, args.query)

    def _get_tables(
        self, session: Session, query: Optional[str]
    ) -> GetDatabaseTablesOutput:
        """Walk every connection/database/schema and collect tables.

        A table is included when the query is empty, when its schema name
        matches the query, or when the table name itself matches.

        Raises:
            ToolExecutionError: if the session has no data connections.
        """
        session_view = session.session_view
        data_connectors = session_view.data_connectors
        if len(data_connectors.connections) == 0:
            raise ToolExecutionError(
                message="No databases found. Please create a connection first.",
                code="NO_DATABASES_FOUND",
                is_retryable=False,
            )

        tables: list[TableDetails] = []

        # Pre-compile the regex once if a query exists; reused for every
        # schema/table name checked below.
        compiled_pattern = None
        is_regex = False
        if query:
            compiled_pattern, is_regex = compile_regex(query)

        for connection in data_connectors.connections:
            for database in connection.databases:
                default_database = connection.default_database == database.name
                for schema in database.schemas:
                    default_schema = connection.default_schema == schema.name
                    # A schema-level match includes every table in the
                    # schema; otherwise fall back to per-table matching.
                    # Short-circuiting `or` keeps each table at most once.
                    schema_matches = query is None or is_fuzzy_match(
                        query, schema.name, compiled_pattern, is_regex
                    )
                    for table in schema.tables:
                        if schema_matches or is_fuzzy_match(
                            query, table.name, compiled_pattern, is_regex
                        ):
                            tables.append(
                                self._make_table_detail(
                                    connection_name=connection.name,
                                    database_name=database.name,
                                    schema_name=schema.name,
                                    table=table,
                                    default_database=default_database,
                                    default_schema=default_schema,
                                )
                            )

        return GetDatabaseTablesOutput(
            tables=tables,
            next_steps=[
                "Use the sample query as a guideline to write your own SQL query."
            ],
        )

    def _make_table_detail(
        self,
        *,
        connection_name: str,
        database_name: str,
        schema_name: str,
        table: DataTable,
        default_database: bool,
        default_schema: bool,
    ) -> TableDetails:
        """Build one TableDetails record, including its sample query."""
        sample_query = self._form_sample_query(
            database=database_name,
            schema=schema_name,
            table=table.name,
            default_database=default_database,
            default_schema=default_schema,
            engine=connection_name,
        )
        return TableDetails(
            connection=connection_name,
            database=database_name,
            schema=schema_name,
            table=table,
            sample_query=sample_query,
        )

    def _form_sample_query(
        self,
        *,
        database: str,
        schema: str,
        table: str,
        default_database: bool,
        default_schema: bool,
        engine: str,
    ) -> str:
        """Return a `mo.sql(...)` snippet selecting 100 rows from the table.

        Qualification is progressively dropped: a default database drops the
        database prefix, and a default schema drops both prefixes (the
        default-schema check deliberately wins regardless of the database
        flag). The internal DuckDB engine omits the `engine=` kwarg.
        """
        sample_query = f"SELECT * FROM {database}.{schema}.{table} LIMIT 100"
        if default_database:
            sample_query = f"SELECT * FROM {schema}.{table} LIMIT 100"
        if default_schema:
            sample_query = f"SELECT * FROM {table} LIMIT 100"
        if engine != INTERNAL_DUCKDB_ENGINE:
            wrapped_query = (
                f'df = mo.sql(f"""{sample_query}""", engine={engine})'
            )
        else:
            wrapped_query = f'df = mo.sql(f"""{sample_query}""")'
        return wrapped_query
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_tools/tools/datasource.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_utils/fuzzy_match.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import re
def compile_regex(query: str) -> tuple[re.Pattern[str] | None, bool]:
    """Attempt to compile *query* as a case-insensitive regex.

    Returns a ``(pattern, is_regex)`` pair: the compiled pattern and
    ``True`` on success, or ``(None, False)`` when *query* is not a
    valid regular expression.
    """
    try:
        pattern = re.compile(query, re.IGNORECASE)
    except re.error:
        return None, False
    return pattern, True
def is_fuzzy_match(
    query: str,
    name: str,
    compiled_pattern: re.Pattern[str] | None,
    is_regex: bool,
) -> bool:
    """Match *name* against *query*.

    Uses the pre-compiled regex when one is available; otherwise falls
    back to a case-insensitive substring test.

    Args:
        query: The query to match.
        name: The name to match against.
        compiled_pattern: Pre-compiled regex pattern (None if not regex).
        is_regex: Whether the query is a valid regex.
    """
    if not (is_regex and compiled_pattern):
        # Substring fallback; both sides lower-cased for case-insensitivity.
        return query.lower() in name.lower()
    return compiled_pattern.search(name) is not None
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/fuzzy_match.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ai/tools/tools/test_datasource_tool.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
import pytest
from marimo._ai._tools.base import ToolContext
from marimo._ai._tools.tools.datasource import (
GetDatabaseTables,
GetDatabaseTablesArgs,
TableDetails,
)
from marimo._ai._tools.utils.exceptions import ToolExecutionError
from marimo._data.models import Database, DataTable, DataTableColumn, Schema
from marimo._messaging.notification import DataSourceConnectionsNotification
from marimo._sql.engines.duckdb import INTERNAL_DUCKDB_ENGINE
from tests._ai.tools.test_utils import MockSession, MockSessionView
@dataclass
class MockDataSourceConnection:
    # Minimal stand-in for a data source connection: only the fields that
    # the GetDatabaseTables tool actually reads.
    name: str
    dialect: str
    databases: list[Database]
    default_database: Optional[str] = None
    default_schema: Optional[str] = None


@pytest.fixture
def tool() -> GetDatabaseTables:
    """Create a GetDatabaseTables tool instance."""
    return GetDatabaseTables(ToolContext())
@pytest.fixture
def sample_table() -> DataTable:
    """Sample table for testing."""
    return DataTable(
        source_type="connection",
        source="postgresql",
        name="users",
        num_rows=100,
        num_columns=3,
        variable_name=None,
        columns=[
            DataTableColumn("id", "integer", "INTEGER", [1, 2, 3]),
            DataTableColumn("name", "string", "VARCHAR", ["Alice", "Bob"]),
            DataTableColumn(
                "email", "string", "VARCHAR", ["alice@example.com"]
            ),
        ],
    )


@pytest.fixture
def sample_schema(sample_table: DataTable) -> Schema:
    """Sample schema for testing."""
    return Schema(
        name="public",
        tables=[sample_table],
    )


@pytest.fixture
def sample_database(sample_schema: Schema) -> Database:
    """Sample database for testing."""
    return Database(
        name="test_db",
        dialect="postgresql",
        schemas=[sample_schema],
    )


@pytest.fixture
def sample_connection(sample_database: Database) -> MockDataSourceConnection:
    """Sample connection for testing."""
    return MockDataSourceConnection(
        name="postgres_conn",
        dialect="postgresql",
        databases=[sample_database],
    )


@pytest.fixture
def sample_session(sample_connection: MockDataSourceConnection) -> MockSession:
    """Sample session with data connectors.

    Layering: postgres_conn -> test_db -> public -> users.
    """
    return MockSession(
        _session_view=MockSessionView(
            data_connectors=DataSourceConnectionsNotification(
                connections=[sample_connection]
            )
        )
    )
@pytest.fixture
def multi_table_session() -> MockSession:
    """Session with multiple tables for testing filtering."""
    # Three tables in one schema so that name-based filtering has both
    # matching and non-matching candidates.
    tables = [
        DataTable(
            source_type="connection",
            source="mysql",
            name="users",
            num_rows=100,
            num_columns=2,
            variable_name=None,
            columns=[
                DataTableColumn("id", "integer", "INTEGER", [1, 2]),
                DataTableColumn("name", "string", "VARCHAR", ["Alice"]),
            ],
        ),
        DataTable(
            source_type="connection",
            source="mysql",
            name="orders",
            num_rows=50,
            num_columns=2,
            variable_name=None,
            columns=[
                DataTableColumn("order_id", "integer", "INTEGER", [1]),
                DataTableColumn("user_id", "integer", "INTEGER", [1]),
            ],
        ),
        DataTable(
            source_type="connection",
            source="mysql",
            name="products",
            num_rows=25,
            num_columns=2,
            variable_name=None,
            columns=[
                DataTableColumn("product_id", "integer", "INTEGER", [1]),
                DataTableColumn("name", "string", "VARCHAR", ["Widget"]),
            ],
        ),
    ]
    schema = Schema(name="public", tables=tables)
    database = Database(name="ecommerce", dialect="mysql", schemas=[schema])
    connection = MockDataSourceConnection(
        name="mysql_conn", dialect="mysql", databases=[database]
    )
    return MockSession(
        _session_view=MockSessionView(
            data_connectors=DataSourceConnectionsNotification(
                connections=[connection]
            )
        )
    )
def test_get_tables_no_query(
    tool: GetDatabaseTables, sample_session: MockSession
):
    """Test getting all tables when no query is provided."""

    # Mock the session
    def mock_get_session(_session_id):
        return sample_session

    tool.context.get_session = mock_get_session

    args = GetDatabaseTablesArgs(
        session_id="test_session",
        query=None,
    )
    result = tool.handle(args)

    assert isinstance(result, tool.Output)
    assert len(result.tables) == 1

    # Verify the table detail carries the full connection context.
    table_detail = result.tables[0]
    assert isinstance(table_detail, TableDetails)
    assert table_detail.connection == "postgres_conn"
    assert table_detail.database == "test_db"
    assert table_detail.schema == "public"
    assert table_detail.table.name == "users"


def test_get_tables_with_simple_query(
    tool: GetDatabaseTables, multi_table_session: MockSession
):
    """Test getting tables with simple text query."""

    # Mock the session
    def mock_get_session(_session_id):
        return multi_table_session

    tool.context.get_session = mock_get_session

    args = GetDatabaseTablesArgs(
        session_id="test_session",
        query="user",
    )
    result = tool.handle(args)

    assert isinstance(result, tool.Output)
    assert len(result.tables) == 1  # Only "users" table matches "user"

    table_names = {td.table.name for td in result.tables}
    assert "users" in table_names
    assert "orders" not in table_names  # "orders" doesn't contain "user"
    assert "products" not in table_names


def test_get_tables_with_regex_query(
    tool: GetDatabaseTables, multi_table_session: MockSession
):
    """Test getting tables with regex query."""

    # Mock the session
    def mock_get_session(_session_id):
        return multi_table_session

    tool.context.get_session = mock_get_session

    args = GetDatabaseTablesArgs(
        session_id="test_session",
        query="^user.*",  # anchored regex: matches "users" only
    )
    result = tool.handle(args)

    assert isinstance(result, tool.Output)
    assert len(result.tables) == 1

    table_detail = result.tables[0]
    assert table_detail.table.name == "users"


def test_get_tables_with_schema_match(
    tool: GetDatabaseTables, multi_table_session: MockSession
):
    """Test getting tables by schema name match."""

    # Mock the session
    def mock_get_session(_session_id):
        return multi_table_session

    tool.context.get_session = mock_get_session

    args = GetDatabaseTablesArgs(
        session_id="test_session",
        query="pub",  # matches the "public" schema name, not any table name
    )
    result = tool.handle(args)

    assert isinstance(result, tool.Output)
    assert len(result.tables) == 3  # All tables in public schema

    table_names = {td.table.name for td in result.tables}
    assert "users" in table_names
    assert "orders" in table_names
    assert "products" in table_names
def test_get_tables_empty_connections(tool: GetDatabaseTables):
    """Test getting tables when no connections exist."""
    empty_session = MockSession(
        _session_view=MockSessionView(
            data_connectors=DataSourceConnectionsNotification(connections=[])
        )
    )

    # Mock the session
    def mock_get_session(_session_id):
        return empty_session

    tool.context.get_session = mock_get_session

    args = GetDatabaseTablesArgs(
        session_id="test_session",
        query=None,
    )
    # Zero connections is an error condition, not an empty result.
    with pytest.raises(ToolExecutionError) as e:
        tool.handle(args)
    assert e.value.code == "NO_DATABASES_FOUND"


def test_get_tables_no_matches(
    tool: GetDatabaseTables, sample_session: MockSession
):
    """Test getting tables when query matches nothing."""

    # Mock the session
    def mock_get_session(_session_id):
        return sample_session

    tool.context.get_session = mock_get_session

    args = GetDatabaseTablesArgs(
        session_id="test_session",
        query="nonexistent",
    )
    result = tool.handle(args)

    # Unlike a missing connection, a non-matching query is not an error.
    assert isinstance(result, tool.Output)
    assert len(result.tables) == 0


def test_table_details_structure(
    tool: GetDatabaseTables, sample_session: MockSession
):
    """Test that TableDetails is properly structured."""

    # Mock the session
    def mock_get_session(_session_id):
        return sample_session

    tool.context.get_session = mock_get_session

    args = GetDatabaseTablesArgs(
        session_id="test_session",
        query=None,
    )
    result = tool.handle(args)

    table_detail = result.tables[0]
    assert isinstance(table_detail, TableDetails)
    assert table_detail.connection == "postgres_conn"
    assert table_detail.database == "test_db"
    assert table_detail.schema == "public"
    assert isinstance(table_detail.table, DataTable)
    assert table_detail.table.name == "users"
    assert len(table_detail.table.columns) == 3
def test_multiple_connections(tool: GetDatabaseTables):
    """Test with multiple connections."""
    # Create two connections with different databases
    table1 = DataTable(
        source_type="connection",
        source="postgresql",
        name="table1",
        num_rows=10,
        num_columns=0,
        variable_name=None,
        columns=[],
    )
    table2 = DataTable(
        source_type="connection",
        source="mysql",
        name="table2",
        num_rows=20,
        num_columns=0,
        variable_name=None,
        columns=[],
    )
    schema1 = Schema(name="schema1", tables=[table1])
    schema2 = Schema(name="schema2", tables=[table2])
    db1 = Database(name="db1", dialect="postgresql", schemas=[schema1])
    db2 = Database(name="db2", dialect="mysql", schemas=[schema2])
    conn1 = MockDataSourceConnection(
        name="conn1", dialect="postgresql", databases=[db1]
    )
    conn2 = MockDataSourceConnection(
        name="conn2", dialect="mysql", databases=[db2]
    )
    multi_conn_session = MockSession(
        _session_view=MockSessionView(
            data_connectors=DataSourceConnectionsNotification(
                connections=[conn1, conn2]
            )
        )
    )

    # Mock the session
    def mock_get_session(_session_id):
        return multi_conn_session

    tool.context.get_session = mock_get_session

    args = GetDatabaseTablesArgs(
        session_id="test_session",
        query=None,
    )
    result = tool.handle(args)

    assert isinstance(result, tool.Output)
    # Tables from every connection should be aggregated.
    assert len(result.tables) == 2

    connections = {td.connection for td in result.tables}
    assert "conn1" in connections
    assert "conn2" in connections

    databases = {td.database for td in result.tables}
    assert "db1" in databases
    assert "db2" in databases


def test_query_matches_multiple_levels(tool: GetDatabaseTables):
    """Test query that matches at different levels (schema and table)."""
    # Create tables with overlapping names
    user_table = DataTable(
        source_type="connection",
        source="postgresql",
        name="user",
        num_rows=5,
        num_columns=0,
        variable_name=None,
        columns=[],
    )
    user_schema_table = DataTable(
        source_type="connection",
        source="postgresql",
        name="orders",
        num_rows=10,
        num_columns=0,
        variable_name=None,
        columns=[],
    )
    user_schema = Schema(name="user", tables=[user_table])
    public_schema = Schema(name="public", tables=[user_schema_table])
    database = Database(
        name="testdb",
        dialect="postgresql",
        schemas=[user_schema, public_schema],
    )
    connection = MockDataSourceConnection(
        name="conn", dialect="postgresql", databases=[database]
    )
    session = MockSession(
        _session_view=MockSessionView(
            data_connectors=DataSourceConnectionsNotification(
                connections=[connection]
            )
        )
    )

    # Mock the session
    def mock_get_session(_session_id):
        return session

    tool.context.get_session = mock_get_session

    args = GetDatabaseTablesArgs(
        session_id="test_session",
        query="user",
    )
    result = tool.handle(args)

    assert isinstance(result, tool.Output)
    assert len(result.tables) == 1  # Only the "user" table matches "user"

    table_names = {td.table.name for td in result.tables}
    assert "user" in table_names
    # The "orders" table is in the "public" schema, not the "user" schema
    # So it won't be included when query matches "user"
    assert "orders" not in table_names
def test_query_no_duplicates(tool: GetDatabaseTables):
    """Test that schema-level matching doesn't create duplicates with table-level matching."""
    # Create a schema that matches the query AND has tables that also match
    schema1 = Schema(
        name="users",  # This will match query "user"
        tables=[
            DataTable(
                source_type="connection",
                source="postgresql",
                name="user_profiles",  # This would also match "user"
                num_rows=10,
                num_columns=0,
                variable_name=None,
                columns=[],
            ),
            DataTable(
                source_type="connection",
                source="postgresql",
                name="user_settings",  # This would also match "user"
                num_rows=20,
                num_columns=0,
                variable_name=None,
                columns=[],
            ),
        ],
    )
    # Create another schema that doesn't match but has tables that do
    schema2 = Schema(
        name="products",  # This won't match "user"
        tables=[
            DataTable(
                source_type="connection",
                source="postgresql",
                name="user_reviews",  # This would match "user"
                num_rows=5,
                num_columns=0,
                variable_name=None,
                columns=[],
            ),
        ],
    )
    database = Database(
        name="test_db",
        dialect="postgresql",
        schemas=[schema1, schema2],
    )
    connection = MockDataSourceConnection(
        name="test_conn",
        dialect="postgresql",
        databases=[database],
    )
    session = MockSession(
        _session_view=MockSessionView(
            data_connectors=DataSourceConnectionsNotification(
                connections=[connection]
            )
        )
    )

    # Query that matches both schema name and individual table names.
    # NOTE: calls the private _get_tables directly, so no session mock
    # is needed here.
    result = tool._get_tables(session, query="user")

    # Should get all tables from the matching schema (2 tables)
    # plus the matching table from the non-matching schema (1 table)
    # Total: 3 tables, no duplicates
    assert len(result.tables) == 3

    # Verify no duplicates by checking unique combinations
    table_identifiers = [
        (t.connection, t.database, t.schema, t.table.name)
        for t in result.tables
    ]
    assert len(table_identifiers) == len(set(table_identifiers)), (
        "Found duplicate tables"
    )

    # Verify we got the expected tables
    table_names = [t.table.name for t in result.tables]
    assert "user_profiles" in table_names
    assert "user_settings" in table_names
    assert "user_reviews" in table_names
def test_form_sample_query_full_qualified(tool: GetDatabaseTables):
    """Test forming a sample query with full qualified name (not default database or schema)."""
    query = tool._form_sample_query(
        database="mydb",
        schema="myschema",
        table="mytable",
        default_database=False,
        default_schema=False,
        engine="postgres_conn",
    )
    # Non-default database/schema: full db.schema.table qualification.
    assert (
        query
        == 'df = mo.sql(f"""SELECT * FROM mydb.myschema.mytable LIMIT 100""", engine=postgres_conn)'
    )


def test_form_sample_query_default_database(tool: GetDatabaseTables):
    """Test forming a sample query when database is default (schema.table)."""
    query = tool._form_sample_query(
        database="mydb",
        schema="myschema",
        table="mytable",
        default_database=True,
        default_schema=False,
        engine="mysql_conn",
    )
    assert (
        query
        == 'df = mo.sql(f"""SELECT * FROM myschema.mytable LIMIT 100""", engine=mysql_conn)'
    )


def test_form_sample_query_default_schema(tool: GetDatabaseTables):
    """Test forming a sample query when schema is default (table only)."""
    # Pins the rule that a default schema drops all qualification even
    # when the database is NOT the default one.
    query = tool._form_sample_query(
        database="mydb",
        schema="myschema",
        table="mytable",
        default_database=False,
        default_schema=True,
        engine="postgres_conn",
    )
    assert (
        query
        == 'df = mo.sql(f"""SELECT * FROM mytable LIMIT 100""", engine=postgres_conn)'
    )


def test_form_sample_query_both_defaults(tool: GetDatabaseTables):
    """Test forming a sample query when both database and schema are default."""
    query = tool._form_sample_query(
        database="mydb",
        schema="myschema",
        table="mytable",
        default_database=True,
        default_schema=True,
        engine="mysql_conn",
    )
    assert (
        query
        == 'df = mo.sql(f"""SELECT * FROM mytable LIMIT 100""", engine=mysql_conn)'
    )


def test_form_sample_query_internal_duckdb_no_defaults(
    tool: GetDatabaseTables,
):
    """Test forming a sample query with internal DuckDB engine (no engine parameter)."""
    query = tool._form_sample_query(
        database="mydb",
        schema="myschema",
        table="mytable",
        default_database=False,
        default_schema=False,
        engine=INTERNAL_DUCKDB_ENGINE,
    )
    assert (
        query
        == 'df = mo.sql(f"""SELECT * FROM mydb.myschema.mytable LIMIT 100""")'
    )


def test_form_sample_query_internal_duckdb_with_defaults(
    tool: GetDatabaseTables,
):
    """Test forming a sample query with internal DuckDB engine and both defaults."""
    query = tool._form_sample_query(
        database="mydb",
        schema="myschema",
        table="mytable",
        default_database=True,
        default_schema=True,
        engine=INTERNAL_DUCKDB_ENGINE,
    )
    assert query == 'df = mo.sql(f"""SELECT * FROM mytable LIMIT 100""")'
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ai/tools/tools/test_datasource_tool.py",
"license": "Apache License 2.0",
"lines": 532,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_utils/test_fuzzy_match.py | # Copyright 2026 Marimo. All rights reserved.
from marimo._utils.fuzzy_match import compile_regex, is_fuzzy_match
def test_compile_regex_valid_pattern():
    """Test compile_regex with a valid regex pattern."""
    pattern, is_regex = compile_regex("^user.*")
    assert pattern is not None
    assert is_regex is True
    # The pattern is anchored at the start, so only "users" matches.
    assert pattern.search("users") is not None
    assert pattern.search("orders") is None


def test_compile_regex_invalid_pattern():
    """Test compile_regex with an invalid regex pattern."""
    # Unterminated character class -> re.error -> (None, False).
    pattern, is_regex = compile_regex("[invalid")
    assert pattern is None
    assert is_regex is False


def test_compile_regex_simple_text():
    """Test compile_regex with simple text (still a valid regex)."""
    pattern, is_regex = compile_regex("user")
    assert pattern is not None
    assert is_regex is True
    assert pattern.search("users") is not None
    assert pattern.search("orders") is None


def test_is_fuzzy_match_with_regex():
    """Test is_fuzzy_match with compiled regex pattern."""
    pattern, is_regex = compile_regex("^user.*")
    assert is_fuzzy_match("^user.*", "users", pattern, is_regex) is True
    assert is_fuzzy_match("^user.*", "orders", pattern, is_regex) is False


def test_is_fuzzy_match_without_regex():
    """Test is_fuzzy_match with invalid regex (fallback to substring)."""
    pattern, is_regex = compile_regex("[invalid")
    # Falls back to substring containment of the raw query text.
    assert is_fuzzy_match("[invalid", "users", pattern, is_regex) is False
    assert is_fuzzy_match("[invalid", "[invalid", pattern, is_regex) is True


def test_is_fuzzy_match_case_insensitive():
    """Test that matching is case insensitive."""
    pattern, is_regex = compile_regex("USER")
    assert is_fuzzy_match("USER", "users", pattern, is_regex) is True
    assert is_fuzzy_match("USER", "USERS", pattern, is_regex) is True
    assert is_fuzzy_match("USER", "orders", pattern, is_regex) is False
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_fuzzy_match.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/test_file_manager_filename.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from pathlib import Path
import pytest
from marimo._session.notebook import AppFileManager
from marimo._utils.http import HTTPException
from tests.mocks import EDGE_CASE_FILENAMES
class TestAppFileManagerFilenames:
    """Test AppFileManager filename handling with unicode, spaces, and special characters."""

    @pytest.mark.parametrize("filename", EDGE_CASE_FILENAMES)
    def test_app_file_manager_operations_with_edge_case_filenames(
        self, filename: str, tmp_path: Path
    ) -> None:
        """Test AppFileManager core operations with problematic filenames."""
        file_path = tmp_path / filename
        content = "import marimo as mo\n\napp = mo.App()\n\n@app.cell\ndef __():\n return\n"

        # Create and test initialization
        Path(file_path).write_text(content, encoding="utf-8")
        file_manager = AppFileManager(file_path)
        assert file_manager.filename == str(file_path)
        assert file_manager.path == str(file_path)
        assert file_manager.is_notebook_named

        # Test reading
        read_content = file_manager.read_file()
        assert read_content == content

        # Test rename to new problematic filename
        # BUG FIX: this was an f-string with no placeholder, so every
        # parametrized case renamed to the same constant target and the
        # edge-case filename was never exercised on the rename path.
        # Interpolate the parametrized filename into the new name.
        new_filename = f"new_{filename}"
        new_path = tmp_path / new_filename
        file_manager.rename(str(new_path))
        assert file_manager.filename == str(new_path)
        assert Path(new_path).exists()
        assert not Path(file_path).exists()

    def test_app_file_manager_rename_collision_raises_error(
        self, tmp_path: Path
    ) -> None:
        """Test that renaming to existing filename raises HTTPException."""
        original_path = tmp_path / "original.py"
        target_path = tmp_path / "café notebook.py"

        # Create both files so the rename target already exists.
        Path(original_path).write_text("# Original", encoding="utf-8")
        Path(target_path).write_text("# Target", encoding="utf-8")

        file_manager = AppFileManager(original_path)
        with pytest.raises(HTTPException) as exc_info:
            file_manager.rename(str(target_path))
        assert "already exists" in str(exc_info.value.detail)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/test_file_manager_filename.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/test_templates_filename.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import json
import pytest
from marimo._ast.app_config import _AppConfig
from marimo._config.config import MarimoConfig, PartialMarimoConfig
from marimo._server.templates.templates import (
_get_mount_config,
_html_escape,
json_script,
)
from marimo._server.tokens import SkewProtectionToken
from tests.mocks import EDGE_CASE_FILENAMES
class TestTemplateFilenameHandling:
    """Test template filename handling with unicode, spaces, and special characters."""

    @pytest.mark.parametrize(
        ("filename", "expected_contains"),
        [
            # Basic cases
            ("test.py", "test.py"),
            # HTML special characters that need escaping
            ("<script>alert('xss')</script>.py", "&lt;script&gt;"),
            ("test&example.py", "&amp;"),
            ('test"quotes".py', "&quot;"),
            ("test'quotes'.py", "&#x27;"),
            # Unicode characters (should be preserved)
            ("tést.py", "tést.py"),
            ("café.py", "café.py"),
            ("测试.py", "测试.py"),
            ("🚀notebook.py", "🚀notebook.py"),
            # Spaces (should be preserved)
            ("test file.py", "test file.py"),
            ("my notebook.py", "my notebook.py"),
            # Mixed unicode and spaces
            ("café notebook.py", "café notebook.py"),
            ("测试 file.py", "测试 file.py"),
            # Complex injection attempts
            ("test<>&'\"file.py", "&lt;&gt;&amp;&#x27;&quot;"),
        ],
    )
    def test_html_escape_function(
        self, filename: str, expected_contains: str
    ) -> None:
        """Test _html_escape function properly escapes HTML while preserving unicode."""
        result = _html_escape(filename)
        assert expected_contains in result
        # Should not contain unescaped HTML
        assert "<script>" not in result
        assert "onerror=" not in result

    @pytest.mark.parametrize(
        "filename",
        [*EDGE_CASE_FILENAMES, None],
    )
    def test_get_mount_config_filename_handling(
        self, filename: str | None
    ) -> None:
        """Test _get_mount_config function with problematic filenames."""
        server_token = SkewProtectionToken("test-token")
        user_config = MarimoConfig()
        config_overrides = PartialMarimoConfig()
        app_config = _AppConfig()

        result = _get_mount_config(
            filename=filename,
            mode="edit",
            server_token=server_token,
            user_config=user_config,
            config_overrides=config_overrides,
            app_config=app_config,
        )

        # Remove the last ','
        # (the mount config is emitted with a trailing comma, which
        # json.loads rejects, so strip it before parsing)
        last_comma_index = result.rfind(",")
        result = result[:last_comma_index] + result[last_comma_index + 1 :]

        # Should be valid JSON
        config_data = json.loads(result)

        # Filename should be properly handled: None is normalized to ""
        expected_filename = filename or ""
        assert config_data["filename"] == expected_filename
class TestJsonScriptEscaping:
"""Test json_script() function for script tag breakout prevention."""
    @pytest.mark.parametrize(
        "payload",
        [
            # Script tag breakout attempts
            "</script><script>alert('XSS')</script>",
            "<script>alert(1)</script>",
            "</script><img src=x onerror=alert(1)>",
            "<img src=x onerror=alert(1)>",
            # JavaScript string breakout attempts
            "'; alert(1); //",
            '"; alert(1); //',
            # Raw script tags
            "</script>",
            "<script>",
            # Combinations
            ">&<",
            "</SCRIPT><SCRIPT>alert(1)</SCRIPT>",
            # With valid content mixed in
            "normal text</script><script>alert(1)</script>more text",
        ],
    )
    def test_script_breakout_prevention(self, payload: str) -> None:
        """Verify dangerous characters are escaped to prevent script tag breakout."""
        result = json_script({"malicious": payload})

        # Must not contain literal < or > that could break out of script tag
        # Note: json_script only escapes <, >, & - other strings are safe in JSON context
        assert "<script>" not in result.lower()
        assert "</script>" not in result.lower()

        # Must contain escaped versions of dangerous chars
        # json_script escapes <, >, & to \uXXXX format
        if "<" in payload or ">" in payload or "&" in payload:
            assert (
                "\\u003C" in result
                or "\\u003E" in result
                or "\\u0026" in result
            )

        # Must be valid JSON that round-trips correctly
        parsed = json.loads(result)
        assert parsed["malicious"] == payload
    @pytest.mark.parametrize(
        ("data", "must_not_contain"),
        [
            # Nested structures with malicious content
            (
                {"nested": {"deep": "</script><script>alert(1)</script>"}},
                ["</script>", "<script>"],
            ),
            # Arrays with malicious content
            (
                {"items": ["</script>", "<script>", "&"]},
                ["</script>", "<script>"],
            ),
            # Multiple fields with different injection attempts
            (
                {
                    "field1": "</script>",
                    "field2": "<img src=x onerror=alert(1)>",
                    "field3": "normal & text",
                },
                ["</script>", "<img"],
            ),
        ],
    )
    def test_complex_structure_escaping(
        self, data: dict, must_not_contain: list[str]
    ) -> None:
        """Verify json_script escapes dangerous chars in complex nested structures."""
        result = json_script(data)

        # Must not contain any unescaped dangerous sequences
        for dangerous in must_not_contain:
            assert dangerous not in result

        # Must be valid JSON that round-trips correctly (escaping must not
        # alter the decoded values).
        parsed = json.loads(result)
        assert parsed == data
@pytest.mark.parametrize(
    "data",
    [
        # Unicode characters
        {"text": "café"},
        {"text": "测试"},
        {"text": "🚀"},
        # Unicode mixed with dangerous characters
        {"text": "café</script>"},
        {"text": "测试<script>alert(1)</script>"},
        # Combining characters
        {"text": "e\u0301"},  # é as combining character
        # Right-to-left marks
        {"text": "\u200f"},
        # Zero-width characters
        {"text": "test\u200bword"},
    ],
)
def test_unicode_handling(self, data: dict) -> None:
    """Verify unicode characters are handled correctly without bypassing escaping.

    Unicode must survive the round trip unchanged while dangerous ASCII
    sequences mixed into the same string are still escaped.
    """
    result = json_script(data)
    # Must not contain unescaped dangerous sequences
    assert "</script>" not in result
    assert "<script>" not in result
    # Must be valid JSON that preserves unicode
    parsed = json.loads(result)
    assert parsed == data
def test_json_validity_after_escaping(self) -> None:
    """Verify json_script produces valid JSON that JavaScript can parse."""
    payload = {
        "string": "test</script>",
        "number": 42,
        "float": 3.14,
        "bool": True,
        "null": None,
        "array": [1, 2, 3],
        "nested": {"key": "value<script>"},
    }
    rendered = json_script(payload)
    # The escaped output must still parse back to the exact input.
    round_tripped = json.loads(rendered)
    assert round_tripped == payload
    # Keys must come back in sorted order (sort_keys=True behavior).
    observed_keys = list(round_tripped.keys())
    assert observed_keys == sorted(observed_keys)
@pytest.mark.parametrize(
    "xss_payload",
    [
        # OWASP XSS cheat sheet payloads
        "<svg/onload=alert(1)>",
        "<iframe src=javascript:alert(1)>",
        "<object data=javascript:alert(1)>",
        "<embed src=javascript:alert(1)>",
        "<body onload=alert(1)>",
        "javascript:alert(1)",
        "data:text/html,<script>alert(1)</script>",
        # PortSwigger XSS payloads
        "<img src=x onerror=alert(document.domain)>",
        "<svg><script>alert(1)</script></svg>",
        # Encoded variations
        "</script><script>alert(String.fromCharCode(88,83,83))</script>",
        # Case variations
        "</ScRiPt><ScRiPt>alert(1)</ScRiPt>",
    ],
)
def test_real_world_xss_payloads(self, xss_payload: str) -> None:
    """Test json_script against real-world XSS attack payloads."""
    result = json_script({"payload": xss_payload})
    # Must not contain unescaped < or > that could break out of script tags
    # json_script specifically prevents script tag breakout by escaping <, >, &
    assert "<script" not in result.lower()
    # Must escape dangerous characters < and >
    if "<" in xss_payload or ">" in xss_payload:
        assert "\\u003C" in result or "\\u003E" in result
    # Must be valid JSON
    parsed = json.loads(result)
    assert parsed["payload"] == xss_payload
class TestMountConfigInjectionPrevention:
    """Test _get_mount_config prevents injection attacks in all fields."""

    @staticmethod
    def _strip_trailing_comma(rendered: str) -> str:
        """Remove the last comma so the mount-config fragment parses as JSON.

        _get_mount_config emits a trailing comma after its final entry,
        which strict json.loads rejects. Both tests below need the same
        cleanup, so it lives in one helper instead of being duplicated.
        """
        last_comma_index = rendered.rfind(",")
        return rendered[:last_comma_index] + rendered[last_comma_index + 1 :]

    @pytest.mark.parametrize(
        "malicious_filename",
        [
            "</script><script>alert('XSS')</script>.py",
            "<script>alert(1)</script>.py",
            "test</script>.py",
            "<img src=x onerror=alert(1)>.py",
        ],
    )
    def test_filename_injection_prevention(
        self, malicious_filename: str
    ) -> None:
        """Test that malicious filenames don't enable script breakout in mount config."""
        server_token = SkewProtectionToken("test-token")
        user_config = MarimoConfig()
        config_overrides = PartialMarimoConfig()
        app_config = _AppConfig()
        result = _get_mount_config(
            filename=malicious_filename,
            mode="edit",
            server_token=server_token,
            user_config=user_config,
            config_overrides=config_overrides,
            app_config=app_config,
        )
        # Remove trailing comma for JSON parsing
        result = self._strip_trailing_comma(result)
        # Must not contain unescaped script tags (< and > should be escaped)
        assert "</script><script>" not in result
        assert "<script>" not in result.lower()
        # Must be valid JSON
        config_data = json.loads(result)
        assert config_data["filename"] == malicious_filename
        # Verify the actual output contains escaped versions of < and >
        assert "\\u003C" in result or "\\u003E" in result

    @pytest.mark.parametrize(
        "malicious_title",
        [
            "</script><script>alert(1)</script>",
            "<img src=x onerror=alert(1)>",
            "normal title</script>",
        ],
    )
    def test_app_title_injection_prevention(
        self, malicious_title: str
    ) -> None:
        """Test that malicious app titles don't enable script breakout."""
        server_token = SkewProtectionToken("test-token")
        user_config = MarimoConfig()
        config_overrides = PartialMarimoConfig()
        app_config = _AppConfig(app_title=malicious_title)
        result = _get_mount_config(
            filename="test.py",
            mode="edit",
            server_token=server_token,
            user_config=user_config,
            config_overrides=config_overrides,
            app_config=app_config,
        )
        # Remove trailing comma
        result = self._strip_trailing_comma(result)
        # Must not contain unescaped script tags (< and > should be escaped)
        assert "</script><script>" not in result
        # Must be valid JSON
        config_data = json.loads(result)
        # Verify escaping in output (< and > are escaped)
        if "<" in malicious_title or ">" in malicious_title:
            assert "\\u003C" in result or "\\u003E" in result
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/test_templates_filename.py",
"license": "Apache License 2.0",
"lines": 302,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/test_utils_filename.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
import pytest
from marimo._session.notebook.file_manager import canonicalize_filename
class TestCanonicalizeFilename:
    """Test filename canonicalization with unicode, spaces, and special characters."""

    @pytest.mark.parametrize(
        ("input_filename", "should_add_py"),
        [
            # Basic cases
            ("test", True),
            ("test.py", False),
            ("test.md", False),
            ("test.qmd", False),
            # Unicode characters
            ("tést", True),
            ("café.py", False),
            ("测试", True),
            ("🚀notebook.py", False),
            # Spaces
            ("test file", True),
            ("my notebook.py", False),
            # Mixed unicode and spaces
            ("café notebook", True),
            ("测试 file.py", False),
            # Special characters
            ("test-file", True),
            ("test_file.py", False),
            # Edge cases
            ("", True),
            (".", True),
            # User paths
            ("~/test", True),
            ("~/test.py", False),
            ("~/café notebook", True),
            ("~/测试 file.py", False),
        ],
    )
    def test_canonicalize_filename(
        self, input_filename: str, should_add_py: bool
    ) -> None:
        """Test that canonicalize_filename handles problematic filenames correctly.

        should_add_py mirrors the expected behavior: names without a
        recognized notebook extension get ".py" appended.
        """
        result = canonicalize_filename(input_filename)
        # Should expand user path
        if should_add_py:
            expected = os.path.expanduser(input_filename + ".py")
        else:
            expected = os.path.expanduser(input_filename)
        assert result == expected
        # Should not contain ~ in the result if it was there
        if "~" in input_filename:
            assert "~" not in result
        # Result should be a valid string
        assert isinstance(result, str)
        assert len(result) > 0
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/test_utils_filename.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_files/star_import.py | import marimo
__generated_with = "0.15.2"
app = marimo.App()


# NOTE(review): this is a lint-test fixture — the cell comments below are
# part of the expected lint output and must not be edited.
@app.cell
def _():
    # This should trigger MB005 - syntax error with star import hint
    from math import *
    result = sin(pi / 2)
    return result,


@app.cell
def _():
    from pandas import *
    df = DataFrame({"a": [1, 2, 3]})
    return df,


@app.cell
def _():
    # This should NOT trigger syntax error - normal import
    import numpy as np
    arr = np.array([1, 2, 3])
    return arr,


@app.cell
def _():
    # This should NOT trigger syntax error - specific imports
    from typing import List, Dict
    data: List[Dict[str, int]] = [{"x": 1}, {"y": 2}]
    return data,


@app.cell
def _():
    # Another star import but
    # Pad out import statement
    # with
    # super
    # comments
    from os import *
    current_dir = getcwd()
    return current_dir,


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_files/star_import.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_ai/_tools/tools/tables_and_variables.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional
from marimo._ai._tools.base import ToolBase
from marimo._ai._tools.types import SuccessResult, ToolGuidelines
from marimo._data.models import DataTableColumn
from marimo._messaging.notification import VariableValue
from marimo._session import Session
from marimo._types.ids import SessionId
@dataclass
class TablesAndVariablesArgs:
    """Arguments for the GetTablesAndVariables tool."""

    # Session whose tables and variables are inspected.
    session_id: SessionId
    # Names to filter by; an empty list returns all tables and variables.
    variable_names: list[str]
@dataclass
class DataTableMetadata:
    """
    Metadata about a data table.

    source: str - Can be dialect, or source db name.
    num_rows: Optional[int] - The number of rows in the data table.
    num_columns: Optional[int] - The number of columns in the data table.
    columns: list[DataTableColumn] - The columns in the data table.
    engine: Optional[str] - The engine or connection handler of the data table.
    primary_keys: Optional[list[str]] - The primary keys of the data table.
    indexes: Optional[list[str]] - The indexes of the data table.
    """

    source: str
    num_rows: Optional[int]
    num_columns: Optional[int]
    columns: list[DataTableColumn]
    engine: Optional[str]
    primary_keys: Optional[list[str]]
    indexes: Optional[list[str]]
@dataclass
class TablesAndVariablesOutput(SuccessResult):
    """Success payload: table metadata and variable values keyed by name."""

    tables: dict[str, DataTableMetadata] = field(default_factory=dict)
    variables: dict[str, VariableValue] = field(default_factory=dict)
class GetTablesAndVariables(
    ToolBase[TablesAndVariablesArgs, TablesAndVariablesOutput]
):
    """
    Get tables and variables information in the session.

    When provided with a list of variable names, it will return information about the variables and tables mentioned.
    If an empty list is provided, it will return information about all tables and variables.

    Returns:
        A success result containing tables (columns, primary keys, indexes, engine, etc.) and variables (value, data type).
    """

    guidelines = ToolGuidelines(
        when_to_use=[
            "When inspecting in-memory DataFrames or Python variables in the notebook",
            "Before suggesting data operations to understand available data",
        ],
        prerequisites=[
            "You must have a valid session id from an active notebook",
        ],
        avoid_if=[
            "the user is asking about database tables or data sources, use the get_database_tables tool instead",
        ],
    )

    def handle(self, args: TablesAndVariablesArgs) -> TablesAndVariablesOutput:
        """Resolve the session and delegate to the filtering helper."""
        session = self.context.get_session(args.session_id)
        return self._get_tables_and_variables(session, args.variable_names)

    def _get_tables_and_variables(
        self, session: Session, variable_names: list[str]
    ) -> TablesAndVariablesOutput:
        """Collect table/variable metadata, optionally filtered by name.

        Args:
            session: Session whose view holds datasets and variable values.
            variable_names: Names to include; an empty list means "all".

        Returns:
            Output with matching tables and variables keyed by name.
        """
        session_view = session.session_view
        # convert to set for O(1) lookup
        variable_names_set = set(variable_names)
        # An empty request means "return everything".
        return_all_vars = not variable_names_set

        tables = session_view.datasets.tables
        variables = session_view.variable_values

        filtered_tables = (
            tables
            if return_all_vars
            else (
                table for table in tables if table.name in variable_names_set
            )
        )
        filtered_variables = (
            variables
            if return_all_vars
            else (name for name in variables if name in variable_names_set)
        )

        data_tables: dict[str, DataTableMetadata] = {
            table.name: DataTableMetadata(
                source=table.source,
                num_rows=table.num_rows,
                num_columns=table.num_columns,
                columns=table.columns,
                primary_keys=table.primary_keys,
                indexes=table.indexes,
                engine=table.engine,
            )
            for table in filtered_tables
        }

        notebook_variables: dict[str, VariableValue] = {}
        for variable_name in filtered_variables:
            value = variables[variable_name]
            notebook_variables[variable_name] = VariableValue(
                name=variable_name,
                value=value.value,
                datatype=value.datatype,
            )

        return TablesAndVariablesOutput(
            tables=data_tables, variables=notebook_variables
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_tools/tools/tables_and_variables.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_ai/_tools/tools_registry.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any
from marimo._ai._tools.base import ToolBase
from marimo._ai._tools.tools.cells import (
GetCellOutputs,
GetCellRuntimeData,
GetLightweightCellMap,
)
from marimo._ai._tools.tools.datasource import GetDatabaseTables
from marimo._ai._tools.tools.dependency_graph import GetCellDependencyGraph
from marimo._ai._tools.tools.errors import GetNotebookErrors
from marimo._ai._tools.tools.lint import LintNotebook
from marimo._ai._tools.tools.notebooks import GetActiveNotebooks
from marimo._ai._tools.tools.rules import GetMarimoRules
from marimo._ai._tools.tools.tables_and_variables import GetTablesAndVariables
# Registry of tool classes exposed both to the backend integration and to
# the MCP server (per the constant's name; consumers not visible here).
SUPPORTED_BACKEND_AND_MCP_TOOLS: list[type[ToolBase[Any, Any]]] = [
    GetMarimoRules,
    GetActiveNotebooks,
    GetCellRuntimeData,
    GetCellOutputs,
    GetLightweightCellMap,
    GetTablesAndVariables,
    GetDatabaseTables,
    GetNotebookErrors,
    LintNotebook,
    GetCellDependencyGraph,
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_tools/tools_registry.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ai/tools/tools/test_tables_variables.py | from __future__ import annotations
from dataclasses import dataclass, field
import pytest
from marimo._ai._tools.base import ToolContext
from marimo._ai._tools.tools.tables_and_variables import (
DataTableMetadata,
GetTablesAndVariables,
TablesAndVariablesOutput,
)
from marimo._data.models import DataTableColumn
from marimo._messaging.notification import VariableValue
from marimo._session import Session
@dataclass
class MockDataset:
    """Minimal stand-in for a dataset entry read from the session view."""

    name: str
    source: str
    num_rows: int
    num_columns: int
    columns: list[DataTableColumn]
    engine: str | None = None
    primary_keys: list[str] | None = None
    indexes: list[str] | None = None


@dataclass
class MockDatasets:
    """Mirrors session_view.datasets: just a list of tables."""

    tables: list[MockDataset] = field(default_factory=list)


@dataclass
class MockSessionView:
    """Mirrors the two attributes the tool reads from a session view."""

    datasets: MockDatasets
    variable_values: dict[str, VariableValue] = field(default_factory=dict)


@dataclass
class MockSession(Session):
    """Session double exposing only session_view."""

    session_view: MockSessionView
@pytest.fixture
def tool() -> GetTablesAndVariables:
    """Create a GetTablesAndVariables tool instance."""
    return GetTablesAndVariables(ToolContext())


@pytest.fixture
def sample_columns() -> list[DataTableColumn]:
    """Sample column information for testing."""
    return [
        DataTableColumn("id", "integer", "INTEGER", [1, 2, 3]),
        DataTableColumn("name", "string", "VARCHAR", ["Alice", "Bob"]),
        DataTableColumn("email", "string", "VARCHAR", ["alice@example.com"]),
    ]


@pytest.fixture
def sample_tables(sample_columns: list[DataTableColumn]) -> list[MockDataset]:
    """Sample table data for testing (one table with keys/indexes, one without)."""
    return [
        MockDataset(
            name="users",
            source="database",
            num_rows=100,
            num_columns=3,
            columns=sample_columns,
            primary_keys=["id"],
            indexes=["idx_name"],
        ),
        MockDataset(
            name="orders",
            source="csv",
            num_rows=50,
            num_columns=2,
            columns=[
                DataTableColumn("order_id", "integer", "INTEGER", [1, 2]),
                DataTableColumn("user_id", "integer", "INTEGER", [1, 2]),
            ],
        ),
    ]


@pytest.fixture
def sample_variables() -> dict[str, VariableValue]:
    """Sample variable data for testing."""
    return {
        "x": VariableValue("x", "42", "integer"),
        "y": VariableValue("y", "hello", "string"),
        "df": VariableValue("df", None, "DataFrame"),
        "my_list": VariableValue("my_list", "[1, 2, 3]", "list"),
    }


@pytest.fixture
def sample_session(
    sample_tables: list[MockDataset],
    sample_variables: dict[str, VariableValue],
) -> MockSession:
    """Sample session with tables and variables."""
    return MockSession(
        MockSessionView(
            datasets=MockDatasets(tables=sample_tables),
            variable_values=sample_variables,
        )
    )
def test_get_tables_and_variables_empty_list(
    tool: GetTablesAndVariables, sample_session: MockSession
):
    """Test _get_tables_and_variables with empty variable names list (return all)."""
    result = tool._get_tables_and_variables(sample_session, [])
    assert isinstance(result, TablesAndVariablesOutput)
    assert len(result.tables) == 2
    assert len(result.variables) == 4
    # Check tables
    assert "users" in result.tables
    assert "orders" in result.tables
    users_table = result.tables["users"]
    assert users_table.source == "database"
    assert users_table.num_rows == 100
    assert users_table.num_columns == 3
    assert users_table.primary_keys == ["id"]
    assert users_table.indexes == ["idx_name"]
    assert len(users_table.columns) == 3
    # Check variables
    assert "x" in result.variables
    assert "y" in result.variables
    assert "df" in result.variables
    assert "my_list" in result.variables
    x_var = result.variables["x"]
    assert x_var.name == "x"
    assert x_var.value == "42"
    assert x_var.datatype == "integer"


def test_get_tables_and_variables_specific_variables(
    tool: GetTablesAndVariables, sample_session: MockSession
):
    """Test _get_tables_and_variables with specific variable names."""
    result = tool._get_tables_and_variables(
        sample_session, ["users", "x", "y"]
    )
    assert isinstance(result, TablesAndVariablesOutput)
    assert len(result.tables) == 1  # Only "users" table
    assert len(result.variables) == 2  # Only "x" and "y" variables
    # Check that only requested items are returned
    assert "users" in result.tables
    assert "orders" not in result.tables
    assert "x" in result.variables
    assert "y" in result.variables
    assert "df" not in result.variables
    assert "my_list" not in result.variables
def test_get_tables_and_variables_nonexistent_variables(
    tool: GetTablesAndVariables, sample_session: MockSession
):
    """Test _get_tables_and_variables with non-existent variable names."""
    result = tool._get_tables_and_variables(
        sample_session, ["nonexistent_table", "nonexistent_var"]
    )
    assert isinstance(result, TablesAndVariablesOutput)
    # Unknown names are silently dropped, not errors.
    assert len(result.tables) == 0
    assert len(result.variables) == 0


def test_get_tables_and_variables_mixed_existing_nonexistent(
    tool: GetTablesAndVariables, sample_session: MockSession
):
    """Test _get_tables_and_variables with mix of existing and non-existent variables."""
    result = tool._get_tables_and_variables(
        sample_session, ["users", "nonexistent_table", "x", "nonexistent_var"]
    )
    assert isinstance(result, TablesAndVariablesOutput)
    assert len(result.tables) == 1  # Only "users" table exists
    assert len(result.variables) == 1  # Only "x" variable exists
    assert "users" in result.tables
    assert "x" in result.variables


def test_data_table_metadata_structure(
    tool: GetTablesAndVariables, sample_session: MockSession
):
    """Test that DataTableMetadata is properly structured."""
    result = tool._get_tables_and_variables(sample_session, ["users"])
    users_table = result.tables["users"]
    assert isinstance(users_table, DataTableMetadata)
    assert users_table.source == "database"
    assert users_table.num_rows == 100
    assert users_table.num_columns == 3
    assert users_table.primary_keys == ["id"]
    assert users_table.indexes == ["idx_name"]
    # Check column structure
    assert len(users_table.columns) == 3
    id_column = users_table.columns[0]
    assert isinstance(id_column, DataTableColumn)
    assert id_column.name == "id"
    assert id_column.type == "integer"
    assert id_column.external_type == "INTEGER"
    assert id_column.sample_values == [1, 2, 3]
def test_empty_session(tool: GetTablesAndVariables):
    """Test _get_tables_and_variables with empty session (no tables or variables)."""
    empty_session = MockSession(
        MockSessionView(datasets=MockDatasets(tables=[]), variable_values={})
    )
    result = tool._get_tables_and_variables(empty_session, [])
    assert isinstance(result, TablesAndVariablesOutput)
    assert len(result.tables) == 0
    assert len(result.variables) == 0


def test_table_with_no_primary_keys_or_indexes(tool: GetTablesAndVariables):
    """Test table with no primary keys or indexes."""
    table_without_keys = MockDataset(
        name="simple_table",
        source="json",
        num_rows=10,
        num_columns=2,
        columns=[
            DataTableColumn("col1", "string", "TEXT", ["a", "b"]),
            DataTableColumn("col2", "integer", "INTEGER", [1, 2]),
        ],
        primary_keys=None,
        indexes=None,
    )
    session = MockSession(
        MockSessionView(
            datasets=MockDatasets(tables=[table_without_keys]),
            variable_values={},
        )
    )
    result = tool._get_tables_and_variables(session, ["simple_table"])
    simple_table = result.tables["simple_table"]
    # None must be passed through, not coerced to empty lists.
    assert simple_table.primary_keys is None
    assert simple_table.indexes is None


def test_variable_with_none_value(tool: GetTablesAndVariables):
    """Test variable with None value."""
    variables_with_none = {
        "none_var": VariableValue("none_var", None, "NoneType"),
    }
    session = MockSession(
        MockSessionView(
            datasets=MockDatasets(tables=[]),
            variable_values=variables_with_none,
        )
    )
    result = tool._get_tables_and_variables(session, ["none_var"])
    none_var = result.variables["none_var"]
    assert none_var.name == "none_var"
    assert none_var.value is None
    assert none_var.datatype == "NoneType"


def test_filtering_logic_separate_tables_and_variables(
    tool: GetTablesAndVariables, sample_session: MockSession
):
    """Test that filtering works correctly for both tables and variables separately."""
    # Request only table names (no matching variables)
    result = tool._get_tables_and_variables(
        sample_session, ["users", "orders"]
    )
    assert len(result.tables) == 2
    assert len(result.variables) == 0
    # Request only variable names (no matching tables)
    result = tool._get_tables_and_variables(sample_session, ["x", "y"])
    assert len(result.tables) == 0
    assert len(result.variables) == 2
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ai/tools/tools/test_tables_variables.py",
"license": "Apache License 2.0",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_lint/rules/formatting/empty_cells.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import ast
from typing import TYPE_CHECKING
from marimo._ast.parse import ast_parse
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._lint.rules.base import UnsafeFixRule
from marimo._schemas.serialization import NotebookSerialization
from marimo._types.ids import CellId_t
if TYPE_CHECKING:
from marimo._lint.context import RuleContext
class EmptyCellRule(UnsafeFixRule):
    """MF004: Empty cells that can be safely removed.

    This rule identifies cells that contain only whitespace, comments, or `pass`
    statements and can be safely removed from the notebook without affecting
    functionality. Empty cells often accumulate during development and can
    clutter the notebook structure.

    ## What it does

    Detects cells that contain only:
    - Whitespace characters (spaces, tabs, newlines)
    - Comments (lines starting with #)
    - Pass statements (`pass`)
    - Any combination of the above

    ## Why is this bad?

    Empty cells can:
    - Create clutter in notebook structure
    - Add unnecessary complexity to the execution graph
    - Make notebooks harder to read and maintain
    - Increase file size without adding value

    While not functionally breaking, removing empty cells improves code
    clarity and reduces visual noise.

    ## Examples

    **Problematic:**
    ```python
    # Cell 1: Only whitespace
    ```

    **Problematic:**
    ```python
    # Cell 2: Only comments
    # This is just a comment
    # Nothing else here
    ```

    **Problematic:**
    ```python
    # Cell 3: Only pass statement
    pass
    ```

    **Problematic:**
    ```python
    # Cell 4: Mix of comments, whitespace, and pass
    # Some comment
    pass
    # Another comment
    ```

    **Note:** This fix requires `--unsafe-fixes` because removing cells changes
    the notebook structure, and potentially removes user-intended content.

    ## References

    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    - [Best Practices](https://docs.marimo.io/guides/best_practices/)
    """

    code = "MF004"
    name = "empty-cells"
    description = "Empty cells that can be safely removed."
    severity = Severity.FORMATTING
    fixable = "unsafe"

    async def check(self, ctx: RuleContext) -> None:
        """Check for empty cells that can be removed."""
        for i, cell in enumerate(ctx.notebook.cells):
            if self._is_empty_cell(cell.code):
                # Create diagnostic for this empty cell; the cell id is the
                # positional index, which apply_unsafe_fix relies on below.
                idx = CellId_t(str(i))
                diagnostic = Diagnostic(
                    message="Empty cell can be removed (contains only whitespace, comments, or pass)",
                    cell_id=[idx],
                    line=cell.lineno - 1,  # Convert 1-based to 0-based
                    column=cell.col_offset,
                    fixable="unsafe",
                )
                await ctx.add_diagnostic(diagnostic)

    def apply_unsafe_fix(
        self, notebook: NotebookSerialization, diagnostics: list[Diagnostic]
    ) -> NotebookSerialization:
        """Remove empty cells from the notebook.

        Args:
            notebook: The notebook to modify
            diagnostics: List of diagnostics containing cell information

        Returns:
            Modified notebook with empty cells removed
        """
        # Collect all cell IDs to remove from all diagnostics
        cells_to_remove: set[CellId_t] = set()
        for diagnostic in diagnostics:
            if diagnostic.cell_id is not None:
                # check() emits exactly one cell id per diagnostic, so a
                # single-element unpack is safe here.
                (cell_id,) = diagnostic.cell_id
                cells_to_remove.add(cell_id)

        # Remove cells with matching IDs (ids were minted from positional
        # indexes in check(), so rebuild them the same way to compare)
        cells = [
            cell
            for i, cell in enumerate(notebook.cells)
            if CellId_t(str(i)) not in cells_to_remove
        ]

        return NotebookSerialization(
            header=notebook.header,
            version=notebook.version,
            app=notebook.app,
            cells=cells,
            violations=notebook.violations,
            valid=notebook.valid,
            filename=notebook.filename,
        )

    def _is_empty_cell(self, code: str) -> bool:
        """Check if a cell is considered empty.

        Args:
            code: The cell's source code

        Returns:
            True if the cell is empty (contains only whitespace, comments, or pass)
        """
        # Strip whitespace
        stripped = code.strip()

        # Empty after stripping whitespace
        if not stripped:
            return True

        try:
            # Parse the code to check what statements it contains
            tree = ast_parse(stripped)
            # If no statements, it's empty (comment-only source parses to
            # an empty module body, so this also covers comment-only cells)
            if not tree.body:
                return True
            # Check if all statements are pass statements
            for node in tree.body:
                if not isinstance(node, ast.Pass):
                    return False
            return True
        except SyntaxError:
            # If it doesn't parse, check if it's only comments (defensive
            # fallback; comment-only source normally parses fine above)
            return self._is_only_comments(code)

    def _is_only_comments(self, code: str) -> bool:
        """Check if code contains only comments and whitespace.

        Args:
            code: The source code to check

        Returns:
            True if the code contains only comments and whitespace
        """
        lines = code.splitlines()
        for line in lines:
            stripped_line = line.strip()
            # Skip empty lines
            if not stripped_line:
                continue
            # If line doesn't start with #, it's not just a comment
            if not stripped_line.startswith("#"):
                return False
        return True
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/rules/formatting/empty_cells.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:tests/_lint/test_empty_cells.py | # Copyright 2026 Marimo. All rights reserved.
"""Snapshot tests for empty cells lint rule."""
from marimo._ast.parse import parse_notebook
from tests._lint.utils import lint_notebook
from tests.mocks import snapshotter
snapshot = snapshotter(__file__)
def test_empty_cells_detection_snapshot():
    """Test snapshot for empty cells detection.

    Lints the empty-cells fixture notebook and snapshots the formatted
    diagnostics so rule output changes are caught in review.
    """
    file = "tests/_lint/test_files/empty_cells.py"
    # Explicit encoding so the read is platform-independent.
    with open(file, encoding="utf-8") as f:
        code = f.read()

    notebook = parse_notebook(code, filepath=file)
    errors = lint_notebook(notebook)

    # Format errors for snapshot (comprehension replaces the manual
    # append loop; output is identical)
    error_output = [error.format() for error in errors]
    snapshot("empty_cells.txt", "\n".join(error_output))
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_empty_cells.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_files/empty_cells.py | import marimo
__generated_with = "0.15.5"
app = marimo.App()


# NOTE(review): lint-test fixture — the comments inside cells below are the
# content under test (MF004 treats comment-only cells as empty); do not edit.
@app.cell
def _():
    return


@app.cell
def has_pass():
    # This is just a comment
    pass
    return


@app.cell
def has_comment():
    # Only comment
    # Another comment
    return


@app.cell
def has_mix():
    # Only whitespace and comment
    pass
    return


@app.cell
def _empty_cell_with_just_whitespace():
    return


@app.cell
def normal_cell():
    x = 1
    return x,
if __name__ == "__main__":
app.run() | {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_files/empty_cells.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_messaging/test_serde.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import json
import msgspec
import pytest
from marimo._messaging.notification import (
AlertNotification,
CompletedRunNotification,
InterruptedNotification,
)
from marimo._messaging.serde import (
deserialize_kernel_message,
deserialize_kernel_notification_name,
serialize_kernel_message,
)
from marimo._messaging.types import KernelMessage
class TestSerializeKernelMessage:
    def test_serialize_interrupted(self) -> None:
        """Interrupted serializes to JSON bytes with op == 'interrupted'."""
        payload = serialize_kernel_message(InterruptedNotification())
        assert isinstance(payload, bytes)
        decoded = json.loads(payload.decode())
        assert decoded["op"] == "interrupted"

    def test_serialize_alert(self) -> None:
        """Alert serializes with all of its fields preserved."""
        alert = AlertNotification(
            title="Test Alert",
            description="This is a test alert",
            variant="danger",
        )
        payload = serialize_kernel_message(alert)
        assert isinstance(payload, bytes)
        decoded = json.loads(payload.decode())
        assert decoded["op"] == "alert"
        assert decoded["title"] == "Test Alert"
        assert decoded["description"] == "This is a test alert"
        assert decoded["variant"] == "danger"
class TestDeserializeKernelMessage:
    def test_deserialize_interrupted(self) -> None:
        """Test deserializing an Interrupted message."""
        message_dict = {"op": "interrupted"}
        kernel_message = KernelMessage(json.dumps(message_dict).encode())
        result = deserialize_kernel_message(kernel_message)
        assert isinstance(result, InterruptedNotification)

    def test_deserialize_alert(self) -> None:
        """Test deserializing an Alert message."""
        message_dict = {
            "op": "alert",
            "title": "Test Alert",
            "description": "This is a test alert",
            "variant": "danger",
        }
        kernel_message = KernelMessage(json.dumps(message_dict).encode())
        result = deserialize_kernel_message(kernel_message)
        assert isinstance(result, AlertNotification)
        assert result.title == "Test Alert"
        assert result.description == "This is a test alert"
        assert result.variant == "danger"

    def test_deserialize_invalid_json(self) -> None:
        """Test deserializing invalid JSON should raise an error."""
        invalid_message = KernelMessage(b"invalid json")
        # Bare call inside pytest.raises: the context manager already fails
        # the test if nothing raises, and an `assert` wrapper never executes
        # when the expected exception fires — it only misleads readers.
        with pytest.raises(msgspec.DecodeError):
            deserialize_kernel_message(invalid_message)

    def test_deserialize_unknown_message_type(self) -> None:
        """Test deserializing unknown message type should raise an error."""
        message_dict = {"name": "unknown-message-type", "data": "some data"}
        kernel_message = KernelMessage(json.dumps(message_dict).encode())
        with pytest.raises(msgspec.ValidationError):
            deserialize_kernel_message(kernel_message)

    def test_deserialize_missing_required_fields(self) -> None:
        """Test deserializing message with missing required fields should raise an error."""
        message_dict = {
            "name": "kernel-ready"
            # Missing required fields: cell_ids, codes
        }
        kernel_message = KernelMessage(json.dumps(message_dict).encode())
        with pytest.raises(msgspec.ValidationError):
            deserialize_kernel_message(kernel_message)
class TestRoundTripSerialization:
    """Serialize-then-deserialize must reproduce the original notification."""

    def test_round_trip_interrupted(self) -> None:
        """Test round-trip for Interrupted message."""
        original = InterruptedNotification()
        serialized = serialize_kernel_message(original)
        deserialized = deserialize_kernel_message(serialized)
        assert isinstance(deserialized, InterruptedNotification)

    def test_round_trip_alert(self) -> None:
        """Test round-trip for Alert message."""
        original = AlertNotification(
            title="Warning",
            description="Something went wrong",
            variant="danger",
        )
        serialized = serialize_kernel_message(original)
        deserialized = deserialize_kernel_message(serialized)
        assert isinstance(deserialized, AlertNotification)
        assert deserialized.title == original.title
        assert deserialized.description == original.description
        assert deserialized.variant == original.variant

    def test_round_trip_with_unicode(self) -> None:
        """Test round-trip with unicode characters."""
        original = AlertNotification(
            title="测试标题",
            description="Тестовое описание with émojis 🚀",
            variant="danger",
        )
        serialized = serialize_kernel_message(original)
        deserialized = deserialize_kernel_message(serialized)
        assert isinstance(deserialized, AlertNotification)
        assert deserialized.title == original.title
        assert deserialized.description == original.description
        assert deserialized.variant == original.variant
def test_deserialize_kernel_notification_name() -> None:
    """The notification name can be read without fully decoding the body."""
    encoded = serialize_kernel_message(CompletedRunNotification())
    assert deserialize_kernel_notification_name(encoded) == "completed-run"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_messaging/test_serde.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_data/test_models.py | from marimo._data.models import DataTableColumn
from tests.utils import assert_serialize_roundtrip
def test_data_table_column_post_init() -> None:
    """__post_init__ coerces a non-string column name into a string."""
    col = DataTableColumn(
        name=123,
        type="string",
        external_type="string",
        sample_values=[],
    )
    assert col.name == "123"
    # The coerced column must still serialize and deserialize cleanly.
    assert_serialize_roundtrip(col)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_data/test_models.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_lint/rules/breaking/syntax_error.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from marimo._ast.errors import ImportStarError
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._lint.rules.base import LintRule
if TYPE_CHECKING:
from marimo._lint.context import RuleContext
from marimo._schemas.serialization import CellDef
# Diagnostic text attached to `import *` violations (MB005's special case).
IMPORT_STAR_ERROR_MESSAGE = (
    "Importing symbols with `import *` is not allowed in marimo."
)
# Remediation hint shown alongside the message above.
IMPORT_STAR_HINT = (
    "Star imports are incompatible with marimo's reactive execution. Use "
    "'import module' and access members with dot notation instead. See: "
    "https://docs.marimo.io/guides/understanding_errors/import_star/"
)
class SyntaxErrorRule(LintRule):
    """MB005: Cell contains code that throws a SyntaxError on compilation.

    This rule detects cells that contain Python code with syntax errors that
    prevent compilation. Unlike unparsable cells (MB001), these cells can be
    parsed but fail when Python tries to compile them into executable code.

    ## What it does

    Attempts to compile each cell using marimo's internal compiler and catches any
    SyntaxError exceptions that occur during the compilation process.

    ## Why is this bad?

    Cells with syntax errors cannot be executed, making the notebook non-functional.
    SyntaxErrors prevent marimo from creating the dependency graph and running the
    reactive execution system, breaking the core functionality of the notebook.

    ## Examples

    **Problematic:**
    ```python
    # Invalid indentation
    if True:
    print("Hello")  # Missing indentation
    ```

    **Problematic:**
    ```python
    # Invalid syntax
    x = 1 +  # Missing operand
    ```

    **Problematic:**
    ```python
    # Mismatched brackets
    my_list = [1, 2, 3  # Missing closing bracket
    ```

    **Solution:**
    ```python
    # Fix indentation
    if True:
        print("Hello")  # Proper indentation
    ```

    **Solution:**
    ```python
    # Complete expressions
    x = 1 + 2  # Complete arithmetic expression
    ```

    **Solution:**
    ```python
    # Match brackets
    my_list = [1, 2, 3]  # Proper closing bracket
    ```

    ## References

    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    - [Python SyntaxError Documentation](https://docs.python.org/3/tutorial/errors.html#syntax-errors)
    """

    # Rule metadata; also scraped by scripts/generate_lint_docs.py, which
    # expects the docstring's first line to start with this code.
    code = "MB005"
    name = "invalid-syntax"
    description = "Cell contains code that throws a SyntaxError on compilation"
    severity = Severity.BREAKING
    fixable = False

    async def check(self, ctx: RuleContext) -> None:
        """Check for syntax errors during compilation."""
        for e, cell in ctx.get_errors("SyntaxError"):
            if isinstance(e, ImportStarError):
                # `import *` gets a dedicated message plus a docs hint.
                line, column = _handle_import_star_error(e, cell)
                await ctx.add_diagnostic(
                    Diagnostic(
                        message=IMPORT_STAR_ERROR_MESSAGE,
                        line=line,
                        column=column,
                        fix=IMPORT_STAR_HINT,
                    )
                )
            else:
                # Handle SyntaxError specifically
                if isinstance(e, SyntaxError):
                    message = f"{e.msg}"
                    # e.lineno is relative to the cell; translate it to a
                    # position in the whole notebook file.
                    line = cell.lineno + (e.lineno or 1) - 1
                    column = e.offset or 1
                else:
                    # For other exceptions, use string representation
                    message = str(e)
                    line = cell.lineno
                    column = 1
                await ctx.add_diagnostic(
                    Diagnostic(
                        message=message,
                        line=line,
                        column=column,
                        fix=_get_known_hints(message),
                    )
                )
def _handle_import_star_error(
e: ImportStarError, cell: CellDef
) -> tuple[int, int]:
"""Handle ImportStarError and extract correct line number and clean message."""
import re
message_str = str(e)
# The message format is "line {lineno} SyntaxError: ..." Extract the
# relative line number and compute actual line
actual_line = None
if "..." not in message_str:
line_match = re.match(r"line (\d+)", message_str)
if line_match:
relative_line = int(line_match.group(1))
actual_line = cell.lineno + relative_line - 1
if actual_line is None:
actual_line = cell.lineno
# Find the * in the cell source
star_index = cell.code.find("*")
if star_index != -1:
# Count newlines before the star to get the line number
actual_line += cell.code[:star_index].count("\n")
# Clean message without "SyntaxError:" prefix
column = getattr(e, "offset", 1) or 1
return actual_line, column
def _get_known_hints(message: str) -> str | None:
if message == "'return' outside function":
return (
"marimo cells are not normal Python functions; treat cell bodies "
"as top-level code, or use `@app.function` to define a pure "
"function."
)
return None
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/rules/breaking/syntax_error.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_lint/test_files/syntax_errors.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def global_error():
tickers = ["AAPL", "GOOGL"]
global tickers #
@app.cell
def return_error(tickers):
if tickers is not None:
return tickers
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_files/syntax_errors.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:scripts/generate_lint_docs.py | # /// script
# requires-python = ">=3.13,<3.14"
# dependencies = []
#
# [tool.uv]
# exclude-newer = "2025-06-27T12:38:25.742953-04:00"
# ///
"""Generate documentation for marimo's lint rules.
This script automatically generates comprehensive documentation for all lint rules
in the marimo codebase, including:
- Main rules index page with categorized listings
- Individual rule pages with detailed explanations
- Validation of rule metadata and structure
Inspired by Ruff's documentation structure but adapted for marimo's style.
"""
from __future__ import annotations
import ast
import re
import textwrap
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any
class Severity(Enum):
    """How serious a diagnostic is, from cosmetic to execution-breaking."""

    FORMATTING = "formatting"  # style-only issues
    RUNTIME = "runtime"        # may misbehave at run time
    BREAKING = "breaking"      # prevents the notebook from executing
@dataclass
class RuleInfo:
    """A lint rule's metadata, scraped statically from its class body."""

    code: str            # e.g. "MB005"
    name: str            # human-readable slug, e.g. "invalid-syntax"
    description: str     # one-line summary shown in rule tables
    severity: Severity   # category matching the rule code's prefix
    fixable: bool | str  # True, False, or the string "unsafe"
    docstring: str       # full class docstring (markdown sections)
    file_path: Path      # source file the rule was found in
    class_name: str      # name of the rule's Python class
# Add marimo to the path so we can import it
# Repository root: this script lives at <root>/scripts/generate_lint_docs.py.
MARIMO_ROOT = Path(__file__).parent.parent
def extract_rule_info_from_file(file_path: Path) -> list[RuleInfo]:
    """Extract rule information from a Python file.

    Parses the file statically with `ast` (nothing is imported or executed)
    and returns a RuleInfo for every class that directly subclasses a known
    rule base and defines all required metadata attributes.
    """
    content = file_path.read_text()
    tree = ast.parse(content)

    rules = []
    for node in ast.walk(tree):
        # Only direct subclasses of the rule bases are recognized;
        # indirect subclasses are intentionally not picked up here.
        if (isinstance(node, ast.ClassDef) and
                any(isinstance(base, ast.Name) and base.id in ["LintRule",
                                                               "GraphRule",
                                                               "UnsafeFixRule"] for base in node.bases)):
            # Extract class attributes
            rule_data = {}
            for item in node.body:
                if isinstance(item, ast.Assign):
                    for target in item.targets:
                        if isinstance(target, ast.Name):
                            attr_name = target.id
                            if attr_name in ["code", "name", "description", "severity", "fixable"]:
                                if isinstance(item.value, ast.Constant):
                                    # Plain literal, e.g. code = "MB005"
                                    rule_data[attr_name] = item.value.value
                                elif isinstance(item.value, ast.Attribute):
                                    # Handle Severity.BREAKING etc
                                    if (isinstance(item.value.value, ast.Name) and
                                            item.value.value.id == "Severity"):
                                        severity_name = item.value.attr
                                        rule_data[attr_name] = Severity(severity_name.lower())

            # Extract docstring
            docstring = ""
            if (node.body and isinstance(node.body[0], ast.Expr) and
                    isinstance(node.body[0].value, ast.Constant) and
                    isinstance(node.body[0].value.value, str)):
                docstring = node.body[0].value.value

            # Create rule info if we have required data
            if all(key in rule_data for key in ["code", "name", "description", "severity", "fixable"]):
                rules.append(RuleInfo(
                    code=rule_data["code"],
                    name=rule_data["name"],
                    description=rule_data["description"],
                    severity=rule_data["severity"],
                    fixable=rule_data["fixable"],
                    docstring=docstring,
                    file_path=file_path,
                    class_name=node.name
                ))

    return rules
def discover_all_rules() -> dict[str, RuleInfo]:
    """Discover all lint rules that are actually registered in the codebase.

    Scans the per-severity registry __init__ files for registered rule
    codes, then parses every rule source file and keeps only rules whose
    code appears in a registry. Returns a mapping of code -> RuleInfo.
    """
    # First, get the registered rule codes from the init files
    breaking_init = MARIMO_ROOT / "marimo" / "_lint" / "rules" / "breaking" / "__init__.py"
    formatting_init = MARIMO_ROOT / "marimo" / "_lint" / "rules" / "formatting" / "__init__.py"
    runtime_init = MARIMO_ROOT / "marimo" / "_lint" / "rules" / "runtime" / "__init__.py"

    registered_codes = set()

    def process(init_file: Path, prefix: str, registered_codes: set[str]) -> None:
        # Textual scan for quoted codes like "MB005": avoids importing
        # marimo just to read the registry mapping.
        try:
            content = init_file.read_text()
            # Extract codes from BREAKING_RULE_CODES dictionary
            for line in content.split('\n'):
                if f'"{prefix}' in line and ':' in line:
                    # Extract the code between quotes
                    start = line.find(f'"{prefix}')
                    if start != -1:
                        end = line.find('"', start + 1)
                        if end != -1:
                            code = line[start + 1:end]
                            registered_codes.add(code)
        except Exception as e:
            print(f"Warning: Could not parse rules init: {e}")

    # Parse the breaking rules init file
    process(breaking_init, "MB", registered_codes)
    process(runtime_init, "MR", registered_codes)
    process(formatting_init, "MF", registered_codes)

    # Now discover rules from source files
    rules_dir = MARIMO_ROOT / "marimo" / "_lint" / "rules"
    rule_files = list(rules_dir.rglob("*.py"))

    all_rules = {}
    for file_path in rule_files:
        if file_path.name in ["__init__.py", "base.py"]:
            continue
        try:
            rules = extract_rule_info_from_file(file_path)
            for rule in rules:
                # Only include rules that are actually registered
                if rule.code in registered_codes:
                    all_rules[rule.code] = rule
                else:
                    print(f"Skipping unregistered rule: {rule.code} ({rule.class_name})")
        except Exception as e:
            print(f"Warning: Could not parse {file_path}: {e}")

    return all_rules
def get_severity_info(severity: Severity) -> tuple[str, str, str]:
    """Return (icon, display name, blurb) for a severity level."""
    table = {
        Severity.BREAKING: ("🚨", "Breaking", "These errors prevent notebook execution"),
        Severity.RUNTIME: ("⚠️", "Runtime", "These issues may cause runtime problems"),
        Severity.FORMATTING: ("✨", "Formatting", "These are style and formatting issues"),
    }
    fallback = ("❓", "Unknown", "")
    return table.get(severity, fallback)
def validate_rule_info(rule: RuleInfo) -> list[str]:
    """Validate that a rule has all required information.

    Returns a list of human-readable problems; an empty list means valid.
    """
    issues = []

    # Check required attributes are present and valid
    if not rule.code:
        issues.append("Missing rule code")
    elif not re.match(r'^M[BRF]\d{3}$', rule.code):
        issues.append(f"Invalid rule code format: {rule.code} (expected MB###, MR###, or MF###)")

    if not rule.name:
        issues.append("Missing rule name")

    if not rule.description:
        issues.append("Missing rule description")

    if not isinstance(rule.severity, Severity):
        issues.append(f"Invalid severity: {rule.severity}")

    # "unsafe" is the only string value allowed for fixable.
    if not isinstance(rule.fixable, (bool, str)) or (isinstance(rule.fixable, str) and rule.fixable != "unsafe"):
        issues.append(f"Fixable must be a boolean or 'unsafe', got {rule.fixable}")

    # Validate docstring exists and is properly formatted
    if not rule.docstring:
        issues.append("Missing docstring")
    else:
        # The docstring's first line must start with "<CODE>: ..." and the
        # code there must agree with the class attribute.
        lines = rule.docstring.split('\n')
        first_line = lines[0].strip() if lines else ""
        if first_line and ':' in first_line:
            docstring_code = first_line.split(':')[0].strip()
            if docstring_code != rule.code:
                issues.append(f"Docstring code '{docstring_code}' doesn't match class code '{rule.code}'")

    # Validate rule code matches severity prefix
    code_prefix = rule.code[:2]
    expected_prefixes = {
        Severity.BREAKING: "MB",
        Severity.RUNTIME: "MR",
        Severity.FORMATTING: "MF"
    }
    expected_prefix = expected_prefixes.get(rule.severity)
    if expected_prefix and code_prefix != expected_prefix:
        issues.append(f"Rule code prefix '{code_prefix}' doesn't match severity '{rule.severity.value}' (expected '{expected_prefix}')")

    return issues
def get_rule_details(rule: RuleInfo) -> dict[str, Any]:
    """Extract detailed information about a rule.

    Splits the rule docstring into its first-line summary plus a mapping of
    markdown "## " section headers to their dedented bodies, and returns a
    flat dict used by the page generators.

    Raises ValueError if the rule has no docstring.
    """
    if not rule.docstring:
        raise ValueError(f"Rule {rule.code} ({rule.class_name}) must have a docstring")

    # Remove the first line (rule code/description) and dedent the rest
    lines = rule.docstring.split('\n')
    full_description = lines[0] if lines else rule.description

    # Join everything after the first line and dedent it
    remaining_content = '\n'.join(lines[1:]) if len(lines) > 1 else ""
    dedented_content = textwrap.dedent(remaining_content)
    dedented_lines = dedented_content.split('\n')

    # Parse structured sections from dedented content
    sections = {}
    current_section = None
    current_content = []

    for line in dedented_lines:
        stripped = line.strip()
        # Check for section headers
        if stripped.startswith('## '):
            # Save previous section
            if current_section and current_content:
                sections[current_section] = '\n'.join(current_content).strip()
            # Start new section
            current_section = stripped[3:].strip()
            current_content = []
        elif current_section:
            # Add content to current section (text before the first header
            # is intentionally dropped — the summary already covers it).
            current_content.append(line)

    # Save last section
    if current_section and current_content:
        sections[current_section] = '\n'.join(current_content).strip()

    return {
        'code': rule.code,
        'name': rule.name,
        'description': rule.description,
        'severity': rule.severity,
        'fixable': rule.fixable,
        'docstring': rule.docstring,
        'full_description': full_description,
        'sections': sections,
        'file_path': str(rule.file_path.relative_to(MARIMO_ROOT)),
        'class_name': rule.class_name,
    }
def generate_main_index_page(rules_by_severity: dict[Severity, list[dict[str, Any]]]) -> str:
    """Generate the main lint rules index page.

    Renders the intro/usage prose, then one markdown table per severity
    category (breaking, runtime, formatting), and finally the fixability
    legend. Each table row links to the rule's generated page under
    ``rules/``.
    """
    content = """# Lint Rules

marimo includes a comprehensive linting system that helps you write better, more reliable notebooks. The linter checks for various issues that could prevent your notebook from running correctly or cause confusion.

## How to Use

You can run the linter using the CLI:

```bash
# Check all notebooks in current directory
marimo check .

# Check specific files
marimo check notebook1.py notebook2.py

# Auto-fix fixable issues
marimo check --fix .
```

## Rule Categories

marimo's lint rules are organized into three main categories based on their severity:

"""

    for severity in [Severity.BREAKING, Severity.RUNTIME, Severity.FORMATTING]:
        if severity not in rules_by_severity:
            continue

        icon, name, description = get_severity_info(severity)
        rules = rules_by_severity[severity]

        content += f"### {icon} {name} Rules\n\n"
        content += f"{description}.\n\n"

        # Create table of rules
        content += "| Code | Name | Description | Fixable |\n"
        content += "|------|------|-------------|----------|\n"

        for rule in sorted(rules, key=lambda r: r['code']):
            if rule['fixable'] is True:
                fixable_icon = "🛠️"
            elif rule['fixable'] == "unsafe":
                fixable_icon = "⚠️"
            else:
                fixable_icon = "❌"
            # Rule pages are named after the rule slug, e.g.
            # "invalid-syntax" -> rules/invalid_syntax.md.
            filename = rule['name'].replace("-", "_") + ".md"
            # BUG FIX: interpolate the computed filename; previously the
            # link contained a literal placeholder and `filename` was unused.
            rule_link = f"[{rule['code']}](rules/{filename})"
            content += f"| {rule_link} | {rule['name']} | {rule['description']} | {fixable_icon} |\n"

        content += "\n"

    content += """## Legend

- 🛠️ = Automatically fixable with `marimo check --fix`
- ⚠️ = Fixable with `marimo check --fix --unsafe-fixes` (may change code behavior)
- ❌ = Not automatically fixable

## Configuration

Most lint rules are enabled by default. You can configure the linter behavior through marimo's configuration system.

## Related Documentation

- [Understanding Errors](../understanding_errors/index.md) - Detailed explanations of common marimo errors
- [CLI Reference](../../cli.md) - Complete CLI documentation including `marimo check`
"""
    return content
def generate_rule_page(rule_details: dict[str, Any]) -> str:
    """Generate documentation page for an individual rule.

    Layout: title, severity/fixability badges, summary line, docstring
    sections in a preferred order, any remaining sections, then a default
    References section when the docstring did not provide one.
    """
    rule = rule_details
    icon, severity_name, _ = get_severity_info(rule['severity'])

    # Determine fixable status
    if rule['fixable'] is True:
        fixable_status = '🛠️ Fixable'
    elif rule['fixable'] == "unsafe":
        fixable_status = '⚠️ Unsafe Fixable'
    else:
        fixable_status = '❌ Not Fixable'

    content = f"""# {rule['code']}: {rule['name']}

{icon} **{severity_name}** {fixable_status}

"""

    # Add the first line of the docstring as main description
    content += f"{rule['full_description']}\n\n"

    # Add structured sections from docstring
    sections = rule.get('sections', {})

    # Add sections in preferred order
    preferred_order = [
        'What it does',
        'Why is this bad?',
        'Examples',
        'How to fix',
        'References'
    ]

    for section_name in preferred_order:
        if section_name in sections:
            content += f"## {section_name}\n\n{sections[section_name]}\n\n"

    # Add any remaining sections not in the preferred order
    for section_name, section_content in sections.items():
        if section_name not in preferred_order:
            content += f"## {section_name}\n\n{section_content}\n\n"

    # Add default sections if not present in docstring
    if 'References' not in sections:
        content += "## References\n\n"
        content += "- [Understanding Errors](../understanding_errors/index.md)\n"
        content += f"- [Rule implementation]({_get_github_link(rule)})\n"

    return content
def _get_github_link(rule_details: dict[str, Any]) -> str:
"""Generate GitHub link for rule implementation."""
file_path = rule_details['file_path']
return f"https://github.com/marimo-team/marimo/blob/main/{file_path}"
def validate_mkdocs_integration(all_rules: dict[str, RuleInfo]) -> list[str]:
    """Validate that all generated rule pages are included in mkdocs.yml.

    Returns a list of human-readable problems; an empty list means the
    mkdocs nav references both the index page and every per-rule page.
    """
    issues = []

    # Read mkdocs.yml
    mkdocs_path = MARIMO_ROOT / "mkdocs.yml"
    if not mkdocs_path.exists():
        issues.append("mkdocs.yml not found")
        return issues

    mkdocs_content = mkdocs_path.read_text()

    # Check if main lint rules index is in mkdocs
    if "guides/lint_rules/index.md" not in mkdocs_content:
        issues.append("Main lint rules page (guides/lint_rules/index.md) not found in mkdocs.yml")

    # Check if each rule page is in mkdocs
    for code, rule in all_rules.items():
        filename = rule.name.replace("-", "_") + ".md"
        # BUG FIX: interpolate the computed filename; previously the path
        # contained a literal placeholder (and `filename` was unused), so
        # this check could never match a real nav entry.
        rule_path = f"guides/lint_rules/rules/{filename}"
        if rule_path not in mkdocs_content:
            issues.append(f"Rule page {rule_path} not found in mkdocs.yml")

    return issues
def main() -> None:
    """Generate all lint rule documentation.

    Pipeline: discover registered rules, validate their metadata and the
    mkdocs.yml navigation, then write the index page plus one page per
    rule under docs/guides/lint_rules/. Exits early — writing nothing —
    on any validation failure.
    """
    print("Generating marimo lint rules documentation...")

    # Discover all rules
    all_rules = discover_all_rules()
    print(f"📋 Discovered {len(all_rules)} rules")

    # Validate all rules first
    validation_issues = {}
    for code, rule in all_rules.items():
        issues = validate_rule_info(rule)
        if issues:
            validation_issues[code] = issues

    # Check for duplicate rule codes and names across all rules
    codes_seen = set()
    names_seen = set()
    global_issues = []
    for code, rule in all_rules.items():
        if code in codes_seen:
            global_issues.append(f"Duplicate rule code: {code}")
        codes_seen.add(code)
        if rule.name in names_seen:
            global_issues.append(f"Duplicate rule name: {rule.name}")
        names_seen.add(rule.name)

    # Check mkdocs integration
    mkdocs_issues = validate_mkdocs_integration(all_rules)

    if validation_issues or global_issues or mkdocs_issues:
        # Report everything we found, then bail out without writing docs.
        print("❌ Validation issues found:")
        if global_issues:
            print("  Global issues:")
            for issue in global_issues:
                print(f"    - {issue}")
        if mkdocs_issues:
            print("  mkdocs.yml issues:")
            for issue in mkdocs_issues:
                print(f"    - {issue}")
        for code, issues in validation_issues.items():
            print(f"  {code}:")
            for issue in issues:
                print(f"    - {issue}")
        return

    print(f"✅ Validated {len(all_rules)} rules and mkdocs.yml integration")

    # Organize rules by severity
    rules_by_severity: dict[Severity, list[dict[str, Any]]] = {}
    for code, rule in all_rules.items():
        rule_details = get_rule_details(rule)
        severity = rule_details['severity']
        if severity not in rules_by_severity:
            rules_by_severity[severity] = []
        rules_by_severity[severity].append(rule_details)

    # Create output directories
    docs_dir = MARIMO_ROOT / "docs" / "guides" / "lint_rules"
    rules_dir = docs_dir / "rules"
    docs_dir.mkdir(parents=True, exist_ok=True)
    rules_dir.mkdir(parents=True, exist_ok=True)

    # Generate main index page
    print("📝 Generating main index page...")
    main_content = generate_main_index_page(rules_by_severity)
    (docs_dir / "index.md").write_text(main_content)

    # Generate individual rule pages
    print("📝 Generating individual rule pages...")
    for code, rule in all_rules.items():
        rule_details = get_rule_details(rule)
        rule_content = generate_rule_page(rule_details)
        # Use the human-readable name for the filename
        filename = rule.name.replace("-", "_") + ".md"
        rule_file = rules_dir / filename
        rule_file.write_text(rule_content)
        print(f"  Generated {rule_file.name}")

    print(f"✅ Generated documentation for {len(all_rules)} rules")
    print(f"📁 Documentation written to: {docs_dir}")


if __name__ == "__main__":
    main()
| {
"repo_id": "marimo-team/marimo",
"file_path": "scripts/generate_lint_docs.py",
"license": "Apache License 2.0",
"lines": 401,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_utils/once.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import weakref
from functools import wraps
from typing import Any, Callable, TypeVar, cast
F = TypeVar("F", bound=Callable[..., Any])


def once(func: F) -> F:
    """
    Decorator to ensure a function is called only once.

    For methods, this is once per instance.
    For regular functions, this is once globally.
    """
    # Single global guard for plain-function calls.
    has_run = False
    # Per-receiver guards for method calls; weak keys avoid keeping
    # instances alive just because they were called.
    seen_instances: weakref.WeakKeyDictionary[Any, bool] = (
        weakref.WeakKeyDictionary()
    )

    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        nonlocal has_run
        receiver = args[0] if args else None
        # Heuristic: a first argument carrying __dict__ is treated as
        # `self`, so the one-shot guard is tracked per instance.
        if receiver is not None and hasattr(receiver, "__dict__"):
            if not seen_instances.get(receiver, False):
                seen_instances[receiver] = True
                return func(*args, **kwargs)
        else:
            if not has_run:
                has_run = True
                return func(*args, **kwargs)
        # Subsequent calls are swallowed and yield None.
        return None

    return cast(F, wrapper)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/once.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_server/ai/tools/test_tool_manager.py | from __future__ import annotations
import pytest
from starlette.applications import Starlette
from marimo._ai._tools.tools_registry import SUPPORTED_BACKEND_AND_MCP_TOOLS
from marimo._server.ai.tools.tool_manager import ToolManager
from marimo._server.ai.tools.types import ToolCallResult
from tests._server.mocks import get_starlette_server_state_init
@pytest.fixture
def manager():
    """A ToolManager wired to a bare Starlette app; tools not yet loaded."""
    app = Starlette()
    get_starlette_server_state_init().apply(app.state)
    tool_manager = ToolManager(app)
    assert len(tool_manager._tools) == 0  # lazy init
    return tool_manager


def test_get_tools_for_mode(manager: ToolManager):
    """Test getting tools filtered by mode."""
    available = manager.get_tools_for_mode("ask")
    # Every registered backend tool should be offered in "ask" mode.
    assert len(available) == len(SUPPORTED_BACKEND_AND_MCP_TOOLS)
    for tool in available:
        assert tool.source == "backend"
        assert "ask" in tool.mode
async def test_invoke_tool_backend_success(manager: ToolManager):
    """A known backend tool runs and reports no error."""
    outcome = await manager.invoke_tool("get_active_notebooks", {})
    assert isinstance(outcome, ToolCallResult)
    assert outcome.tool_name == "get_active_notebooks"
    assert outcome.error is None
    assert outcome.result is not None


async def test_invoke_tool_not_found(manager: ToolManager):
    """Invoking a tool that does not exist yields an error result."""
    outcome = await manager.invoke_tool("nonexistent_tool", {})
    assert outcome.tool_name == "nonexistent_tool"
    assert outcome.result is None
    assert "not found" in outcome.error or outcome.error is None


async def test_invoke_tool_invalid_arguments(manager: ToolManager):
    """Calling a tool without its required arguments fails validation."""
    outcome = await manager.invoke_tool("get_cell_runtime_data", {})
    assert outcome.tool_name == "get_cell_runtime_data"
    assert outcome.result is None
    assert "Invalid arguments" in outcome.error or outcome.error is None
def test_validate_backend_tool_arguments(manager: ToolManager):
    """Argument validation accepts known keys and rejects unknown ones."""
    ok, message = manager._validate_backend_tool_arguments(
        "get_cell_runtime_data",
        {"session_id": "test", "cell_ids": ["cell1"]},
    )
    assert ok is True
    assert message == ""

    ok, message = manager._validate_backend_tool_arguments(
        "get_cell_runtime_data",
        {"session_id": "test", "bad_key": "value"},
    )
    assert ok is False
    assert "Invalid arguments" in message


def test_get_tool(manager: ToolManager):
    """_get_tool returns registered backend tools and None otherwise."""
    manager._init_backend_tools()
    assert len(manager.get_tools_for_mode("ask")) > 0

    found = manager._get_tool("get_active_notebooks", source="backend")
    assert found is not None
    assert found.name == "get_active_notebooks"
    assert found.source == "backend"

    assert manager._get_tool("nonexistent", source="backend") is None


def test_backend_tools_validation(manager: ToolManager):
    """Every backend tool carries metadata and rejects bogus arguments."""
    registered = manager._get_all_tools()
    assert len(registered) > 0
    backend = [t for t in registered if t.source == "backend"]
    assert len(backend) > 0
    for tool in backend:
        assert tool.name
        assert tool.description
        assert tool.parameters
        assert tool.source
        assert tool.mode
        # Unknown keys must fail each tool's validation function.
        ok, message = manager._validation_functions[tool.name](
            {"invalid": "argument"}
        )
        assert ok is False, message
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/ai/tools/test_tool_manager.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_utils/test_once.py | # Copyright 2026 Marimo. All rights reserved.
from typing import Any
import pytest
from marimo._utils.once import once
def test_once_basic_functionality() -> None:
    """A @once function runs on the first call and is a no-op afterwards."""
    runs = 0

    @once
    def increment() -> int:
        nonlocal runs
        runs += 1
        return runs

    assert increment() == 1
    assert runs == 1
    # Every later call is swallowed and yields None.
    assert increment() is None
    assert runs == 1
    increment()
    increment()
    assert runs == 1


def test_once_with_arguments() -> None:
    """Arguments from calls after the first are ignored entirely."""
    runs = 0
    recorded = None

    @once
    def compute(a: int, b: str, c: int = 10) -> tuple[int, str, int]:
        nonlocal runs, recorded
        runs += 1
        recorded = (a, b, c)
        return a, b, c

    assert compute(1, "hello", c=20) == (1, "hello", 20)
    assert runs == 1
    assert recorded == (1, "hello", 20)
    # A second call with different arguments neither runs nor records.
    assert compute(2, "world", c=30) is None
    assert runs == 1
    assert recorded == (1, "hello", 20)


def test_once_with_return_value() -> None:
    """Only the first call produces the wrapped function's return value."""

    @once
    def get_value() -> str:
        return "hello world"

    assert get_value() == "hello world"
    assert get_value() is None


def test_once_with_side_effects() -> None:
    """Side effects fire exactly once no matter how often we call."""
    effects: list[str] = []

    @once
    def fire() -> None:
        effects.append("executed")

    fire()
    assert effects == ["executed"]
    fire()
    fire()
    assert effects == ["executed"]


def test_once_with_exception() -> None:
    """A raising first call still counts as the one allowed invocation."""
    runs = 0

    @once
    def failing_func() -> None:
        nonlocal runs
        runs += 1
        raise ValueError("Test error")

    with pytest.raises(ValueError, match="Test error"):
        failing_func()
    assert runs == 1
    # The wrapper remembers the call even though it raised.
    assert failing_func() is None
    assert runs == 1
def test_once_on_class_method() -> None:
    """Instance methods are guarded independently per instance."""

    class Counter:
        def __init__(self) -> None:
            self.call_count = 0

        @once
        def method(self) -> int:
            self.call_count += 1
            return self.call_count

    first = Counter()
    assert first.method() == 1
    assert first.call_count == 1
    assert first.method() is None
    assert first.call_count == 1

    # A fresh instance gets its own one-shot guard.
    second = Counter()
    assert second.method() == 1
    assert second.call_count == 1
    assert second.method() is None
    assert second.call_count == 1

    # The first instance stays blocked.
    first.method()
    assert first.call_count == 1


def test_once_on_static_method() -> None:
    """Static methods share a single global guard across all callers."""
    runs = 0

    class Holder:
        @staticmethod
        @once
        def static_method() -> int:
            nonlocal runs
            runs += 1
            return runs

    assert Holder.static_method() == 1
    assert runs == 1
    assert Holder.static_method() is None
    assert runs == 1
    # Instance access goes through the same guarded function.
    assert Holder().static_method() is None
    assert runs == 1


def test_once_on_class_method_decorator() -> None:
    """@classmethod composed with @once runs the body a single time."""
    runs = 0

    class Holder:
        @classmethod
        @once
        def class_method(cls) -> int:
            nonlocal runs
            runs += 1
            return runs

    assert Holder.class_method() == 1
    assert runs == 1
    assert Holder.class_method() is None
    assert runs == 1
def test_once_preserves_function_metadata() -> None:
    """The wrapper exposes the wrapped function's name and docstring."""

    @once
    def documented_function() -> str:
        """This is a test function."""
        return "test"

    # functools.wraps-style metadata propagation.
    assert documented_function.__name__ == "documented_function"
    assert documented_function.__doc__ == "This is a test function."
def test_once_multiple_decorators() -> None:
    """@once composes with an arbitrary outer decorator."""
    calls = 0

    def another_decorator(func):
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            return f"decorated: {func(*args, **kwargs)}"

        return wrapper

    @another_decorator
    @once
    def test_func() -> str:
        nonlocal calls
        calls += 1
        return "hello"

    # First call runs the body; the outer decorator wraps the result.
    assert test_func() == "decorated: hello"
    assert calls == 1
    # Later calls wrap @once's None sentinel instead.
    assert test_func() == "decorated: None"
    assert calls == 1
def test_once_with_complex_return_types() -> None:
    """@once returns container values once, then None."""

    @once
    def return_dict() -> dict[str, int]:
        return {"a": 1, "b": 2}

    @once
    def return_list() -> list[str]:
        return ["hello", "world"]

    for fn, expected in (
        (return_dict, {"a": 1, "b": 2}),
        (return_list, ["hello", "world"]),
    ):
        assert fn() == expected  # first call yields the real value
        assert fn() is None  # subsequent calls are suppressed
def test_once_thread_safety_simulation() -> None:
    """Repeated sequential calls behave like a single-shot gate."""
    calls = 0

    @once
    def concurrent_func() -> int:
        nonlocal calls
        calls += 1
        return calls

    # Hammer the function; only the first invocation should run the body.
    outcomes = [concurrent_func() for _ in range(10)]

    assert calls == 1
    assert outcomes[0] == 1
    assert outcomes[1:] == [None] * 9
class OnceClassLevel:
    """Test class to demonstrate per-class behavior of @once."""

    def __init__(self) -> None:
        # Number of times instance_method's body has actually executed.
        self.instance_call_count = 0

    @once
    def instance_method(self) -> str:
        # Runs at most once per instance; later calls return None.
        self.instance_call_count += 1
        return f"instance_{self.instance_call_count}"
def test_once_class_level_behavior() -> None:
    """@once on methods is per-instance, not shared across a class."""
    instances = [OnceClassLevel(), OnceClassLevel(), OnceClassLevel()]

    # Every instance gets its own first (and only) execution.
    for obj in instances:
        assert obj.instance_method() == "instance_1"
        assert obj.instance_call_count == 1

    # Repeat calls are suppressed and do not bump any counter.
    for obj in instances:
        assert obj.instance_method() is None
        assert obj.instance_call_count == 1
def test_once_inheritance() -> None:
    """Test that @once works correctly with inheritance."""

    class Parent:
        def __init__(self) -> None:
            self.parent_calls = 0

        @once
        def parent_method(self) -> str:
            self.parent_calls += 1
            return "parent"

    class Child(Parent):
        def __init__(self) -> None:
            super().__init__()
            self.child_calls = 0

        @once
        def child_method(self) -> str:
            self.child_calls += 1
            return "child"

    child = Child()

    # Inherited method: executes once on the child instance, then is
    # suppressed (returns None) without touching the counter again.
    assert child.parent_method() == "parent"
    assert child.parent_calls == 1
    assert child.parent_method() is None
    assert child.parent_calls == 1

    # Method defined on the subclass behaves identically and independently.
    assert child.child_method() == "child"
    assert child.child_calls == 1
    assert child.child_method() is None
    assert child.child_calls == 1
def test_once_memory_cleanup() -> None:
    """Test that @once properly cleans up memory with weak references.

    NOTE(review): this assumes the @once implementation tracks instances
    weakly, so collected instances do not keep their "already ran" state —
    confirm against marimo._utils.once.
    """
    import gc

    class TestClass:
        def __init__(self, value: str) -> None:
            self.value = value
            self.call_count = 0

        @once
        def method(self) -> str:
            self.call_count += 1
            return f"{self.value}_{self.call_count}"

    # Create instances and call methods
    instance1 = TestClass("test1")
    instance2 = TestClass("test2")
    result1 = instance1.method()
    result2 = instance2.method()
    assert result1 == "test1_1"
    assert result2 == "test2_1"

    # Delete instances
    del instance1
    del instance2
    # Force garbage collection so any weakly-held per-instance state is freed
    gc.collect()

    # Create new instances with same values
    instance3 = TestClass("test1")
    instance4 = TestClass("test2")
    # These should work because old instances were cleaned up
    result3 = instance3.method()
    result4 = instance4.method()
    assert result3 == "test1_1"  # Should execute because it's a new instance
    assert result4 == "test2_1"  # Should execute because it's a new instance
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_once.py",
"license": "Apache License 2.0",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
# Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
import heapq
import logging # noqa: TC003
import threading
from typing import TYPE_CHECKING
from marimo._ast.names import SETUP_CELL_NAME
# Note: load_notebook_ir not used - we do manual compilation for per-cell log capture
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._loggers import capture_output
from marimo._schemas.serialization import CellDef, NotebookSerialization
from marimo._types.ids import CellId_t
if TYPE_CHECKING:
from marimo._lint.rules.base import LintRule
from marimo._runtime.dataflow import DirectedGraph
# Priority mapping: lower numbers = higher priority.
# Used as the first element of the heap entries in LintContext._diagnostics,
# so BREAKING diagnostics surface before RUNTIME, then FORMATTING.
PRIORITY_MAP = {
    Severity.BREAKING: 0,
    Severity.RUNTIME: 1,
    Severity.FORMATTING: 2,
}
class LintContext:
    """Context for lint rule execution with priority queuing and graph caching.

    Shared by all rules checking a single notebook: collects diagnostics in a
    priority heap, lazily builds the dependency graph once, and indexes log
    records captured during notebook loading/compilation.
    """

    def __init__(
        self,
        notebook: NotebookSerialization,
        contents: str = "",
        stderr: str = "",
        stdout: str = "",
        logs: list[logging.LogRecord] | None = None,
    ):
        self.notebook = notebook
        self.contents = contents.splitlines()
        # Heap of (priority, insertion_counter, diagnostic): lower priority
        # sorts first; the counter keeps ordering stable and avoids ever
        # comparing Diagnostic objects.
        self._diagnostics: list[tuple[int, int, Diagnostic]] = []
        self._graph: DirectedGraph | None = None
        self._graph_lock = threading.Lock()
        self._diagnostics_lock: asyncio.Lock | None = None  # Lazy-initialized
        self._counter = 0  # Monotonic counter for stable sorting
        self._last_retrieved_counter = (
            -1
        )  # Track what was last retrieved for streaming
        self.stderr = stderr
        self.stdout = stdout
        # Log records captured while loading the notebook, plus groupings
        # by rule code and by (cell id, rule code).
        self._log_records = logs or []
        self._logs_by_rule: dict[str, list[logging.LogRecord]] = {}
        self._logs_by_cell_and_rule: dict[
            str, dict[str, list[logging.LogRecord]]
        ] = {}
        # Compilation errors keyed by category ("SyntaxError" / "unhandled").
        self._errors: dict[str, list[tuple[Exception, CellDef]]] = {}

    def _get_diagnostics_lock(self) -> asyncio.Lock:
        """Get the diagnostics lock, creating it if needed.

        Created lazily so the context can be constructed outside of a
        running event loop.
        """
        if self._diagnostics_lock is None:
            self._diagnostics_lock = asyncio.Lock()
        return self._diagnostics_lock

    async def add_diagnostic(self, diagnostic: Diagnostic) -> None:
        """Add a diagnostic to the priority queue."""
        priority = 999  # Default low priority
        if diagnostic.severity:
            priority = PRIORITY_MAP.get(diagnostic.severity, priority)

        # Use counter as tiebreaker to avoid comparing Diagnostic objects
        async with self._get_diagnostics_lock():
            heapq.heappush(
                self._diagnostics, (priority, self._counter, diagnostic)
            )
            self._counter += 1

    async def get_diagnostics(self) -> list[Diagnostic]:
        """Get all diagnostics sorted by priority (most severe first)."""
        # Sort by priority and return just the diagnostics
        async with self._get_diagnostics_lock():
            sorted_diagnostics = []
            # Pop from a copy so the underlying heap is left intact.
            temp_heap = self._diagnostics.copy()
            while temp_heap:
                _, _, diagnostic = heapq.heappop(temp_heap)
                sorted_diagnostics.append(diagnostic)
            return sorted_diagnostics

    async def get_new_diagnostics(self) -> list[Diagnostic]:
        """Get diagnostics added since last call, sorted by priority."""
        async with self._get_diagnostics_lock():
            # Find new diagnostics since last retrieval
            new_items = [
                (priority, counter, diagnostic)
                for priority, counter, diagnostic in self._diagnostics
                if counter > self._last_retrieved_counter
            ]

            if not new_items:
                return []

            # Sort by priority (and counter for stability)
            new_items.sort()

            # Extract diagnostics and update counter
            new_diagnostics = []
            max_counter = self._last_retrieved_counter
            for _priority, counter, diagnostic in new_items:
                new_diagnostics.append(diagnostic)
                max_counter = max(max_counter, counter)

            # Update the last retrieved counter
            self._last_retrieved_counter = max_counter

            return new_diagnostics

    def _group_initial_logs(self) -> None:
        """Group initial log records by rule code."""
        for record in self._log_records:
            # Check if record has lint_rule metadata
            lint_rule = getattr(record, "lint_rule", None)
            if hasattr(record, "__dict__") and "lint_rule" in record.__dict__:
                lint_rule = record.__dict__["lint_rule"]

            # Default to MF006 (misc) if no specific rule
            rule_code = lint_rule if lint_rule else "MF006"
            if rule_code not in self._logs_by_rule:
                self._logs_by_rule[rule_code] = []
            self._logs_by_rule[rule_code].append(record)

    def _enhance_cell_logs(
        self,
        cell_logs: list[logging.LogRecord],
        cell_id: str,
        cell_lineno: int,
    ) -> None:
        """Enhance log records with cell information and store globally."""
        for record in cell_logs:
            # Add cell information to the log record
            if hasattr(record, "__dict__"):
                record.__dict__["cell_id"] = cell_id
                record.__dict__["cell_lineno"] = cell_lineno

            lint_rule = getattr(record, "lint_rule", None)
            if hasattr(record, "__dict__") and "lint_rule" in record.__dict__:
                lint_rule = record.__dict__["lint_rule"]

            rule_code = lint_rule if lint_rule else "MF006"
            if rule_code not in self._logs_by_rule:
                self._logs_by_rule[rule_code] = []
            self._logs_by_rule[rule_code].append(record)
        # Keep the global record list in sync with the per-rule index.
        self._log_records.extend(cell_logs)

    def get_graph(self) -> DirectedGraph:
        """Get the dependency graph, constructing it once and caching.

        Compiles each cell individually (instead of using load_notebook_ir)
        so that log output can be captured and attributed per cell.
        Compilation failures are recorded in self._errors rather than raised.
        """
        if self._graph is not None:
            return self._graph

        with self._graph_lock:
            # Double-check pattern for thread safety
            if self._graph is not None:
                return self._graph

            # Group any initial logs
            self._group_initial_logs()

            # Manually compile the graph with per-cell log capture
            from marimo._ast.app import App, InternalApp
            from marimo._ast.cell import CellConfig
            from marimo._ast.cell_manager import CellManager
            from marimo._ast.compiler import ir_cell_factory
            from marimo._schemas.serialization import UnparsableCell

            # Create the app
            app = App(
                **self.notebook.app.options, _filename=self.notebook.filename
            )
            self._graph = app._graph

            # Process each cell individually to capture logs per-cell
            for i, cell in enumerate(self.notebook.cells):
                if isinstance(cell, UnparsableCell):
                    app._unparsable_cell(cell.code, **cell.options)
                    continue

                # Capture logs during this specific cell's compilation
                with capture_output() as (_, _, cell_logs):
                    # Call ir_cell_factory directly with proper exception handling
                    if cell.name == SETUP_CELL_NAME:
                        cell_id = CellId_t(SETUP_CELL_NAME)
                    else:
                        cell_id = app._cell_manager.create_cell_id()
                    filename = self.notebook.filename
                    cell_config = CellConfig.from_dict(cell.options)
                    try:
                        compiled_cell = ir_cell_factory(
                            cell, cell_id=cell_id, filename=filename
                        )
                        compiled_cell._cell.configure(cell_config)
                        # Register the successfully compiled cell
                        app._cell_manager._register_cell(
                            compiled_cell, InternalApp(app)
                        )
                    except SyntaxError as e:
                        # Handle syntax errors like register_ir_cell does
                        app._cell_manager.unparsable = True
                        app._cell_manager.register_cell(
                            cell_id=cell_id,
                            code=cell.code,
                            config=cell_config,
                            name=cell.name,
                            cell=None,
                        )
                        self._errors.setdefault("SyntaxError", []).append(
                            (e, cell)
                        )
                    except Exception as e:
                        self._errors.setdefault("unhandled", []).append(
                            (e, cell)
                        )

                # Enhance logs with cell information and store globally
                simplified_cell_id = f"cell-{i}"
                self._enhance_cell_logs(
                    cell_logs, simplified_cell_id, cell.lineno
                )

            # Initialize the app to register cells in the graph
            cell_manager: CellManager = app._cell_manager
            for cell_id, cell_impl in cell_manager.valid_cells():
                self._graph.register_cell(cell_id, cell_impl._cell)

        return self._graph
class RuleContext:
    """Per-rule view over a LintContext.

    Backfills rule metadata (code, name, severity, fixability, filename) on
    emitted diagnostics and proxies read access to the shared lint state.
    """

    def __init__(self, global_context: LintContext, rule: LintRule):
        self.global_context = global_context
        self.rule = rule

    async def add_diagnostic(self, diagnostic: Diagnostic) -> None:
        """Add a diagnostic, filling unset fields from the rule and notebook."""
        fallbacks = (
            ("code", self.rule.code),
            ("name", self.rule.name),
            ("severity", self.rule.severity),
            ("fixable", self.rule.fixable),
            ("filename", self.global_context.notebook.filename),
        )
        # Only backfill attributes the rule left as None.
        for attr, value in fallbacks:
            if getattr(diagnostic, attr) is None:
                setattr(diagnostic, attr, value)
        await self.global_context.add_diagnostic(diagnostic)

    def get_graph(self) -> DirectedGraph:
        """Dependency graph of the notebook (built lazily and cached)."""
        return self.global_context.get_graph()

    @property
    def contents(self) -> list[str]:
        """Lines of the file being linted."""
        return self.global_context.contents

    @property
    def notebook(self) -> NotebookSerialization:
        """The notebook being linted."""
        return self.global_context.notebook

    @property
    def stdout(self) -> str:
        """Captured stdout from loading the notebook."""
        return self.global_context.stdout

    @property
    def stderr(self) -> str:
        """Captured stderr from loading the notebook."""
        return self.global_context.stderr

    def get_errors(self, key: str) -> list[tuple[Exception, CellDef]]:
        """Compilation errors recorded under *key* (e.g. "SyntaxError")."""
        return self.global_context._errors.get(key, [])

    def get_logs(
        self, rule_code: str | None = None
    ) -> list[logging.LogRecord]:
        """Log records for *rule_code*, or every captured record when None."""
        if rule_code is None:
            return self.global_context._log_records
        return self.global_context._logs_by_rule.get(rule_code, [])

    def get_logs_for_cell(
        self, cell_id: str, rule_code: str | None = None
    ) -> list[logging.LogRecord]:
        """Log records captured while compiling *cell_id*, optionally filtered."""
        per_cell = self.global_context._logs_by_cell_and_rule.get(cell_id)
        if per_cell is None:
            return []
        if rule_code is not None:
            return per_cell.get(rule_code, [])
        # No rule filter: flatten every rule's records for this cell.
        flattened: list[logging.LogRecord] = []
        for records in per_cell.values():
            flattened.extend(records)
        return flattened
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/context.py",
"license": "Apache License 2.0",
"lines": 263,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
# Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import Literal, Optional, cast
from marimo._types.ids import CellId_t
class Severity(Enum):
    """Severity levels for diagnostic errors.

    The enum value doubles as the human-readable label; the trailing
    comments give the rule-code prefix associated with each severity.
    """

    FORMATTING = "formatting"  # prefix: MF0000
    RUNTIME = "runtime"  # prefix: MR0000
    BREAKING = "breaking"  # prefix: MB0000
def line_num(line: int) -> str:
    """Right-align *line* in a four-character field for display."""
    return format(line, "4d")
@dataclass
class Diagnostic:
    """Represents a diagnostic found in a notebook.

    Attributes:
        message: Human-readable description of the issue.
        line: 1-based line number(s) the issue spans.
        column: Column number(s), paired index-wise with ``line``.
        cell_id: Cells the issue belongs to, if known.
        code: Rule code (e.g. "MF006"); backfilled by the rule context.
        name: Rule name; backfilled by the rule context.
        severity: Issue severity; backfilled by the rule context.
        fixable: Whether the issue can be auto-fixed ("unsafe" requires opt-in).
        fix: Suggested fix text, if any.
        filename: File the diagnostic belongs to.
    """

    message: str
    line: int | list[int]
    column: int | list[int]
    cell_id: None | list[CellId_t] = None
    code: Optional[str] = None
    name: Optional[str] = None
    severity: Optional[Severity] = None
    fixable: bool | Literal["unsafe"] | None = None
    fix: Optional[str] = None
    filename: Optional[str] = None

    def format(
        self,
        code_lines: list[str] | None = None,
        formatter: str = "full",
    ) -> str:
        """Format the diagnostic for display.

        Args:
            code_lines: Optional source code lines for context
            formatter: The formatter to use ("full" or "json")

        Returns:
            Formatted diagnostic string

        Raises:
            ValueError: If ``formatter`` names an unsupported formatter.
        """
        # Imported lazily to avoid a circular import with the formatters module.
        from marimo._lint.formatters import (
            DiagnosticFormatter,
            FullFormatter,
            JSONFormatter,
        )

        actual_filename = self.filename or "unknown"

        if formatter == "full":
            fmt: DiagnosticFormatter = FullFormatter()
        elif formatter == "json":
            fmt = JSONFormatter()
        else:
            raise ValueError(f"Unsupported formatter: {formatter}")

        return fmt.format(self, actual_filename, code_lines)

    @property
    def sorted_lines(self) -> tuple[tuple[int, ...], tuple[int, ...]]:
        """Line and column numbers, sorted together by line.

        Returns a ``(lines, columns)`` pair where both tuples are ordered by
        ascending line number and each column stays paired with its line.
        """
        lines: list[int] = (
            self.line if isinstance(self.line, list) else [self.line]
        )
        columns: list[int] = (
            self.column if isinstance(self.column, list) else [self.column]
        )
        # Fix: these tuples have one entry per (line, column) pair, so the
        # correct static type is variable-length ``tuple[int, ...]`` — the
        # previous annotation claimed single-element tuples.
        return cast(
            tuple[tuple[int, ...], tuple[int, ...]],
            tuple(zip(*sorted(zip(lines, columns)))),
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/diagnostic.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
# Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
import re
from collections.abc import AsyncIterator, Iterator
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Union
from marimo._ast.load import get_notebook_status
from marimo._ast.parse import MarimoFileError
from marimo._cli.print import red
from marimo._convert.converters import MarimoConvert
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._lint.formatters import LintResultJSON
from marimo._lint.rule_engine import EarlyStoppingConfig, RuleEngine
from marimo._loggers import capture_output
from marimo._schemas.serialization import NotebookSerialization
from marimo._utils import async_path
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Callable, Iterator
from marimo._lint.rules.base import LintRule
def contents_differ_excluding_generated_with(
    original: str, generated: str
) -> bool:
    """Compare file contents while ignoring __generated_with differences.

    Returns True when the two contents differ after dropping any
    ``__generated_with = ...`` line, so callers can skip rewriting files
    whose only change is version metadata.
    """
    version_line = re.compile(r"^__generated_with = .*$", flags=re.MULTILINE)

    def normalize(text: str) -> str:
        # Strip the version line, then ignore surrounding whitespace.
        return version_line.sub("", text).strip()

    return normalize(original) != normalize(generated)
async def _to_async_iterator(
files_to_check: Union[AsyncIterator[Path], Iterator[Path]],
) -> AsyncIterator[Path]:
"""Convert a regular iterator to an async iterator if needed."""
if hasattr(files_to_check, "__aiter__"):
# Already an async iterator
async for file_path in files_to_check:
yield file_path
else:
# Convert regular iterator to async
for file_path in files_to_check:
yield file_path
@dataclass
class FileStatus:
    """Processing status and results for a single file.

    Exactly one of skipped/failed is set for non-lintable files; for linted
    files both stay False and ``diagnostics`` carries the findings.
    """

    file: str  # File path
    diagnostics: list[Diagnostic] = field(
        default_factory=list
    )  # Found diagnostics
    skipped: bool = False  # File skipped (not a notebook)
    failed: bool = False  # Parsing/processing failed
    message: str = ""  # Status message
    details: list[str] = field(default_factory=list)  # Error details
    notebook: NotebookSerialization | None = None  # Parsed notebook, if any
    contents: str | None = None  # Store original file contents
class Linter:
    """High-level interface for linting and fixing marimo files.

    Orchestrates file-level processing and delegates notebook linting to RuleEngine.
    """

    def __init__(
        self,
        early_stopping: EarlyStoppingConfig | None = None,
        pipe: Callable[[str], None] | None = None,
        fix_files: bool = False,
        unsafe_fixes: bool = False,
        rules: list[LintRule] | None = None,
        ignore_scripts: bool = False,
        formatter: str = "full",
    ):
        if rules is not None:
            self.rule_engine = RuleEngine(rules, early_stopping)
        else:
            self.rule_engine = RuleEngine.create_default(early_stopping)
        self.pipe = pipe
        self.fix_files = fix_files
        self.unsafe_fixes = unsafe_fixes
        self.ignore_scripts = ignore_scripts
        self.formatter = formatter
        self.files: list[FileStatus] = []

        # Create rule lookup for unsafe fixes
        self.rule_lookup = {rule.code: rule for rule in self.rule_engine.rules}

        # File processing state: set once any file fails or produces a
        # BREAKING diagnostic.
        self.errored: bool = False

        # Counters for summary
        self.fixed_count: int = 0
        self.issues_count: int = 0

    async def _process_single_file(self, file: Path) -> FileStatus:
        """Process a single file and return its status.

        Never raises: all failure modes are folded into the returned
        FileStatus (and may set self.errored as a side effect).
        """
        file_path = str(file)
        file_status = FileStatus(file=file_path)

        # Check if file exists first
        if not await async_path.exists(file):
            self.errored = True
            file_status.failed = True
            file_status.message = f"File not found: {file_path}"
            file_status.details = [
                f"FileNotFoundError: No such file or directory: '{file_path}'"
            ]
            return file_status

        # Check if file is a supported notebook format
        if not file_path.endswith((".py", ".md", ".qmd")):
            file_status.skipped = True
            file_status.message = f"Skipped: {file_path} (not a notebook file)"
            return file_status

        try:
            with capture_output() as (stdout, stderr, logs):
                load_result = get_notebook_status(file_path)
        except SyntaxError as e:
            # Handle syntax errors in notebooks
            self.errored = True
            file_status.failed = True
            file_status.message = f"Failed to parse: {file_path}"
            file_status.details = [f"SyntaxError: {str(e)}"]
            return file_status
        except MarimoFileError as e:
            # Raised for files that are not marimo notebooks (e.g. plain
            # scripts); either skip them or report a failure.
            if self.ignore_scripts:
                # Skip this file silently when ignore_scripts is enabled
                file_status.skipped = True
                file_status.message = (
                    f"Skipped: {file_path} (not a marimo notebook)"
                )
                return file_status
            else:
                self.errored = True
                file_status.failed = True
                file_status.message = (
                    f"Not recognizable as a marimo notebook: {file_path}"
                )
                file_status.details = [f"MarimoFileError: {str(e)}"]
                return file_status

        file_status.notebook = load_result.notebook
        file_status.contents = load_result.contents
        if load_result.status == "empty":
            file_status.skipped = True
            file_status.message = f"Skipped: {file_path} (empty file)"
        elif load_result.status == "invalid":
            if self.ignore_scripts:
                # Skip this file silently when ignore_scripts is enabled
                file_status.skipped = True
                file_status.message = (
                    f"Skipped: {file_path} (not a marimo notebook)"
                )
                return file_status
            else:
                file_status.failed = True
                file_status.message = (
                    f"Failed to parse: {file_path} (not a valid notebook)"
                )
        elif load_result.notebook is not None:
            try:
                # Check notebook with all rules including parsing
                file_status.diagnostics = (
                    await self.rule_engine.check_notebook(
                        load_result.notebook,
                        load_result.contents or "",
                        # Add parsing rule if there's captured output
                        stdout=stdout.getvalue().strip(),
                        stderr=stderr.getvalue().strip(),
                        logs=logs,
                    )
                )
            except Exception as e:
                # Handle other parsing errors
                self.errored = True
                file_status.failed = True
                file_status.message = f"Failed to process {file_path}"
                file_status.details = [str(e)]
        else:
            # Status is valid but no notebook - shouldn't happen but handle gracefully
            file_status.skipped = True
            file_status.message = f"Skipped: {file_path} (no notebook content)"

        # Ensure diagnostics list is initialized for cases where no processing happened
        # NOTE(review): FileStatus is a dataclass with a default_factory for
        # `diagnostics`, so this attribute always exists; the guard below is
        # effectively dead code kept for safety.
        if not hasattr(file_status, "diagnostics"):
            file_status.diagnostics = []

        return file_status

    async def _run_stream(
        self, files_to_check: list[Path]
    ) -> AsyncIterator[FileStatus]:
        """Asynchronously check files and yield results as they complete."""
        # Create tasks for all files
        tasks = [
            asyncio.create_task(self._process_single_file(file_path))
            for file_path in files_to_check
        ]

        # Yield results as they complete (completion order, not input order)
        for task in asyncio.as_completed(tasks):
            file_status = await task
            yield file_status

    def _pipe_file_status(self, file_status: FileStatus) -> None:
        """Send file status through pipe for real-time output.

        Also updates the summary counters (issues_count, errored), even when
        no pipe is configured.
        """
        for diagnostic in file_status.diagnostics:
            will_fix = self.fix_files and (
                diagnostic.fixable is True
                or (diagnostic.fixable == "unsafe" and self.unsafe_fixes)
            )
            # Diagnostics that will be auto-fixed are not counted as issues.
            if not will_fix:
                self.issues_count += 1
            if diagnostic.severity == Severity.BREAKING:
                self.errored = True
        if file_status.failed:
            self.errored = True

        if not self.pipe:
            return

        if file_status.skipped:
            # Don't output skipped files unless they failed
            return
        elif file_status.failed:
            self.pipe(red(file_status.message))
            for detail in file_status.details:
                self.pipe(red(f"{detail}"))
        else:
            # Show diagnostics immediately as they're found
            for diagnostic in file_status.diagnostics:
                self.pipe(diagnostic.format(formatter=self.formatter))

    @staticmethod
    def _generate_file_contents_from_notebook(
        notebook: NotebookSerialization, filename: str
    ) -> str:
        """Generate file contents from notebook serialization."""
        converter = MarimoConvert.from_ir(notebook)
        # Suppress any logging/stdout noise produced during conversion.
        with capture_output():
            if filename.endswith((".md", ".qmd")):
                return converter.to_markdown()
            else:
                return converter.to_py()

    @staticmethod
    def _generate_file_contents(file_status: FileStatus) -> str:
        """Generate file contents from notebook serialization."""
        if file_status.notebook is None:
            raise ValueError(
                "Cannot generate contents for file without notebook"
            )
        return Linter._generate_file_contents_from_notebook(
            file_status.notebook, file_status.file
        )

    def run_streaming(
        self, files_to_check: Union[AsyncIterator[Path], Iterator[Path]]
    ) -> None:
        """Run linting checks with real-time streaming output."""
        asyncio.run(self._run_streaming_async(files_to_check))

    async def _run_streaming_async(
        self, files_to_check: Union[AsyncIterator[Path], Iterator[Path]]
    ) -> None:
        """Internal async implementation of run_streaming."""
        # Process files as they complete
        fixed_count = 0

        # Convert to async iterator and process
        async for file_path in _to_async_iterator(files_to_check):
            file_status = await self._process_single_file(file_path)
            self.files.append(file_status)

            # Stream output via pipe if available
            self._pipe_file_status(file_status)

            # Add to fix queue and potentially fix if requested
            if self.fix_files and not (
                file_status.skipped
                or file_status.failed
                or file_status.notebook is None
            ):
                if await self.fix(file_status):
                    fixed_count += 1
                    if self.pipe:
                        self.pipe(f"Updated: {file_status.file}")

        self.fixed_count = fixed_count

    async def fix(self, file_status: FileStatus) -> bool:
        """Fix a single file and write to disk.

        Returns:
            True if file was modified and written, False otherwise
        """
        if file_status.notebook is None or file_status.contents is None:
            return False

        # Apply unsafe fixes if enabled
        modified_notebook = file_status.notebook
        if self.unsafe_fixes:
            # Collect diagnostics by rule code
            from collections import defaultdict

            from marimo._lint.rules.base import UnsafeFixRule

            diagnostics_by_rule = defaultdict(list)
            for diagnostic in file_status.diagnostics:
                if (
                    diagnostic.fixable == "unsafe"
                    and diagnostic.code in self.rule_lookup
                ):
                    diagnostics_by_rule[diagnostic.code].append(diagnostic)

            # Apply unsafe fixes once per rule
            for rule_code, diagnostics in diagnostics_by_rule.items():
                rule = self.rule_lookup[rule_code]
                if isinstance(rule, UnsafeFixRule):
                    # Apply fix once per rule with all its diagnostics
                    modified_notebook = rule.apply_unsafe_fix(
                        modified_notebook, diagnostics
                    )

        # Generate file contents from (possibly modified) notebook
        generated_contents = Linter._generate_file_contents_from_notebook(
            modified_notebook, file_status.file
        )

        # Only write if content changed (excluding __generated_with differences)
        if contents_differ_excluding_generated_with(
            file_status.contents, generated_contents
        ):
            # Write off the event loop thread; blocking file I/O.
            await asyncio.to_thread(
                Path(file_status.file).write_text,
                generated_contents,
                encoding="utf-8",
            )
            return True
        return False

    def get_json_result(self) -> LintResultJSON:
        """Get complete JSON result with diagnostics and summary."""
        from marimo._lint.formatters import (
            FileErrorJSON,
            IssueJSON,
            JSONFormatter,
        )

        json_formatter = JSONFormatter()
        issues: list[IssueJSON] = []

        for file_status in self.files:
            if file_status.failed:
                # Add file-level errors
                error: FileErrorJSON = {
                    "type": "error",
                    "filename": file_status.file,
                    "error": file_status.message,
                }
                issues.append(error)
            elif not file_status.skipped:
                # Add diagnostics from successfully processed files
                for diagnostic in file_status.diagnostics:
                    diagnostic_dict = json_formatter.to_json_dict(
                        diagnostic, file_status.file
                    )
                    issues.append(diagnostic_dict)

        return LintResultJSON(
            issues=issues,
            summary={
                "total_files": len(self.files),
                "files_with_issues": len(
                    [
                        f
                        for f in self.files
                        if (f.diagnostics and not f.skipped and not f.failed)
                        or f.failed
                    ]
                ),
                "total_issues": self.issues_count,
                "fixed_issues": self.fixed_count,
                "errored": self.errored,
            },
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/linter.py",
"license": "Apache License 2.0",
"lines": 353,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
# Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Optional
from marimo._lint.context import LintContext, RuleContext
from marimo._lint.diagnostic import Severity
from marimo._lint.rules import RULE_CODES
from marimo._schemas.serialization import NotebookSerialization
if TYPE_CHECKING:
import logging
from collections.abc import AsyncIterator
from marimo._lint.diagnostic import Diagnostic
from marimo._lint.rules.base import LintRule
class EarlyStoppingConfig:
    """Configuration for early stopping behavior.

    A falsy/None field disables that particular stopping criterion.
    """

    def __init__(
        self,
        stop_on_breaking: bool = False,
        stop_on_runtime: bool = False,
        max_diagnostics: Optional[int] = None,
        stop_on_first_of_severity: Optional[Severity] = None,
    ):
        self.stop_on_breaking = stop_on_breaking
        self.stop_on_runtime = stop_on_runtime
        self.max_diagnostics = max_diagnostics
        self.stop_on_first_of_severity = stop_on_first_of_severity

    def should_stop(self, diagnostic: Diagnostic, total_count: int) -> bool:
        """Check if we should stop processing based on this diagnostic."""
        # Hard cap on the total number of reported diagnostics.
        if self.max_diagnostics and total_count >= self.max_diagnostics:
            return True
        severity = diagnostic.severity
        if (
            self.stop_on_first_of_severity
            and severity == self.stop_on_first_of_severity
        ):
            return True
        if self.stop_on_breaking and severity == Severity.BREAKING:
            return True
        return bool(self.stop_on_runtime and severity == Severity.RUNTIME)
class RuleEngine:
    """Orchestrates lint rules and provides checking functionality for a single notebook."""

    def __init__(
        self,
        rules: list[LintRule],
        early_stopping: Optional[EarlyStoppingConfig] = None,
    ):
        self.rules = rules
        self.early_stopping = early_stopping or EarlyStoppingConfig()

    async def check_notebook_streaming(
        self,
        notebook: NotebookSerialization,
        contents: str = "",
        stdout: str = "",
        stderr: str = "",
        logs: list[logging.LogRecord] | None = None,
    ) -> AsyncIterator[Diagnostic]:
        """Check notebook and yield diagnostics as they become available.

        Runs all rules concurrently and streams diagnostics in priority
        order as rule tasks complete. Honors the early-stopping config by
        cancelling outstanding rule tasks.
        """
        ctx = LintContext(notebook, contents, stdout, stderr, logs)

        # Create tasks for all rules with their completion tracking
        pending_tasks = {
            asyncio.create_task(rule.check(RuleContext(ctx, rule))): rule
            for rule in self.rules
        }

        diagnostic_count = 0

        # Process rules as they complete
        while pending_tasks:
            # Wait for at least one task to complete
            done, pending = await asyncio.wait(
                pending_tasks.keys(), return_when=asyncio.FIRST_COMPLETED
            )

            # Update pending tasks
            for task in done:
                del pending_tasks[task]

            # Get any new diagnostics and yield them in priority order
            new_diagnostics = await ctx.get_new_diagnostics()
            for diagnostic in new_diagnostics:
                diagnostic_count += 1
                yield diagnostic

                # Check for early stopping
                if self.early_stopping.should_stop(
                    diagnostic, diagnostic_count
                ):
                    # Cancel remaining tasks
                    for task in pending_tasks.keys():
                        task.cancel()
                    # Wait for cancellations to complete
                    await asyncio.gather(
                        *pending_tasks.keys(), return_exceptions=True
                    )
                    return

    async def check_notebook(
        self,
        notebook: NotebookSerialization,
        contents: str = "",
        stdout: str = "",
        stderr: str = "",
        logs: list[logging.LogRecord] | None = None,
    ) -> list[Diagnostic]:
        """Check notebook for all lint rule violations using async execution."""
        diagnostics = []
        async for diagnostic in self.check_notebook_streaming(
            notebook, contents, stdout, stderr, logs
        ):
            diagnostics.append(diagnostic)
        return diagnostics

    def check_notebook_sync(
        self,
        notebook: NotebookSerialization,
        stdout: str = "",
        stderr: str = "",
    ) -> list[Diagnostic]:
        """Synchronous wrapper for check_notebook for backward compatibility."""
        # Fix: pass stdout/stderr by keyword. The previous positional call
        # check_notebook(notebook, stdout, stderr) bound `stdout` to the
        # `contents` parameter and `stderr` to `stdout`.
        return asyncio.run(
            self.check_notebook(notebook, stdout=stdout, stderr=stderr)
        )

    @classmethod
    def create_default(
        cls, early_stopping: Optional[EarlyStoppingConfig] = None
    ) -> RuleEngine:
        """Create a RuleEngine with all default rules."""
        # TODO: Filter rules based on user configuration if needed
        rules = [rule() for rule in RULE_CODES.values()]
        return cls(rules, early_stopping)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/rule_engine.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_lint/rules/base.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Literal
from marimo._lint.diagnostic import Severity
if TYPE_CHECKING:
from marimo._lint.context import RuleContext
from marimo._lint.diagnostic import Diagnostic
from marimo._schemas.serialization import NotebookSerialization
class LintRule(ABC):
    """Abstract interface implemented by every lint rule.

    Concrete subclasses declare identifying metadata as class attributes
    and implement :meth:`check` to report violations.
    """

    # Metadata every concrete rule must provide.
    code: str
    name: str
    description: str
    severity: Severity
    fixable: bool | Literal["unsafe"]

    @abstractmethod
    async def check(self, ctx: RuleContext) -> None:
        """Inspect the notebook through *ctx* and record any violations."""
        ...
class UnsafeFixRule(LintRule):
    """Lint rule that can additionally rewrite the notebook IR to fix itself."""

    @abstractmethod
    def apply_unsafe_fix(
        self, notebook: NotebookSerialization, diagnostics: list[Diagnostic]
    ) -> NotebookSerialization:
        """Return *notebook* rewritten to address *diagnostics*.

        Args:
            notebook: The notebook to modify
            diagnostics: List of diagnostics that triggered this fix

        Returns:
            Modified notebook serialization
        """
        ...
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/rules/base.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_lint/rules/breaking/graph.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from abc import abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING
from marimo._ast.parse import ast_parse
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._lint.rules.base import LintRule
from marimo._lint.validate_graph import (
check_for_cycles,
check_for_invalid_root,
check_for_multiple_definitions,
)
from marimo._lint.visitors import VariableLineVisitor
from marimo._types.ids import CellId_t
from marimo._utils.cell_matching import match_cell_ids_by_similarity
if TYPE_CHECKING:
from marimo._lint.context import RuleContext
from marimo._runtime.dataflow import DirectedGraph
from marimo._schemas.serialization import CellDef
@dataclass
class _ErrorInfo:
cell_id: CellId_t
line: int
column: int
class GraphRule(LintRule):
    """Base class for graph-based lint rules that analyze the dependency graph.

    This class provides the foundation for runtime lint rules that need to analyze
    the cell dependency graph to detect issues like cycles, multiple definitions,
    and setup cell violations. These rules ensure marimo's core constraints are
    maintained for reproducible, executable notebooks.

    The dependency graph represents how cells depend on each other through variable
    definitions and references. marimo uses this graph to determine execution order
    and enforce constraints that make notebooks reliable and shareable.

    See Also:
        - https://docs.marimo.io/guides/understanding_errors/ (Understanding errors)
        - https://docs.marimo.io/guides/editor_features/understanding_dataflow/ (Dataflow)
    """

    def _get_cell_from_id(
        self, cell_id: CellId_t, ctx: RuleContext
    ) -> CellDef | None:
        """Get the corresponding CellDef from notebook serialization for a given cell_id.

        Graph cell ids and notebook cell positions are different namespaces;
        this maps between them by code similarity. Returns None when no
        matching cell can be found.
        """
        # For setup cells, use the special setup cell name
        if cell_id == CellId_t("setup"):
            for cell in ctx.notebook.cells:
                if cell.name == CellId_t("setup"):
                    return cell
            return None
        # Use cell matching to map graph cell IDs to notebook cells
        graph = ctx.get_graph()
        # Build code mappings for cell matching using position numbers.
        # Setup cells are excluded on both sides so positions line up.
        graph_codes = {
            cid: cell.code
            for cid, cell in graph.cells.items()
            if cid != CellId_t("setup")
        }
        # Notebook side is keyed by stringified position index, so the
        # matched value can be converted back to a list index below.
        notebook_codes = {
            CellId_t(str(i)): cell.code
            for i, cell in enumerate(ctx.notebook.cells)
            if cell.name != CellId_t("setup")
        }
        # Match cell IDs using the existing cell matching system
        cell_mapping = match_cell_ids_by_similarity(
            graph_codes, notebook_codes
        )
        # Find the notebook cell that matches this graph cell_id
        if cell_id in cell_mapping:
            notebook_position = cell_mapping[cell_id]
            # Get the cell at the matched position (non-setup cells only,
            # mirroring the enumeration used to build notebook_codes)
            non_setup_cells = [
                cell
                for cell in ctx.notebook.cells
                if cell.name != CellId_t("setup")
            ]
            position = int(notebook_position)
            if 0 <= position < len(non_setup_cells):
                return non_setup_cells[position]
        return None

    def _get_variable_line_info(
        self, cell_id: CellId_t, variable_name: str, ctx: RuleContext
    ) -> tuple[int, int]:
        """Get line and column info for a specific variable within a cell.

        Returns a (line, column) pair in notebook-file coordinates, falling
        back to the cell's own position, then (0, 0) when the cell itself
        cannot be located.
        """
        target_cell = self._get_cell_from_id(cell_id, ctx)
        if target_cell:
            # Parse the cell code to find the variable definition
            tree = ast_parse(target_cell.code)
            visitor = VariableLineVisitor(variable_name)
            visitor.visit(tree)
            if visitor.line_number:
                # Translate the cell-relative position to file coordinates
                # (visitor line is 1-based, hence the -1 adjustment).
                return (
                    target_cell.lineno + visitor.line_number - 1,
                    target_cell.col_offset + visitor.column_number,
                )
            # Fallback to cell line info
            return target_cell.lineno, target_cell.col_offset + 1
        # Fallback to (0, 0) to indicate unknown line/column
        return 0, 0

    async def check(self, ctx: RuleContext) -> None:
        """Perform graph-based validation using the provided context."""
        # Get the graph from context (cached)
        graph = ctx.get_graph()

        # Call the specific validation method
        await self._validate_graph(graph, ctx)

    @abstractmethod
    async def _validate_graph(
        self, graph: DirectedGraph, ctx: RuleContext
    ) -> None:
        """Abstract method to validate the graph and add diagnostics to context.

        Args:
            graph: The dependency graph to validate
            ctx: The lint context to add diagnostics to
        """
        pass
class MultipleDefinitionsRule(GraphRule):
    """MB002: Multiple cells define the same variable.

    marimo requires that each variable be defined in only one cell. This constraint
    ensures that notebooks are reproducible, executable as scripts, and shareable
    as web apps with better performance than streamlit.

    When a variable is defined in multiple cells, marimo cannot determine which
    definition to use, leading to unpredictable behavior and hidden bugs.

    ## What it does

    Analyzes the dependency graph to detect variables that are defined in more
    than one cell, which violates marimo's fundamental constraint for reactive execution.

    ## Why is this bad?

    Multiple definitions prevent marimo from:
    - Determining the correct execution order
    - Creating a reliable dependency graph
    - Running notebooks as scripts
    - Providing consistent reactive updates

    This is a breaking error because it makes the notebook non-executable.

    ## Examples

    **Problematic:**
    ```python
    # Cell 1
    x = 1

    # Cell 2
    x = 2  # Error: x defined in multiple cells
    ```

    **Solution:**
    ```python
    # Cell 1
    x = 1

    # Cell 2
    y = 2  # Use different variable name
    ```

    ## References

    - [Multiple Definitions Guide](https://docs.marimo.io/guides/understanding_errors/multiple_definitions/)
    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    """

    code = "MB002"
    name = "multiple-definitions"
    description = "Multiple cells define the same variable"
    severity = Severity.BREAKING
    fixable = False

    async def _validate_graph(
        self, graph: DirectedGraph, ctx: RuleContext
    ) -> None:
        """Report one diagnostic per variable defined in multiple cells."""
        validation_errors = check_for_multiple_definitions(graph)

        # Group error locations by the variable name they refer to.
        by_name: dict[str, list[_ErrorInfo]] = {}
        for cell_id, error_list in validation_errors.items():
            for error in error_list:
                # Resolve the precise definition site for this variable.
                line, column = self._get_variable_line_info(
                    cell_id, error.name, ctx
                )
                by_name.setdefault(error.name, []).append(
                    _ErrorInfo(cell_id=cell_id, line=line, column=column)
                )

        for name, infos in by_name.items():
            await ctx.add_diagnostic(
                Diagnostic(
                    message=f"Variable '{name}' is defined in multiple cells",
                    cell_id=[info.cell_id for info in infos],
                    line=[info.line for info in infos],
                    column=[info.column for info in infos],
                    code=self.code,
                    name=self.name,
                    severity=self.severity,
                    fixable=self.fixable,
                    fix=(
                        "Variables must be unique across cells. Alternatively, "
                        f"they can be private with an underscore prefix (i.e. `_{name}`.)"
                    ),
                )
            )
class CycleDependenciesRule(GraphRule):
    """MB003: Cells have circular dependencies.

    marimo prevents circular dependencies between cells to ensure a well-defined
    execution order. If cell A declares variable 'a' and reads variable 'b', then
    cell B cannot declare 'b' and read 'a' without creating a cycle.

    Cycles make notebooks non-reproducible and prevent marimo from determining
    the correct execution order, leading to undefined behavior.

    ## What it does

    Analyzes the dependency graph to detect circular references between cells,
    where cells depend on each other in a way that creates an impossible
    execution order.

    ## Why is this bad?

    Circular dependencies prevent marimo from:
    - Determining a valid execution order
    - Running notebooks reproducibly
    - Executing notebooks as scripts
    - Providing reliable reactive updates

    This is a breaking error because it makes the notebook non-executable.

    ## Examples

    **Problematic:**
    ```python
    # Cell 1
    a = b + 1  # Reads b

    # Cell 2
    b = a + 1  # Reads a -> Cycle!
    ```

    **Solution:**
    ```python
    # Cell 1
    a = 1

    # Cell 2
    b = a + 1  # Unidirectional dependency
    ```

    ## References

    - [Cycles Guide](https://docs.marimo.io/guides/understanding_errors/cycles/)
    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    """

    code = "MB003"
    name = "cycle-dependencies"
    description = "Cells have circular dependencies"
    severity = Severity.BREAKING
    fixable = False

    async def _validate_graph(
        self, graph: DirectedGraph, ctx: RuleContext
    ) -> None:
        """Report one diagnostic per distinct cycle found in the graph."""
        validation_errors = check_for_cycles(graph)

        # A cycle is reported from every participating cell; dedupe on the
        # cycle's edge set so each cycle yields exactly one diagnostic.
        reported = set()
        for _, error_list in validation_errors.items():
            for error in error_list:
                if error.edges_with_vars in reported:
                    continue
                reported.add(error.edges_with_vars)

                cells: list = []
                lines: list = []
                columns: list = []
                for edge_cell_id, variables, _ in error.edges_with_vars:
                    # Locate each variable participating in the cycle.
                    for variable in variables:
                        line, column = self._get_variable_line_info(
                            edge_cell_id, variable, ctx
                        )
                        cells.append(edge_cell_id)
                        lines.append(line)
                        columns.append(column)

                await ctx.add_diagnostic(
                    Diagnostic(
                        message="Cell is part of a circular dependency",
                        cell_id=cells,
                        line=lines,
                        column=columns,
                        code=self.code,
                        name=self.name,
                        severity=self.severity,
                        fixable=self.fixable,
                    )
                )
class SetupCellDependenciesRule(GraphRule):
    """MB004: Setup cell cannot have dependencies.

    The setup cell in marimo is special - it runs first and can define variables
    that are available to all other cells. However, the setup cell itself cannot
    depend on variables defined in other cells, as this would create a dependency
    cycle and violate marimo's execution model.

    The setup cell is designed for imports, configuration, and other initialization
    code that should run before any other cells execute.

    ## What it does

    Validates that the setup cell (if present) does not depend on variables
    defined in other cells, ensuring proper execution order.

    ## Why is this bad?

    Setup cell dependencies break marimo's execution model because:
    - The setup cell must run first to initialize the notebook
    - Dependencies on other cells would create impossible execution order
    - It violates the setup cell's purpose as initialization code

    This is a breaking error because it makes the notebook non-executable.

    ## Examples

    **Problematic:**
    ```python
    # Setup cell
    y = x + 1  # Error: setup depends on other cells

    # Cell 1
    x = 1
    ```

    **Solution:**
    ```python
    # Setup cell
    y = 1  # Setup defines its own variables

    # Cell 1
    x = y + 1  # Other cells can use setup variables
    ```

    ## References

    - [Setup References Guide](https://docs.marimo.io/guides/understanding_errors/setup/)
    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    """

    code = "MB004"
    name = "setup-cell-dependencies"
    description = "Setup cell cannot have dependencies"
    severity = Severity.BREAKING
    fixable = False

    async def _validate_graph(
        self, graph: DirectedGraph, ctx: RuleContext
    ) -> None:
        """Emit a diagnostic for every invalid setup-cell dependency."""
        validation_errors = check_for_invalid_root(graph)
        for cell_id, error_list in validation_errors.items():
            # One diagnostic per recorded violation on this cell.
            for _ in error_list:
                # Resolve the cell's position in the notebook source,
                # falling back to (0, 0) when it cannot be located.
                cell = self._get_cell_from_id(cell_id, ctx)
                if cell:
                    line, column = cell.lineno, cell.col_offset + 1
                else:
                    line, column = 0, 0

                await ctx.add_diagnostic(
                    Diagnostic(
                        message="Setup cell cannot have dependencies",
                        cell_id=[cell_id],
                        line=line,
                        column=column,
                        code=self.code,
                        name=self.name,
                        severity=self.severity,
                        fixable=self.fixable,
                    )
                )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/rules/breaking/graph.py",
"license": "Apache License 2.0",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_lint/rules/breaking/unparsable.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._lint.rules.base import LintRule
from marimo._schemas.serialization import UnparsableCell
if TYPE_CHECKING:
from marimo._lint.context import RuleContext
class UnparsableRule(LintRule):
    """MB001: Cell contains unparsable code.

    This rule detects cells that contain code that cannot be parsed as valid Python.
    Unparsable cells typically occur when a notebook file is corrupted, contains invalid
    syntax, or has encoding issues that prevent proper parsing.

    ## What it does

    Identifies cells that cannot be parsed into valid Python AST nodes, indicating
    fundamental syntax or encoding problems that prevent the notebook from being loaded.

    ## Why is this bad?

    Unparsable cells prevent the notebook from running as a script and will throw
    errors when executed in notebook mode. While marimo can still open the notebook,
    these cells cannot be run until the parsing issues are resolved.

    ## Examples

    **Problematic:**
    ```python
    # Cell with encoding issues or corrupt data
    x = 1 \\x00\\x01\\x02  # Binary data in source
    ```

    **Problematic:**
    ```python
    # Cell with fundamental syntax errors
    def func(
    # Missing closing parenthesis and body
    ```

    **Solution:**
    ```python
    # Fix syntax errors and encoding issues
    def func():
        return 42
    ```

    ## References

    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    """

    code = "MB001"
    name = "unparsable-cells"
    description = "Cell contains unparsable code"
    severity = Severity.BREAKING
    fixable = False

    async def check(self, ctx: RuleContext) -> None:
        """Emit a diagnostic for every unparsable cell in the notebook."""
        for cell in ctx.notebook.cells:
            if not isinstance(cell, UnparsableCell):
                continue
            # Anchor the diagnostic at the cell's own position.
            await ctx.add_diagnostic(
                Diagnostic(
                    message="Notebook contains unparsable code",
                    cell_id=None,
                    line=cell.lineno,
                    column=cell.col_offset,
                )
            )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/rules/breaking/unparsable.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_lint/rules/formatting/general.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._lint.rules.base import LintRule
if TYPE_CHECKING:
from marimo._lint.context import RuleContext
class GeneralFormattingRule(LintRule):
    """MF001: General formatting issues with the notebook format.

    This rule detects violations in the marimo notebook file format that affect
    the structure and metadata of the notebook. These issues typically arise
    when notebook files are manually edited or corrupted during parsing.

    ## What it does

    Examines the notebook serialization for structural violations such as:
    - Missing or incorrect marimo import statements
    - Improperly formatted cell definitions
    - Missing app initialization code
    - Incorrect file generation metadata

    ## Why is this bad?

    Format violations can prevent marimo from properly loading or executing
    notebooks. While these don't affect the Python code logic, formatting errors
    mark a deviation in the expected script structure, which can lead to
    unexpected behavior when run as a script, or when loading the notebook.

    ## Examples

    **Problematic:**
    ```python
    # Missing marimo import
    @app.cell
    def __():
        return

    if __name__ == "__main__":
        app.run()
    ```

    **Solution:**
    ```python
    import marimo

    __generated_with = "0.1.0"
    app = marimo.App()

    @app.cell
    def __():
        return

    if __name__ == "__main__":
        app.run()
    ```

    **Note:** Most format issues are automatically fixable with `marimo check --fix`.

    ## References

    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    - [File Format Documentation](https://docs.marimo.io/guides/coming_from/jupyter/#marimo-file-format)
    """

    code = "MF001"
    name = "general-formatting"
    description = "General formatting issues with the notebook format."
    severity = Severity.FORMATTING
    fixable = True

    async def check(self, ctx: RuleContext) -> None:
        """Check for general formatting issues by extracting violations from serialization."""
        # Import the violation constants to check for specific types
        from marimo._ast.parse import (
            EXPECTED_GENERATED_WITH_VIOLATION,
            UNEXPECTED_STATEMENT_APP_INIT_VIOLATION,
            UNEXPECTED_STATEMENT_CELL_DEF_VIOLATION,
            UNEXPECTED_STATEMENT_MARIMO_IMPORT_VIOLATION,
        )

        # The violation descriptions that the fixer knows how to repair.
        fixable_descriptions = frozenset(
            (
                UNEXPECTED_STATEMENT_CELL_DEF_VIOLATION,
                UNEXPECTED_STATEMENT_MARIMO_IMPORT_VIOLATION,
                EXPECTED_GENERATED_WITH_VIOLATION,
                UNEXPECTED_STATEMENT_APP_INIT_VIOLATION,
            )
        )

        # Turn every serialization violation into a diagnostic.
        for violation in ctx.notebook.violations:
            await ctx.add_diagnostic(
                Diagnostic(
                    message=violation.description,
                    cell_id=[],  # Violations don't have cell_id
                    line=violation.lineno - 1,  # Convert 1-based to 0-based
                    column=violation.col_offset + 1,
                    fixable=violation.description in fixable_descriptions,
                )
            )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/rules/formatting/general.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_lint/rules/formatting/parsing.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import logging
import re
from typing import TYPE_CHECKING
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._lint.rules.base import LintRule
if TYPE_CHECKING:
from marimo._lint.context import RuleContext
class StdoutRule(LintRule):
    """MF002: Parse captured stdout during notebook loading.

    This rule processes any output that was captured from stdout while marimo
    was parsing and loading a notebook file. Stdout output during parsing
    typically indicates warnings or informational messages from the Python
    interpreter or imported modules.

    ## What it does

    Captures and parses stdout output during notebook loading, looking for
    structured warning messages that include file and line number references.
    Creates diagnostics from any warnings or messages found.

    ## Why is this bad?

    While stdout output doesn't prevent execution, it often indicates:
    - Deprecation warnings from imported libraries
    - Configuration issues
    - Potential compatibility problems
    - Code that produces unexpected side effects during import

    ## Examples

    **Captured stdout:**
    ```
    notebook.py:15: DeprecationWarning: 'imp' module is deprecated
    ```

    **Result:** Creates a diagnostic pointing to line 15 with the deprecation warning.

    ## References

    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    """

    code = "MF002"
    name = "parse-stdout"
    description = "Parse captured stdout during notebook loading"
    severity = Severity.FORMATTING
    fixable = False

    async def check(self, ctx: RuleContext) -> None:
        """Process captured stdout and create diagnostics."""
        # Pattern to match file:line format (e.g., "file.py:68: SyntaxWarning")
        line_pattern = re.compile(r"([^:]+):(\d+):\s*(.+)")

        # Split stdout by lines to handle multiple warnings
        lines = ctx.stdout.strip().split("\n")
        captured = False
        for line in lines:
            line = line.strip()
            if not line:
                continue

            # Check if this line contains a file:line reference
            match = line_pattern.match(line)
            if match:
                captured = True
                _, line_num_str, warning_msg = match.groups()
                await ctx.add_diagnostic(
                    Diagnostic(
                        message=warning_msg,
                        line=int(line_num_str) - 1,  # Convert to 0-based index
                        cell_id=None,
                        column=0,
                    )
                )

        # Fallback: if no line patterns found, create single diagnostic.
        # Fix: report the captured stdout here — the previous code
        # interpolated ctx.stderr into a stdout diagnostic.
        if not captured and ctx.stdout:
            await ctx.add_diagnostic(
                Diagnostic(
                    message=f"Parsing warning: {ctx.stdout}",
                    cell_id=None,
                    line=0,
                    column=0,
                )
            )
class StderrRule(LintRule):
    """MF003: Parse captured stderr during notebook loading.

    This rule processes any output that was captured from stderr while marimo
    was parsing and loading a notebook file. Stderr output typically contains
    warnings and error messages from the Python interpreter, such as syntax
    warnings, deprecation notices, and import errors.

    ## What it does

    Captures stderr output during notebook loading and creates diagnostics
    from any error messages or warnings. This helps identify potential
    issues that don't prevent parsing but may affect runtime behavior.

    ## Why is this bad?

    Stderr output during parsing often indicates:
    - Syntax warnings (like invalid escape sequences)
    - Import warnings or errors
    - Deprecation notices from libraries
    - Configuration issues that might affect execution

    While these don't break the notebook, they can lead to unexpected
    behavior or indicate code that needs updating.

    ## Examples

    **Captured stderr:**
    ```
    notebook.py:68: SyntaxWarning: invalid escape sequence '\\l'
    ```

    **Result:** Creates a diagnostic pointing to line 68 about the invalid escape sequence.

    **Common issues:**
    - Raw strings needed: `r"\\path\\to\\file"` instead of `"\\path\\to\\file"`
    - Deprecated library usage
    - Missing import dependencies

    ## References

    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    - [Python Warning Categories](https://docs.python.org/3/library/warnings.html#warning-categories)
    """

    code = "MF003"
    name = "parse-stderr"
    description = "Parse captured stderr during notebook loading"
    severity = Severity.FORMATTING
    fixable = False

    async def check(self, ctx: RuleContext) -> None:
        """Surface any captured stderr as a single diagnostic."""
        if not ctx.stderr:
            return
        diagnostic = Diagnostic(
            message=f"stderr: {ctx.stderr}",
            cell_id=None,
            line=0,
            column=0,
        )
        await ctx.add_diagnostic(diagnostic)
class SqlParseRule(LintRule):
    """MF005: SQL parsing errors during dependency analysis.

    This rule processes log messages captured when marimo encounters errors
    while parsing SQL statements in notebook cells. SQL parsing is used for
    dependency analysis and dataframe tracking.

    ## What it does

    Captures SQL parsing error logs and creates diagnostics pointing to
    problematic SQL statements in cells.

    ## Why is this bad?

    SQL parsing failures can lead to:
    - Incorrect dependency analysis for SQL-using cells
    - Missing dataframe references in dependency graph
    - Reduced effectiveness of reactive execution
    - Potential runtime errors when SQL is executed

    ## Examples

    **Triggered by:**
    - Invalid SQL syntax in cell code
    - Unsupported SQL dialects or extensions
    - Complex SQL that exceeds parser capabilities

    ## References

    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    - [SQL Support](https://docs.marimo.io/guides/sql/)
    """

    code = "MF005"
    name = "sql-parse-error"
    description = "SQL parsing errors during dependency analysis"
    severity = Severity.FORMATTING
    fixable = False

    async def check(self, ctx: RuleContext) -> None:
        """Process SQL parsing error logs."""
        for record in ctx.get_logs(self.code):
            # Metadata is carried exclusively on the record's attribute dict.
            meta = getattr(record, "__dict__", {})

            # Clean message from metadata (without the SQL trace).
            message = meta.get("clean_message", "SQL parsing error")

            # Position = cell start line + node line within the cell
            # (converted to 0-based), plus the SQL-internal line offset
            # when the SQL parser provided one.
            line = meta.get("cell_lineno", 0) + meta.get("node_lineno", 1) - 1
            sql_line = meta.get("sql_line")
            if sql_line is not None:
                line += sql_line

            # Prefer the SQL-internal column; fall back to the node column.
            sql_col = meta.get("sql_col")
            column = (
                meta.get("node_col_offset", 0) if sql_col is None else sql_col
            )

            await ctx.add_diagnostic(
                Diagnostic(
                    message=message,
                    line=line,
                    cell_id=None,
                    column=column,
                )
            )
class MiscLogRule(LintRule):
    """MF006: Miscellaneous log messages during processing.

    This rule processes log messages that don't have a specific rule assigned
    but may still be relevant for understanding notebook health and potential
    issues during processing.

    ## What it does

    Captures warning and error level log messages that aren't handled by
    other specific log rules and creates diagnostics to surface them.

    ## Why is this bad?

    Unhandled log messages may indicate:
    - Unexpected issues during notebook processing
    - Configuration problems
    - Library warnings that affect execution
    - Performance or resource issues

    ## Examples

    **Triggered by:**
    - General warnings from imported libraries
    - Configuration issues
    - Unexpected errors during processing

    ## References

    - [Understanding Errors](https://docs.marimo.io/guides/understanding_errors/)
    """

    code = "MF006"
    name = "misc-log-capture"
    description = "Miscellaneous log messages during processing"
    severity = Severity.FORMATTING
    fixable = False

    async def check(self, ctx: RuleContext) -> None:
        """Process miscellaneous log messages."""
        for record in ctx.get_logs(self.code):
            # Skip DEBUG/INFO records to avoid noise.
            if record.levelno < logging.WARNING:
                continue
            await ctx.add_diagnostic(
                Diagnostic(
                    message=record.getMessage(),
                    line=0,  # Misc logs don't have meaningful line positioning
                    cell_id=None,
                    column=0,
                )
            )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/rules/formatting/parsing.py",
"license": "Apache License 2.0",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_lint/visitors.py | # Copyright 2026 Marimo. All rights reserved.
"""AST visitors for linting purposes."""
from __future__ import annotations
import ast
from typing import Optional
class VariableLineVisitor(ast.NodeVisitor):
    """AST visitor to find the line number of a variable definition.

    After ``visit``, ``line_number``/``column_number`` hold the position of
    the last matching definition found (assignments and ``del`` statements,
    function/class definitions, and imports). ``line_number`` stays ``None``
    when no definition was seen.
    """

    def __init__(self, target_variable: str):
        # Name whose definition site we are searching for.
        self.target_variable = target_variable
        # 1-based line of the definition, or None if not found.
        self.line_number: Optional[int] = None
        # 1-based column of the definition.
        self.column_number: int = 1

    def _record(self, node: ast.AST) -> None:
        """Remember *node*'s position as the definition site."""
        self.line_number = node.lineno  # type: ignore[attr-defined]
        self.column_number = node.col_offset + 1  # type: ignore[attr-defined]

    def visit_Name(self, node: ast.Name) -> None:
        """Visit Name nodes to find variable definitions."""
        # Only Store/Del contexts are definitions; Load is just a reference.
        if node.id == self.target_variable and isinstance(
            node.ctx, (ast.Store, ast.Del)
        ):
            self._record(node)
            return
        self.generic_visit(node)

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        """Visit function definition nodes."""
        if node.name == self.target_variable:
            self._record(node)
            return
        self.generic_visit(node)

    def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
        """Visit async function definition nodes.

        Fix: ``async def`` definitions were previously not detected because
        ast dispatches them to a separate visitor method.
        """
        if node.name == self.target_variable:
            self._record(node)
            return
        self.generic_visit(node)

    def visit_ClassDef(self, node: ast.ClassDef) -> None:
        """Visit class definition nodes."""
        if node.name == self.target_variable:
            self._record(node)
            return
        self.generic_visit(node)

    def _check_import(self, node: ast.Import | ast.ImportFrom) -> None:
        """Shared handler for plain and from-imports."""
        for alias in node.names:
            # Either the bound alias or the imported name may match.
            if self.target_variable in (alias.asname, alias.name):
                self._record(node)
                return
        self.generic_visit(node)

    def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
        """Visit ImportFrom nodes to find imported variable definitions."""
        self._check_import(node)

    def visit_Import(self, node: ast.Import) -> None:
        """Visit Import nodes to find imported variable definitions."""
        self._check_import(node)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_lint/visitors.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_cli/test_cli_check.py | # Copyright 2026 Marimo. All rights reserved.
"""CLI tests for the marimo check command."""
import tempfile
from click.testing import CliRunner
from marimo._cli.cli import check
class TestLintCLI:
"""Test the check CLI command."""
def test_check_command_basic(self):
    """Test basic check command functionality on a clean notebook."""
    import os

    runner = CliRunner()
    # delete=False so the file persists for the CLI to read; cleaned up below.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".py", delete=False
    ) as f:
        f.write("""
import marimo

__generated_with = "0.0.0"
app = marimo.App()

@app.cell
def _():
    x = 1
    return (x,)

if __name__ == "__main__":
    app.run()
""")
        f.flush()
    try:
        # Run check command
        result = runner.invoke(check, [f.name])
        # Should succeed with no reported issues
        assert result.exit_code == 0, result.output
        assert not result.output.strip()
    finally:
        # Remove the delete=False temp file so test runs don't leak files.
        os.unlink(f.name)
def test_check_command_with_violations(self):
    """Test check command with parsing violations."""
    import os

    runner = CliRunner()
    # Notebook missing __generated_with and the __main__ guard.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".py", delete=False
    ) as f:
        f.write("""
import marimo

app = marimo.App()

@app.cell
def _():
    y = 2
    return (y,)
""")
        f.flush()
    try:
        # Run check command in strict mode
        result = runner.invoke(check, [f.name, "--strict"])
        # Should fail and report the formatting warnings
        assert result.exit_code == 1, result.output
        assert "warning[general-formatting]" in result.output
    finally:
        # Remove the delete=False temp file so test runs don't leak files.
        os.unlink(f.name)
def test_check_command_with_fix(self):
    """Test check command with fix option."""
    import os

    runner = CliRunner()
    # Notebook with fixable violations (missing metadata/guard).
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".py", delete=False
    ) as f:
        f.write("""
import marimo

app = marimo.App()

@app.cell
def _():
    y = 2
    return (y,)

# This should create a violation with missing guard
""")
        f.flush()
    try:
        # Run check command with fix; just verify it completes cleanly.
        result = runner.invoke(check, [f.name, "--fix"])
        assert result.exit_code == 0, result.output
    finally:
        # Remove the delete=False temp file so test runs don't leak files.
        os.unlink(f.name)
def test_check_command_nonexistent_file(self):
"""Test check command with nonexistent file."""
runner = CliRunner()
result = runner.invoke(check, ["nonexistent.py"])
# The CLI might handle nonexistent files gracefully
# Just check that it doesn't crash
assert result.exit_code in [0, 1, 2] # Various possible exit codes
def test_check_command_syntax_error(self):
"""Test check command with syntax error."""
runner = CliRunner()
# Create a temporary file with syntax error
with tempfile.NamedTemporaryFile(
mode="w", suffix=".py", delete=False
) as f:
f.write("""
import marimo
app = marimo.App()
@app.cell
def _():
x = 1 + # Syntax error
return (x,)
""")
f.flush()
# Run check command
result = runner.invoke(check, [f.name])
# Should fail due to syntax error
assert result.exit_code != 0, result.output
def test_check_command_help(self):
"""Test check command help."""
runner = CliRunner()
result = runner.invoke(check, ["--help"])
# Should succeed and show help
assert result.exit_code == 0
assert "format" in result.output
assert "--fix" in result.output
def test_check_command_no_errors(self):
"""Test check command with valid notebook."""
runner = CliRunner()
# Create a temporary file with valid marimo code
with tempfile.NamedTemporaryFile(
mode="w", suffix=".py", delete=False
) as f:
f.write("""
import marimo
__generated_with = "0.1.0"
app = marimo.App()
@app.cell
def _():
x = 1
return (x,)
if __name__ == "__main__":
app.run()
""")
f.flush()
# Run check command
result = runner.invoke(check, [f.name])
# Should succeed
assert result.exit_code == 0, result.output
def test_check_command_with_message_collection(self):
"""Test that CLI check command uses message collection properly."""
runner = CliRunner()
import os
# Use existing test file with syntax errors
test_file = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"_lint",
"test_files",
"syntax_errors.py",
)
# Test the check command
result = runner.invoke(check, [test_file])
# Should return non-zero exit code for files with errors
assert result.exit_code != 0
# Should contain linting output
assert len(result.output) > 0
assert (
"invalid-syntax" in result.output
or "error" in result.output.lower()
)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_cli/test_cli_check.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_async_context_system.py | # Copyright 2026 Marimo. All rights reserved.
"""Unit tests for the async context-based lint system."""
from marimo._ast.parse import parse_notebook
from marimo._lint.context import LintContext, RuleContext
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._lint.rule_engine import RuleEngine
from marimo._lint.rules.base import LintRule
from marimo._lint.rules.breaking import MultipleDefinitionsRule, UnparsableRule
from marimo._lint.rules.formatting import GeneralFormattingRule
from tests._lint.utils import lint_notebook
class MockRule(LintRule):
    """Configurable stand-in rule used to drive the engine in tests.

    Emits a fixed number of identical diagnostics per check() call and
    counts how many times it was invoked.
    """

    def __init__(
        self, code: str, severity: Severity, diagnostic_count: int = 1
    ):
        # Mirror the class-level attributes a real LintRule declares.
        self.code = code
        self.name = f"mock-{code.lower()}"
        self.description = f"Mock rule {code}"
        self.severity = severity
        self.fixable = False
        # Per-instance test bookkeeping.
        self.diagnostic_count = diagnostic_count
        self.call_count = 0

    async def check(self, rule_ctx: RuleContext) -> None:
        """Record the invocation and emit the configured diagnostics."""
        self.call_count += 1
        for index in range(self.diagnostic_count):
            await rule_ctx.add_diagnostic(
                Diagnostic(
                    message=f"Mock diagnostic {index + 1}",
                    cell_id=None,
                    line=1,
                    column=1,
                    code=self.code,
                    name=self.name,
                    severity=self.severity,
                    fixable=self.fixable,
                )
            )
class TestLintContext:
    """Test the LintContext class."""

    def setup_method(self):
        # A minimal two-line notebook is enough to build a fresh context
        # for every test in this class.
        self.notebook = parse_notebook("import marimo\napp = marimo.App()")
        self.ctx = LintContext(self.notebook)

    async def test_add_diagnostic_priority_queue(self):
        """Test that diagnostics are queued by priority."""
        # Add diagnostics in reverse priority order
        formatting_diag = Diagnostic(
            message="formatting",
            cell_id=None,
            line=1,
            column=1,
            code="MF001",
            name="test",
            severity=Severity.FORMATTING,
            fixable=False,
        )
        breaking_diag = Diagnostic(
            message="breaking",
            cell_id=None,
            line=1,
            column=1,
            code="MB001",
            name="test",
            severity=Severity.BREAKING,
            fixable=False,
        )
        runtime_diag = Diagnostic(
            message="runtime",
            cell_id=None,
            line=1,
            column=1,
            code="MR001",
            name="test",
            severity=Severity.RUNTIME,
            fixable=False,
        )
        # Add in non-priority order
        await self.ctx.add_diagnostic(formatting_diag)
        await self.ctx.add_diagnostic(runtime_diag)
        await self.ctx.add_diagnostic(breaking_diag)
        # Get diagnostics - should be sorted by priority:
        # BREAKING before RUNTIME before FORMATTING
        diagnostics = await self.ctx.get_diagnostics()
        assert len(diagnostics) == 3
        assert diagnostics[0].severity == Severity.BREAKING
        assert diagnostics[1].severity == Severity.RUNTIME
        assert diagnostics[2].severity == Severity.FORMATTING

    async def test_add_diagnostic_stable_order(self):
        """Test that diagnostics with same priority maintain insertion order."""
        # Add multiple diagnostics with same priority
        diag1 = Diagnostic(
            message="first",
            cell_id=None,
            line=1,
            column=1,
            code="MF001",
            name="test1",
            severity=Severity.FORMATTING,
            fixable=False,
        )
        diag2 = Diagnostic(
            message="second",
            cell_id=None,
            line=1,
            column=1,
            code="MF002",
            name="test2",
            severity=Severity.FORMATTING,
            fixable=False,
        )
        diag3 = Diagnostic(
            message="third",
            cell_id=None,
            line=1,
            column=1,
            code="MF003",
            name="test3",
            severity=Severity.FORMATTING,
            fixable=False,
        )
        await self.ctx.add_diagnostic(diag1)
        await self.ctx.add_diagnostic(diag2)
        await self.ctx.add_diagnostic(diag3)
        # Equal-severity diagnostics must come back in insertion order
        # (i.e. the priority ordering is a stable sort).
        diagnostics = await self.ctx.get_diagnostics()
        assert len(diagnostics) == 3
        assert diagnostics[0].message == "first"
        assert diagnostics[1].message == "second"
        assert diagnostics[2].message == "third"

    def test_graph_caching(self):
        """Test that the graph is cached and reused."""
        # First call should construct the graph
        graph1 = self.ctx.get_graph()
        # Second call should return the same graph instance
        graph2 = self.ctx.get_graph()
        assert graph1 is graph2

    def test_graph_thread_safety(self):
        """Test that graph construction is thread-safe."""
        import threading
        import time

        graphs = []
        exceptions = []

        def get_graph():
            try:
                # Add small delay to increase chance of race condition
                time.sleep(0.001)
                graph = self.ctx.get_graph()
                graphs.append(graph)
            except Exception as e:
                exceptions.append(e)

        # Create multiple threads trying to get the graph simultaneously
        threads = [threading.Thread(target=get_graph) for _ in range(10)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        # All threads should succeed and get the same graph instance
        assert len(exceptions) == 0
        assert len(graphs) == 10
        assert all(graph is graphs[0] for graph in graphs)
class TestAsyncRuleEngine:
    """Test the async RuleEngine functionality."""

    def setup_method(self):
        # Minimal valid notebook shared by every test in this class.
        self.notebook = parse_notebook("import marimo\napp = marimo.App()")

    async def test_async_rule_execution(self):
        """Test that rules are executed asynchronously."""
        # Create mock rules with different severities and counts
        breaking_rule = MockRule(
            "MB001", Severity.BREAKING, diagnostic_count=2
        )
        runtime_rule = MockRule("MR001", Severity.RUNTIME, diagnostic_count=1)
        formatting_rule = MockRule(
            "MF001", Severity.FORMATTING, diagnostic_count=3
        )
        checker = RuleEngine([breaking_rule, runtime_rule, formatting_rule])
        # Execute rules
        diagnostics = await checker.check_notebook(self.notebook)
        # All rules should have been called exactly once
        assert breaking_rule.call_count == 1
        assert runtime_rule.call_count == 1
        assert formatting_rule.call_count == 1
        # Should get diagnostics in priority order
        assert len(diagnostics) == 6  # 2 + 1 + 3
        # First two should be breaking
        assert diagnostics[0].severity == Severity.BREAKING
        assert diagnostics[1].severity == Severity.BREAKING
        # Next should be runtime
        assert diagnostics[2].severity == Severity.RUNTIME
        # Last three should be formatting
        assert diagnostics[3].severity == Severity.FORMATTING
        assert diagnostics[4].severity == Severity.FORMATTING
        assert diagnostics[5].severity == Severity.FORMATTING

    def test_sync_wrapper(self):
        """Test the synchronous wrapper."""
        mock_rule = MockRule("MF001", Severity.FORMATTING)
        checker = RuleEngine([mock_rule])
        # check_notebook_sync should drive the async path to completion
        diagnostics = checker.check_notebook_sync(self.notebook)
        assert len(diagnostics) == 1
        assert mock_rule.call_count == 1

    def test_rule_priority_execution(self):
        """Test that diagnostics are returned in priority order."""
        # Create rules in non-priority order
        formatting_rule = MockRule("MF001", Severity.FORMATTING)
        breaking_rule = MockRule("MB001", Severity.BREAKING)
        runtime_rule = MockRule("MR001", Severity.RUNTIME)
        checker = RuleEngine([formatting_rule, breaking_rule, runtime_rule])
        # Get diagnostics
        diagnostics = checker.check_notebook_sync(self.notebook)
        # Should get diagnostics in priority order regardless of rule submission order
        assert len(diagnostics) == 3
        assert diagnostics[0].severity == Severity.BREAKING  # MB001
        assert diagnostics[1].severity == Severity.RUNTIME  # MR001
        assert diagnostics[2].severity == Severity.FORMATTING  # MF001
class TestRealRules:
    """Test the real rule implementations with the new context system."""

    async def test_formatting_rule(self):
        """Test GeneralFormattingRule with context."""
        # Create notebook with formatting violations
        code = """import marimo
app = marimo.App()
@app.cell
def __():
    x = 1
    return x,
"""
        notebook = parse_notebook(code)
        ctx = LintContext(notebook)
        rule = GeneralFormattingRule()
        # NOTE(review): rules here receive the LintContext directly, while
        # test_lint_system wraps it in a RuleContext first — confirm both
        # call shapes are supported by the rule API.
        await rule.check(ctx)
        diagnostics = await ctx.get_diagnostics()
        # This should succeed without error
        assert isinstance(diagnostics, list)

    async def test_multiple_definitions_rule(self):
        """Test MultipleDefinitionsRule with context."""
        # Create notebook with multiple definitions of `x`
        code = """import marimo
app = marimo.App()
@app.cell
def _():
    x = 1
    return
@app.cell
def _():
    x = 2 # Should trigger multiple definitions
    return
"""
        notebook = parse_notebook(code)
        ctx = LintContext(notebook)
        rule = MultipleDefinitionsRule()
        await rule.check(ctx)
        diagnostics = await ctx.get_diagnostics()
        assert len(diagnostics) > 0
        assert diagnostics[0].severity == Severity.BREAKING
        assert "multiple cells" in diagnostics[0].message

    async def test_unparsable_rule(self):
        """Test UnparsableRule with context."""
        # Create a simple notebook (unparsable cells need special setup)
        notebook = parse_notebook("import marimo\napp = marimo.App()")
        ctx = LintContext(notebook)
        rule = UnparsableRule()
        await rule.check(ctx)
        # Should not find any unparsable cells in valid code
        diagnostics = await ctx.get_diagnostics()
        unparsable_diagnostics = [d for d in diagnostics if d.code == "MB001"]
        assert len(unparsable_diagnostics) == 0
class TestIntegration:
    """Integration tests for the complete system."""

    def test_end_to_end_linting(self):
        """Test complete end-to-end linting process."""
        # Create notebook with multiple types of issues
        code = """import marimo
app = marimo.App()
@app.cell
def _():
    x = 1
    return
@app.cell
def _():
    x = 2 # Multiple definitions
    return
"""
        notebook = parse_notebook(code)
        diagnostics = lint_notebook(notebook, code)
        # Should find diagnostics
        assert len(diagnostics) > 0
        # Should be sorted by priority (breaking first, then runtime, then formatting)
        severities = [d.severity for d in diagnostics]
        severity_values = [s.value for s in severities]
        # Check that breaking comes before runtime, runtime before formatting;
        # unknown severities sort last via the 999 default.
        priority_order = {"breaking": 0, "runtime": 1, "formatting": 2}
        for i in range(len(severity_values) - 1):
            current_priority = priority_order.get(severity_values[i], 999)
            next_priority = priority_order.get(severity_values[i + 1], 999)
            assert current_priority <= next_priority

    def test_default_checker_creation(self):
        """Test that default checker includes all expected rules."""
        checker = RuleEngine.create_default()
        # Should include all the standard rules
        rule_codes = {rule.code for rule in checker.rules}
        expected_codes = {
            "MF001",
            "MF002",
            "MF003",
            "MB001",
            "MB002",
            "MB003",
            "MB004",
        }
        assert expected_codes.issubset(rule_codes)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_async_context_system.py",
"license": "Apache License 2.0",
"lines": 309,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_files/cycle_dependencies.py | import marimo
__generated_with = "0.15.2"
app = marimo.App()
@app.cell
def _(z):
x = 1 + z # This should trigger MR002 - cycle dependency
return (x,)
@app.cell
def _(x):
y = x + 1
return (y,)
@app.cell
def _(y):
z = y + 1
return (z,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_files/cycle_dependencies.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_files/formatting.py | """Example to test formatting issues."""
import marimo as mo # intentional to test import aliasing
statement = "not in a cell, so unexpected"
# no generate guard
app = mo.App()
statement = "not in a cell, so unexpected"
@app.cell
def _():
pass
# no run guard
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_files/formatting.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_files/multiple_definitions.py | import marimo
__generated_with = "0.15.2"
app = marimo.App()
@app.cell
def _():
print(1)
x = 1
return
@app.cell
def _():
x = 2 # This should trigger MR001 - multiple definitions
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_files/multiple_definitions.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_files/setup_dependencies.py | import marimo
__generated_with = "0.15.2"
app = marimo.App()
x = 1
with app.setup:
y = x + 1 # This should trigger MR003 - setup cell dependencies
@app.cell
def _():
x = 1
return (x,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_files/setup_dependencies.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_files/unparsable_cell.py | import marimo
__generated_with = "0.0.0"
app = marimo.App()
@app.cell
def _():
x = 1
return
# This should create an unparsable cell
app._unparsable_cell("""
x = 1 + # Syntax error
""")
@app.cell
def _():
y = 2
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_files/unparsable_cell.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_lint_system.py | # Copyright 2026 Marimo. All rights reserved.
"""Unit tests for the marimo lint system."""
from unittest.mock import patch
from marimo._ast.parse import parse_notebook
from marimo._lint.context import LintContext, RuleContext
from marimo._lint.rule_engine import RuleEngine
from marimo._lint.rules.base import Severity
from marimo._lint.rules.breaking import (
CycleDependenciesRule,
MultipleDefinitionsRule,
SetupCellDependenciesRule,
UnparsableRule,
)
from marimo._lint.rules.formatting import GeneralFormattingRule
from marimo._schemas.serialization import (
AppInstantiation,
CellDef,
NotebookSerializationV1,
)
from tests._lint.utils import lint_notebook
class TestLintSystem:
    """Test the core lint system functionality."""

    def test_lint_notebook_basic(self):
        """Test basic linting functionality."""
        # Notebook lacks the __generated_with header
        code = """
import marimo
app = marimo.App()
@app.cell
def __():
    x = 1
    return (x,)
"""
        notebook = parse_notebook(code)
        errors = lint_notebook(notebook)
        # Should have formatting error for missing __generated_with
        assert len(errors) > 0
        assert any(error.code == "MF001" for error in errors)

    def test_lint_notebook_with_violations(self):
        """Test linting with parsing violations."""
        # `x = 1` lives outside any cell, which is a structure violation
        code = """
import marimo
app = marimo.App()
# This should create a violation
x = 1
@app.cell
def __():
    y = 2
    return (y,)
"""
        notebook = parse_notebook(code)
        errors = lint_notebook(notebook)
        # Should have formatting errors for violations
        assert len(errors) > 0
        assert any(error.code == "MF001" for error in errors)
class TestLintRules:
    """Test individual lint rules."""

    async def test_general_formatting_rule(self):
        """Test the general formatting rule."""
        rule = GeneralFormattingRule()
        # Create a notebook with violations. The dynamic type(...) call
        # builds an ad-hoc stand-in exposing the attributes a parse
        # violation carries (description, lineno, col_offset).
        notebook = NotebookSerializationV1(
            app=AppInstantiation(),
            cells=[],
            violations=[
                type(
                    "Violation",
                    (),
                    {
                        "description": "UNEXPECTED_STATEMENT_CELL_DEF_VIOLATION",
                        "lineno": 1,
                        "col_offset": 0,
                    },
                )()
            ],
        )
        ctx = LintContext(notebook)
        rule_ctx = RuleContext(ctx, rule)
        await rule.check(rule_ctx)
        errors = await ctx.get_diagnostics()
        assert len(errors) == 1
        assert errors[0].code == "MF001"
        assert errors[0].severity == Severity.FORMATTING

    async def test_multiple_definitions_rule(self):
        """Test the multiple definitions rule."""
        rule = MultipleDefinitionsRule()
        # Two cells that both assign `x`
        notebook = NotebookSerializationV1(
            app=AppInstantiation(),
            cells=[
                CellDef(code="x = 1", name="cell1", lineno=1, col_offset=0),
                CellDef(code="x = 2", name="cell2", lineno=2, col_offset=0),
            ],
        )
        ctx = LintContext(notebook)
        rule_ctx = RuleContext(ctx, rule)
        await rule.check(rule_ctx)
        errors = await ctx.get_diagnostics()
        # The rule should run without errors (even if no multiple definitions found)
        assert isinstance(errors, list)

    async def test_cycle_dependencies_rule(self):
        """Test the cycle dependencies rule."""
        rule = CycleDependenciesRule()
        # Two independent cells - no cycle present
        notebook = NotebookSerializationV1(
            app=AppInstantiation(),
            cells=[
                CellDef(code="x = 1", name="cell1", lineno=1, col_offset=0),
                CellDef(code="y = 2", name="cell2", lineno=2, col_offset=0),
            ],
        )
        ctx = LintContext(notebook)
        rule_ctx = RuleContext(ctx, rule)
        await rule.check(rule_ctx)
        errors = await ctx.get_diagnostics()
        # The rule should run without errors
        assert isinstance(errors, list)

    async def test_setup_cell_dependencies_rule(self):
        """Test the setup cell dependencies rule."""
        rule = SetupCellDependenciesRule()
        # Single plain cell, no setup block
        notebook = NotebookSerializationV1(
            app=AppInstantiation(),
            cells=[
                CellDef(code="x = 1", name="cell1", lineno=1, col_offset=0),
            ],
        )
        ctx = LintContext(notebook)
        rule_ctx = RuleContext(ctx, rule)
        await rule.check(rule_ctx)
        errors = await ctx.get_diagnostics()
        # The rule should run without errors
        assert isinstance(errors, list)

    async def test_unparsable_cells_rule(self):
        """Test the unparsable cells rule."""
        rule = UnparsableRule()
        # Create a notebook with an unparsable cell ("x = 1 +" is a
        # syntax error)
        from marimo._schemas.serialization import UnparsableCell

        notebook = NotebookSerializationV1(
            app=AppInstantiation(),
            cells=[
                UnparsableCell(
                    code="x = 1 +", name="cell1", lineno=1, col_offset=0
                ),
            ],
        )
        ctx = LintContext(notebook)
        rule_ctx = RuleContext(ctx, rule)
        await rule.check(rule_ctx)
        errors = await ctx.get_diagnostics()
        assert len(errors) == 1
        assert errors[0].code == "MB001"
        assert errors[0].severity == Severity.BREAKING
class TestRuleEngine:
    """Test the RuleEngine class."""

    def test_create_default(self):
        """The default engine exists and ships with at least one rule."""
        engine = RuleEngine.create_default()
        assert engine is not None
        assert len(engine.rules) > 0

    def test_check_notebook(self):
        """Checking an empty notebook synchronously yields a list."""
        empty_notebook = NotebookSerializationV1(
            app=AppInstantiation(), cells=[], violations=[]
        )
        engine = RuleEngine.create_default()
        results = engine.check_notebook_sync(empty_notebook)
        assert isinstance(results, list)
class TestMessageCollectionEntryPoints:
    """Test that message collection works through the main entry points."""

    def test_app_initialization_with_message_collection(self):
        """Test that App initialization uses message collection when it encounters errors."""
        import os
        from io import StringIO

        from marimo._ast.app import App

        # Use existing test file with multiple definitions (which should trigger error handling)
        test_file = os.path.join(
            os.path.dirname(__file__), "test_files", "multiple_definitions.py"
        )
        app = App(filename=test_file)
        # Capture stderr to verify message collection
        captured_stderr = StringIO()
        with patch("sys.stderr", captured_stderr):
            try:
                app._maybe_initialize()
            except Exception:
                pass  # Expected to potentially raise an error
        # Verify that if there were errors, linting messages were written to stderr
        stderr_output = captured_stderr.getvalue()
        # The test passes if either no errors occurred, or if errors occurred with proper message collection
        assert isinstance(stderr_output, str)  # Should always be a string

    def test_app_initialization_with_success(self):
        """Test that App initialization works normally with valid notebooks."""
        import os

        from marimo._ast.app import App

        # Use existing test file with formatting issues but valid structure
        test_file = os.path.join(
            os.path.dirname(__file__), "test_files", "formatting.py"
        )
        app = App(filename=test_file)
        # Should not raise an exception for valid notebooks
        # If it does raise an exception, it should not be an UnparsableError
        try:
            app._maybe_initialize()
        except Exception as e:
            # If it fails, it should be for a different reason than unparsable
            if "UnparsableError" in str(type(e)):
                raise AssertionError(f"Unexpected UnparsableError: {e}") from e

    def test_collect_messages_severity_filtering(self):
        """Test that collect_messages severity filtering works correctly."""
        import os

        from marimo._lint import Severity, collect_messages

        test_file = os.path.join(
            os.path.dirname(__file__), "test_files", "formatting.py"
        )
        # Test with BREAKING severity (default)
        linter_breaking, messages_breaking = collect_messages(test_file)
        # Test with FORMATTING severity (should include more issues)
        linter_all, messages_all = collect_messages(
            test_file, min_severity=Severity.FORMATTING
        )
        # Both should return valid results
        assert isinstance(linter_breaking.errored, bool)
        assert isinstance(messages_breaking, str)
        assert isinstance(linter_all.errored, bool)
        assert isinstance(messages_all, str)
        # FORMATTING severity should typically find more issues
        assert len(messages_all) >= len(messages_breaking)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_lint_system.py",
"license": "Apache License 2.0",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_run_check.py | # Copyright 2026 Marimo. All rights reserved.
"""Unit tests for the run_check CLI integration."""
from pathlib import Path
from marimo._lint import FileStatus, Linter, run_check
from marimo._lint.diagnostic import Diagnostic, Severity
class TestRunCheck:
    """Test the run_check function and CLI integration."""

    def test_run_check_with_empty_files(self):
        """Test run_check with file patterns that match no files."""
        result = run_check(("nonexistent/**/*.py",))
        assert isinstance(result, Linter)
        assert len(result.files) == 0
        assert result.errored is False

    def test_run_check_with_unsupported_files(self, tmpdir):
        """Test run_check skips unsupported file types."""
        # Create a non-notebook file
        txt_file = Path(tmpdir) / "test.txt"
        txt_file.write_text("This is not a notebook")
        result = run_check((str(txt_file),))
        assert len(result.files) == 1
        assert result.files[0].skipped is True
        assert "not a notebook file" in result.files[0].message

    def test_run_check_with_empty_py_file(self, tmpdir):
        """Test run_check with an empty Python file."""
        py_file = Path(tmpdir) / "empty.py"
        py_file.write_text("")
        result = run_check((str(py_file),))
        assert len(result.files) == 1
        assert result.files[0].skipped is True
        assert "empty file" in result.files[0].message

    def test_run_check_with_valid_notebook(self, tmpdir):
        """Test run_check with a valid marimo notebook."""
        notebook_file = Path(tmpdir) / "notebook.py"
        notebook_content = """import marimo
__generated_with = "0.15.0"
app = marimo.App()
@app.cell
def __():
    x = 1
    return (x,)
"""
        notebook_file.write_text(notebook_content)
        result = run_check((str(notebook_file),))
        assert len(result.files) == 1
        assert result.files[0].skipped is False
        assert result.files[0].failed is False
        assert isinstance(result.files[0].diagnostics, list)

    def test_run_check_with_syntax_error(self, tmpdir):
        """Test run_check with a file containing syntax errors."""
        # "def broken(:" is unparsable Python
        bad_file = Path(tmpdir) / "bad.py"
        bad_file.write_text(
            "import marimo\napp = marimo.App(\ndef broken(:\n pass"
        )
        result = run_check((str(bad_file),))
        assert len(result.files) == 1
        assert result.files[0].failed is True
        assert "Failed to parse" in result.files[0].message
        assert len(result.files[0].details) > 0

    def test_run_check_with_glob_patterns(self, tmpdir):
        """Test run_check with glob patterns."""
        # Create multiple files of different types
        py_file = Path(tmpdir) / "test.py"
        py_file.write_text("# empty notebook")
        md_file = Path(tmpdir) / "test.md"
        md_file.write_text("# Empty markdown")
        txt_file = Path(tmpdir) / "test.txt"
        txt_file.write_text("ignored")
        # Use glob pattern
        pattern = str(Path(tmpdir) / "*")
        result = run_check((pattern,))
        # Should find py and md files, skip txt
        assert len(result.files) == 3
        py_result = next(f for f in result.files if f.file.endswith(".py"))
        md_result = next(f for f in result.files if f.file.endswith(".md"))
        txt_result = next(f for f in result.files if f.file.endswith(".txt"))
        # files with simple comments are valid notebooks with errors.
        assert not py_result.failed
        assert not md_result.failed
        assert txt_result.skipped is True  # Not a notebook
class TestFileStatus:
    """Test the FileStatus class.

    Improvement over the original: the three Linter.fix() tests repeated the
    same write-file/build-status/assert-no-op scaffolding; it now lives in
    the _assert_fix_is_noop helper. Assertions are unchanged.
    """

    def test_file_status_initialization(self):
        """Test FileStatus initialization with defaults."""
        status = FileStatus(file="test.py")
        assert status.file == "test.py"
        assert status.diagnostics == []
        assert status.skipped is False
        assert status.failed is False
        assert status.message == ""
        assert status.details == []

    def test_file_status_with_diagnostics(self):
        """Test FileStatus with diagnostics."""
        diagnostic = Diagnostic(
            code="MB001",
            name="test-error",
            message="Test error",
            severity=Severity.BREAKING,
            cell_id=None,
            line=1,
            column=1,
            fixable=False,
        )
        status = FileStatus(file="test.py", diagnostics=[diagnostic])
        assert len(status.diagnostics) == 1
        assert status.diagnostics[0].code == "MB001"

    async def _assert_fix_is_noop(self, tmpdir, diagnostics):
        """Assert Linter.fix() leaves a notebook-less FileStatus untouched.

        Writes a small file, builds a FileStatus with ``notebook=None`` and
        the given diagnostics, and checks fix() returns False without
        modifying the file on disk.
        """
        test_file = Path(tmpdir) / "test.py"
        original_content = "# Original content"
        test_file.write_text(original_content)
        # FileStatus needs notebook and contents for fix to work
        status = FileStatus(
            file=str(test_file),
            diagnostics=diagnostics,
            notebook=None,  # No notebook means fix returns False
            contents=original_content,
        )
        # Since no notebook, fix should return False (no changes)
        linter = Linter()
        assert await linter.fix(status) is False
        # File should remain unchanged
        assert test_file.read_text() == original_content

    async def test_file_status_fix_no_fixable_diagnostics(self, tmpdir):
        """Test Linter.fix() with no fixable diagnostics."""
        diagnostic = Diagnostic(
            message="Test error",
            cell_id=None,
            line=1,
            column=1,
            code="MB001",
            name="test-error",
            severity=Severity.BREAKING,
            fixable=False,  # Not fixable
        )
        await self._assert_fix_is_noop(tmpdir, [diagnostic])

    async def test_file_status_fix_with_fixable_diagnostics(self, tmpdir):
        """Test Linter.fix() with fixable diagnostics (but no notebook)."""
        diagnostic = Diagnostic(
            message="Formatting error",
            cell_id=None,
            line=1,
            column=1,
            code="MF001",
            name="formatting-error",
            severity=Severity.FORMATTING,
            fixable=True,  # Fixable
        )
        await self._assert_fix_is_noop(tmpdir, [diagnostic])

    async def test_file_status_fix_with_multiple_diagnostics(self, tmpdir):
        """Test Linter.fix() with multiple diagnostics (but no notebook)."""
        diagnostics = [
            Diagnostic(
                message="Formatting",
                cell_id=None,
                line=1,
                column=1,
                code="MF001",
                name="formatting-error",
                severity=Severity.FORMATTING,
                fixable=True,
            ),
            Diagnostic(
                message="Breaking",
                cell_id=None,
                line=2,
                column=1,
                code="MB001",
                name="breaking-error",
                severity=Severity.BREAKING,
                fixable=True,
            ),
            Diagnostic(
                message="Runtime",
                cell_id=None,
                line=3,
                column=1,
                code="MR001",
                name="runtime-error",
                severity=Severity.RUNTIME,
                fixable=True,
            ),
        ]
        await self._assert_fix_is_noop(tmpdir, diagnostics)
class TestLinter:
    """Test the Linter class."""

    def test_check_result_initialization(self):
        """A fresh Linter starts with no files and no errors."""
        linter = Linter()
        assert linter.files == []
        assert linter.errored is False

    def test_check_result_with_files(self):
        """Files and the errored flag can be populated after a run."""
        linter = Linter()
        # Simulate state after a run: one file processed, errors observed.
        linter.files.append(FileStatus(file="test.py"))
        linter.errored = True
        assert len(linter.files) == 1
        assert linter.files[0].file == "test.py"
        assert linter.errored is True
class TestIntegration:
    """Integration tests combining multiple components."""

    def test_full_workflow_with_notebook_violations(self, tmpdir):
        """Test the full workflow with a notebook that has violations."""
        notebook_file = Path(tmpdir) / "bad_notebook.py"
        # Create a notebook with violations (missing __generated_with)
        notebook_content = """import marimo
app = marimo.App()
# This violates marimo structure - code outside cell
x = 1
@app.cell
def __():
    y = 2
    return (y,)
"""
        notebook_file.write_text(notebook_content)
        result = run_check((str(notebook_file),))
        assert len(result.files) == 1
        file_status = result.files[0]
        assert not file_status.skipped
        assert not file_status.failed
        assert len(file_status.diagnostics) > 0
        # Should have formatting violations
        assert any(
            d.severity == Severity.FORMATTING for d in file_status.diagnostics
        )
        # Test fixing (if notebook is available)
        if file_status.notebook is not None:
            # Use Linter.fix() method
            import asyncio

            linter = Linter()
            # NOTE(review): this rebinding shadows the run_check result above.
            result = asyncio.run(linter.fix(file_status))
            assert isinstance(result, bool)

    def test_error_handling_in_run_check(self, tmpdir):
        """Test error handling in run_check."""
        # Test by providing invalid content that will cause parsing to fail
        test_file = Path(tmpdir) / "test.py"
        # Write invalid Python content that will cause an exception
        test_file.write_text(
            "import marimo\napp = marimo.App()\ndef broken(:\n pass"
        )
        result = run_check((str(test_file),))
        assert len(result.files) == 1
        assert result.files[0].failed is True
        # Note: errored might not be True as we changed error handling
        assert "Failed to parse" in result.files[0].message

    def test_run_check_with_nonexistent_file_pattern(self):
        """Test run_check with a specific nonexistent file."""
        result = run_check(("nonexistent_file.py",))
        assert isinstance(result, Linter)
        assert len(result.files) == 1  # Should create a failed file status
        assert result.files[0].failed is True
        assert "File not found" in result.files[0].message

    def test_run_check_with_nonexistent_directory_pattern(self):
        """Test run_check with nonexistent directory patterns."""
        result = run_check(("nonexistent_dir/**/*.py",))
        assert isinstance(result, Linter)
        assert len(result.files) == 0
        assert result.errored is False
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_run_check.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/test_streaming_early_stopping.py | # Copyright 2026 Marimo. All rights reserved.
"""Unit tests for streaming diagnostics and early stopping functionality."""
import asyncio
from marimo._ast.parse import parse_notebook
from marimo._lint.context import LintContext
from marimo._lint.diagnostic import Diagnostic, Severity
from marimo._lint.rule_engine import EarlyStoppingConfig, RuleEngine
from marimo._lint.rules.base import LintRule
def create_diagnostic(
    code: str,
    cell_id: str,
    message: str,
    severity: Severity,
    fixable: bool = False,
) -> Diagnostic:
    """Build a Diagnostic at line 1, column 1, passing fields by keyword.

    Exists so tests don't need to remember Diagnostic's positional order.
    """
    fields = {
        "message": message,
        "cell_id": cell_id,
        "line": 1,
        "column": 1,
        "code": code,
        "severity": severity,
        "fixable": fixable,
    }
    return Diagnostic(**fields)
class SlowRule(LintRule):
    """Mock rule whose check() sleeps before emitting diagnostics.

    Exposes started/completed/cancelled flags so tests can observe how the
    rule engine scheduled — or cancelled — the rule.
    """

    def __init__(
        self,
        code: str,
        severity: Severity,
        delay: float = 0.1,
        diagnostic_count: int = 1,
    ):
        # Rule metadata (normally class attributes on real rules).
        self.code = code
        self.name = f"slow-{code.lower()}"
        self.description = f"Slow rule {code}"
        self.severity = severity
        self.fixable = False
        # Behavior knobs.
        self.delay = delay
        self.diagnostic_count = diagnostic_count
        # Observability flags for assertions.
        self.started = False
        self.completed = False
        self.cancelled = False

    async def check(self, ctx: LintContext) -> None:
        """Sleep for `delay`, then emit `diagnostic_count` diagnostics."""
        self.started = True
        try:
            await asyncio.sleep(self.delay)
            for index in range(self.diagnostic_count):
                await ctx.add_diagnostic(
                    Diagnostic(
                        code=self.code,
                        name=self.name,
                        message=f"Slow diagnostic {index + 1}",
                        severity=self.severity,
                        cell_id=None,
                        line=1,
                        column=1,
                        fixable=self.fixable,
                    )
                )
            self.completed = True
        except asyncio.CancelledError:
            # Record the cancellation, then re-raise so the engine sees it.
            self.cancelled = True
            raise
class TestLintContextStreaming:
    """Test LintContext streaming functionality."""

    def setup_method(self):
        # Fresh notebook and context for every test.
        self.notebook = parse_notebook("import marimo\napp = marimo.App()")
        self.ctx = LintContext(self.notebook)

    async def test_get_new_diagnostics_empty(self):
        """Test get_new_diagnostics when no diagnostics added."""
        new_diagnostics = await self.ctx.get_new_diagnostics()
        assert len(new_diagnostics) == 0

    async def test_get_new_diagnostics_incremental(self):
        """Test get_new_diagnostics returns only new diagnostics."""
        # Add first batch. Use the shared create_diagnostic helper (same
        # field values as before) instead of spelling out every Diagnostic
        # field — consistent with the rest of this file.
        diag1 = create_diagnostic(
            "MF001", "test1", "first", Severity.FORMATTING
        )
        diag2 = create_diagnostic("MR001", "test2", "second", Severity.RUNTIME)
        await self.ctx.add_diagnostic(diag1)
        await self.ctx.add_diagnostic(diag2)
        # Get first batch
        new_diagnostics = await self.ctx.get_new_diagnostics()
        assert len(new_diagnostics) == 2
        assert (
            new_diagnostics[0].severity == Severity.RUNTIME
        )  # Higher priority first
        assert new_diagnostics[1].severity == Severity.FORMATTING
        # Add second batch
        diag3 = create_diagnostic("MB001", "test3", "third", Severity.BREAKING)
        await self.ctx.add_diagnostic(diag3)
        # Get only new diagnostics
        new_diagnostics = await self.ctx.get_new_diagnostics()
        assert len(new_diagnostics) == 1
        assert new_diagnostics[0].severity == Severity.BREAKING
        # Calling again should return empty
        new_diagnostics = await self.ctx.get_new_diagnostics()
        assert len(new_diagnostics) == 0

    async def test_get_all_diagnostics_still_works(self):
        """Test that get_diagnostics still returns all diagnostics."""
        diag1 = create_diagnostic(
            "MF001", "test1", "first", Severity.FORMATTING
        )
        diag2 = create_diagnostic("MR001", "test2", "second", Severity.RUNTIME)
        await self.ctx.add_diagnostic(diag1)
        await self.ctx.add_diagnostic(diag2)
        # Consuming the "new" stream must not affect get_diagnostics.
        await self.ctx.get_new_diagnostics()
        all_diagnostics = await self.ctx.get_diagnostics()
        assert len(all_diagnostics) == 2
class TestEarlyStoppingConfig:
    """Test EarlyStoppingConfig functionality."""

    def test_no_early_stopping(self):
        """Test default config doesn't stop."""
        config = EarlyStoppingConfig()
        breaking_diag = create_diagnostic(
            "MB001", "test", "breaking", Severity.BREAKING
        )
        runtime_diag = create_diagnostic(
            "MR001", "test", "runtime", Severity.RUNTIME
        )
        assert not config.should_stop(breaking_diag, 1)
        assert not config.should_stop(runtime_diag, 1)

    def test_stop_on_breaking(self):
        """Test stopping on breaking severity."""
        config = EarlyStoppingConfig(stop_on_breaking=True)
        breaking_diag = create_diagnostic(
            "MB001", "test", "breaking", Severity.BREAKING
        )
        runtime_diag = create_diagnostic(
            "MR001", "test", "runtime", Severity.RUNTIME
        )
        assert config.should_stop(breaking_diag, 1)
        assert not config.should_stop(runtime_diag, 1)

    def test_stop_on_runtime(self):
        """Test stopping on runtime severity."""
        config = EarlyStoppingConfig(stop_on_runtime=True)
        runtime_diag = create_diagnostic(
            "MR001", "test", "runtime", Severity.RUNTIME
        )
        formatting_diag = create_diagnostic(
            "MF001", "test", "formatting", Severity.FORMATTING
        )
        assert config.should_stop(runtime_diag, 1)
        assert not config.should_stop(formatting_diag, 1)

    def test_max_diagnostics(self):
        """Test stopping based on max diagnostic count."""
        config = EarlyStoppingConfig(max_diagnostics=2)
        # Use create_diagnostic so fields are passed by keyword: the bare
        # positional call previously used here put the code in `message`
        # and the message in `line` (Diagnostic's positional order differs
        # from the helper's argument order).
        diag = create_diagnostic(
            "MF001", "test", "formatting", Severity.FORMATTING
        )
        assert not config.should_stop(diag, 1)
        assert config.should_stop(diag, 2)
        assert config.should_stop(diag, 3)

    def test_stop_on_first_of_severity(self):
        """Test stopping on first occurrence of specific severity."""
        config = EarlyStoppingConfig(
            stop_on_first_of_severity=Severity.RUNTIME
        )
        runtime_diag = create_diagnostic(
            "MR001", "test", "runtime", Severity.RUNTIME
        )
        breaking_diag = create_diagnostic(
            "MB001", "test", "breaking", Severity.BREAKING
        )
        formatting_diag = create_diagnostic(
            "MF001", "test", "formatting", Severity.FORMATTING
        )
        # Only the configured severity triggers the stop — not higher ones.
        assert config.should_stop(runtime_diag, 1)
        assert not config.should_stop(breaking_diag, 1)
        assert not config.should_stop(formatting_diag, 1)
class TestStreamingRuleEngine:
    """Test streaming functionality of RuleEngine."""

    def setup_method(self):
        self.notebook = parse_notebook("import marimo\napp = marimo.App()")

    async def _collect(self, engine):
        """Drain the streaming iterator into a list."""
        return [
            diag
            async for diag in engine.check_notebook_streaming(self.notebook)
        ]

    async def test_streaming_basic(self):
        """Diagnostics stream out as rules finish, ordered by priority."""
        fast_rule = SlowRule("MR001", Severity.RUNTIME, delay=0.01)
        slow_rule = SlowRule("MF001", Severity.FORMATTING, delay=0.05)
        engine = RuleEngine([fast_rule, slow_rule])
        collected = await self._collect(engine)
        assert len(collected) == 2
        # Even though the fast rule finishes first, results come out
        # priority-ordered: runtime outranks formatting.
        assert collected[0].severity == Severity.RUNTIME
        assert collected[1].severity == Severity.FORMATTING
        # Neither rule was cancelled.
        assert fast_rule.completed
        assert slow_rule.completed

    async def test_early_stopping_on_breaking(self):
        """Early stopping cancels rules still in flight."""
        fast_breaking = SlowRule("MB001", Severity.BREAKING, delay=0.01)
        # This rule takes long enough that the stop fires before it ends.
        slow_formatting = SlowRule("MF001", Severity.FORMATTING, delay=0.1)
        engine = RuleEngine(
            [fast_breaking, slow_formatting],
            early_stopping=EarlyStoppingConfig(stop_on_breaking=True),
        )
        collected = await self._collect(engine)
        # Only the breaking diagnostic made it out.
        assert len(collected) == 1
        assert collected[0].severity == Severity.BREAKING
        assert fast_breaking.completed
        assert slow_formatting.cancelled

    async def test_early_stopping_max_diagnostics(self):
        """Stop once the diagnostic budget is exhausted."""
        multi_rule = SlowRule(
            "MF001", Severity.FORMATTING, delay=0.01, diagnostic_count=3
        )
        slow_rule = SlowRule("MR001", Severity.RUNTIME, delay=0.1)
        engine = RuleEngine(
            [multi_rule, slow_rule],
            early_stopping=EarlyStoppingConfig(max_diagnostics=2),
        )
        collected = await self._collect(engine)
        # Capped at two diagnostics; the pending rule gets cancelled.
        assert len(collected) == 2
        assert slow_rule.cancelled

    def test_backward_compatibility(self):
        """The synchronous entry point still drains every rule."""
        fast_rule = SlowRule("MR001", Severity.RUNTIME, delay=0.01)
        slow_rule = SlowRule("MF001", Severity.FORMATTING, delay=0.02)
        engine = RuleEngine([fast_rule, slow_rule])
        results = engine.check_notebook_sync(self.notebook)
        assert len(results) == 2
        assert fast_rule.completed
        assert slow_rule.completed
class TestRealWorldScenarios:
    """Test real-world scenarios with streaming and early stopping."""

    def setup_method(self):
        # Fixture notebook: `x` is defined in two cells, which should
        # trigger a multiple-definitions (breaking) diagnostic.
        self.notebook = parse_notebook("""import marimo
app = marimo.App()
@app.cell
def _():
    x = 1
    return
@app.cell
def _():
    x = 2  # Multiple definitions
    return
""")

    async def test_real_rules_with_early_stopping(self):
        """Test real rules with early stopping."""
        from marimo._lint.rules.breaking import MultipleDefinitionsRule
        from marimo._lint.rules.formatting import GeneralFormattingRule

        # Stop on first breaking error
        config = EarlyStoppingConfig(stop_on_breaking=True)
        checker = RuleEngine(
            [MultipleDefinitionsRule(), GeneralFormattingRule()],
            early_stopping=config,
        )
        diagnostics = []
        async for diagnostic in checker.check_notebook_streaming(
            self.notebook
        ):
            diagnostics.append(diagnostic)
            # Should stop after first breaking error
            if diagnostic.severity == Severity.BREAKING:
                break
        # Should have at least one breaking diagnostic (multiple definitions)
        breaking_diagnostics = [
            d for d in diagnostics if d.severity == Severity.BREAKING
        ]
        assert len(breaking_diagnostics) > 0
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/test_streaming_early_stopping.py",
"license": "Apache License 2.0",
"lines": 310,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_lint/utils.py | """Utilities for lint tests."""
from marimo._lint.diagnostic import Diagnostic
from marimo._lint.rule_engine import RuleEngine
from marimo._schemas.serialization import NotebookSerializationV1
def lint_notebook(
    notebook: NotebookSerializationV1, contents: str = ""
) -> list[Diagnostic]:
    """Run the default rule set over ``notebook`` and return every diagnostic."""
    return RuleEngine.create_default().check_notebook_sync(notebook, contents)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_lint/utils.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_ast/codegen_data/test_syntax_errors.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def global_error():
tickers = ["AAPL", "GOOGL"]
global tickers
@app.cell
def return_error(tickers):
if tickers is not None:
return tickers
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ast/codegen_data/test_syntax_errors.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_ast/codegen_data/test_decorators.py | import marimo
__generated_with = "0.15.3"
app = marimo.App(width="medium")
with app.setup():
def wrapper(fn):
return fn
# AST is slightly different when decorator is called
def called_wrapper():
return wrapper
@app.function
@wrapper
def my_wrapped():
pass
@wrapper
@app.function
def inv_wrapped():
pass
@wrapper
@app.cell
def my_cell():
pass
# NOTE: This is an invalid case. A cell should never be decorated via codegen,
# However, we capture the case because internal errors should never happen from
# user code.
@app.cell
@wrapper
def inv_cell():
pass
@app.class_definition
@wrapper
class MyClass:
pass
@wrapper
@app.class_definition
class InvClass:
pass
@app.function
@called_wrapper()
def my_wrapped_called():
pass
@called_wrapper()
@app.function
def inv_wrapped_called():
pass
@called_wrapper()
@app.cell
def my_cell_called():
pass
@app.cell
@called_wrapper()
def inv_cell_called():
pass
@app.class_definition
@called_wrapper()
class MyClassCalled:
pass
@called_wrapper()
@app.class_definition
class InvClassCalled:
pass
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ast/codegen_data/test_decorators.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_output/superjson.py | # Copyright 2026 Marimo. All rights reserved.
# This data serializes
from __future__ import annotations
from typing import Any
class SuperJson:  # noqa: B903
    """Opt-out wrapper for the default msgspec JSON encoding.

    Wrapping a value in SuperJson routes serialization through marimo's
    custom ``enc_hook`` instead of the stock msgspec encoder, yielding
    output that is more human readable and not information-lossy:

    - b'hello' serializes to 'hello' instead of a base64 string
    - float('inf') serializes to Infinity instead of null
    - float('nan') serializes to NaN instead of null
    - timedelta serializes to a human readable string
      (e.g. "1 day, 2:03:00") instead of an ISO 8601 duration
    """

    def __init__(self, data: Any):
        # Hold the raw value; encoding is deferred until serialization.
        self.data = data

    def _marimo_serialize_(self) -> Any:
        """Encode ``self.data`` via marimo's enc_hook (imported lazily)."""
        from marimo._messaging.msgspec_encoder import enc_hook

        return enc_hook(self.data)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_output/superjson.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_ai/_tools/base.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import inspect
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass, is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Callable,
Generic,
Optional,
TypeVar,
cast,
get_args,
get_origin,
)
from marimo import _loggers
from marimo._ai._tools.types import (
MarimoCellConsoleOutputs,
MarimoCellErrors,
MarimoErrorDetail,
MarimoNotebookInfo,
ToolGuidelines,
)
from marimo._ai._tools.utils.exceptions import ToolExecutionError
from marimo._ai._tools.utils.output_cleaning import clean_output
from marimo._config.config import CopilotMode
from marimo._messaging.cell_output import CellChannel
from marimo._messaging.notification import CellNotification
from marimo._server.ai.tools.types import (
FunctionArgs,
ToolDefinition,
ValidationFunction,
)
from marimo._server.api.deps import AppStateBase
from marimo._session.model import ConnectionState
from marimo._types.ids import CellId_t, SessionId
from marimo._utils.case import to_snake_case
from marimo._utils.dataclass_to_openapi import PythonTypeToOpenAPI
from marimo._utils.parse_dataclass import parse_raw
# Module-level logger shared by every tool in this module.
LOGGER = _loggers.marimo_logger()

# Invariant type parameters binding a tool's input/output schema types.
ArgsT = TypeVar("ArgsT")
OutT = TypeVar("OutT")
# Variance-annotated variants (contravariant input / covariant output)
# for protocol-style usage.
ArgsP = TypeVar("ArgsP", contravariant=True)
OutC = TypeVar("OutC", covariant=True)
if TYPE_CHECKING:
from collections.abc import Awaitable
from starlette.applications import Starlette
from marimo._server.session_manager import SessionManager
from marimo._session import Session
@dataclass
class ToolContext:
    """Shared context handed to every tool.

    Wraps the Starlette app so tools can reach the session manager and
    per-session notebook state (cell notifications, errors, console output).
    """

    # The running Starlette application; None until attached by the server.
    app: Optional[Starlette] = None

    @property
    def session_manager(self) -> SessionManager:
        """Return the SessionManager from the app's state."""
        app = self.get_app()
        state = AppStateBase.from_app(app)
        session_manager = state.session_manager
        return session_manager

    def get_app(self) -> Starlette:
        """Return the app, raising ToolExecutionError if not attached."""
        app = self.app
        if app is None:
            raise ToolExecutionError(
                "App is not available",
                code="APP_NOT_AVAILABLE",
                is_retryable=False,
                suggested_fix="Try restarting the marimo server.",
            )
        return app

    def get_session(self, session_id: SessionId) -> Session:
        """Look up a session by id, raising ToolExecutionError if absent."""
        session = self.session_manager.get_session(session_id)
        if session is None:
            raise ToolExecutionError(
                f"Session {session_id} not found",
                code="SESSION_NOT_FOUND",
                is_retryable=False,
                suggested_fix="Use get_active_notebooks to find valid session IDs",
                meta={"session_id": session_id},
            )
        return session

    def get_cell_notification(
        self, session_id: SessionId, cell_id: CellId_t
    ) -> CellNotification:
        """Return the stored CellNotification for a cell, raising if absent."""
        session_view = self.get_session(session_id).session_view
        if cell_id not in session_view.cell_notifications:
            raise ToolExecutionError(
                f"Cell notification not found for cell {cell_id}",
                code="CELL_NOTIFICATION_NOT_FOUND",
                is_retryable=False,
                suggested_fix="Try again with a valid cell ID.",
                meta={"cell_id": cell_id},
            )
        return session_view.cell_notifications[cell_id]

    def get_active_sessions_internal(self) -> list[MarimoNotebookInfo]:
        """
        Get active sessions from the app state.

        This follows the logic from marimo/_server/api/endpoints/home.py
        """
        import os

        UNSAVED_NOTEBOOK_MESSAGE = (
            "(unsaved notebook - save to disk to get file path)"
        )
        files: list[MarimoNotebookInfo] = []
        for session_id, session in self.session_manager.sessions.items():
            state = session.connection_state()
            # Only sessions with an open or orphaned connection are "active".
            if (
                state == ConnectionState.OPEN
                or state == ConnectionState.ORPHANED
            ):
                full_file_path = session.app_file_manager.path
                filename = session.app_file_manager.filename
                basename = os.path.basename(filename) if filename else None
                files.append(
                    MarimoNotebookInfo(
                        name=(basename or "new notebook"),
                        # file path should be absolute path for agent-based edit tools
                        path=(full_file_path or UNSAVED_NOTEBOOK_MESSAGE),
                        session_id=session_id,
                    )
                )
        # Return most recent notebooks first (reverse chronological order)
        return files[::-1]

    def get_notebook_errors(
        self, session_id: SessionId, include_stderr: bool
    ) -> list[MarimoCellErrors]:
        """
        Get all errors in the current notebook session, organized by cell.

        Optionally include stderr messages for each cell.
        """
        session = self.get_session(session_id)
        session_view = session.session_view
        cell_errors_map: dict[CellId_t, MarimoCellErrors] = {}
        notebook_errors: list[MarimoCellErrors] = []
        stderr: list[str] = []
        for cell_id, cell_notif in session_view.cell_notifications.items():
            errors = self.get_cell_errors(
                session_id,
                cell_id,
                maybe_cell_notif=cell_notif,
            )
            if include_stderr:
                stderr = self.get_cell_console_outputs(cell_notif).stderr
            if errors:
                cell_errors_map[cell_id] = MarimoCellErrors(
                    cell_id=cell_id,
                    errors=errors,
                    stderr=stderr,
                )
        # Use cell_manager to get cells in the correct notebook order
        cell_manager = session.app_file_manager.app.cell_manager
        for cell_data in cell_manager.cell_data():
            cell_id = cell_data.cell_id
            if cell_id in cell_errors_map:
                notebook_errors.append(cell_errors_map[cell_id])
        return notebook_errors

    def get_cell_errors(
        self,
        session_id: SessionId,
        cell_id: CellId_t,
        maybe_cell_notif: Optional[CellNotification] = None,
    ) -> list[MarimoErrorDetail]:
        """
        Get all errors for a given cell.

        Pass ``maybe_cell_notif`` to reuse an already-fetched notification
        and skip the lookup (which raises when the cell has none).
        """
        errors: list[MarimoErrorDetail] = []
        cell_notif = maybe_cell_notif or self.get_cell_notification(
            session_id, cell_id
        )
        # Errors are only reported on the MARIMO_ERROR output channel.
        if (
            not cell_notif.output
            or cell_notif.output.channel != CellChannel.MARIMO_ERROR
        ):
            return errors
        items = cell_notif.output.data
        if not isinstance(items, list):
            # no errors
            return errors
        for err in items:
            # TODO: filter out noisy useless errors
            # like "An ancestor raised an exception..."
            if isinstance(err, dict):
                errors.append(
                    MarimoErrorDetail(
                        type=err.get("type", "UnknownError"),
                        message=err.get("msg", str(err)),
                        traceback=err.get("traceback", []),
                    )
                )
            else:
                # Fallback for rich error objects
                err_type: str = getattr(err, "type", type(err).__name__)
                describe_fn: Optional[Any] = getattr(err, "describe", None)
                message_val = (
                    describe_fn() if callable(describe_fn) else str(err)
                )
                message: str = str(message_val)
                tb: list[str] = getattr(err, "traceback", []) or []
                errors.append(
                    MarimoErrorDetail(
                        type=err_type,
                        message=message,
                        traceback=tb,
                    )
                )
        return errors

    def get_cell_console_outputs(
        self, cell_notif: CellNotification
    ) -> MarimoCellConsoleOutputs:
        """
        Get the console outputs for a given cell notification.

        Splits the notification's console entries into stdout/stderr lists
        and runs both through clean_output before returning.
        """
        stdout_messages: list[str] = []
        stderr_messages: list[str] = []
        if cell_notif.console is None:
            return MarimoCellConsoleOutputs(stdout=[], stderr=[])
        # Console may be a single output or a list; normalize to a list.
        console_outputs = (
            cell_notif.console
            if isinstance(cell_notif.console, list)
            else [cell_notif.console]
        )
        for output in console_outputs:
            if output is None:
                continue
            elif output.channel == CellChannel.STDOUT:
                stdout_messages.append(str(output.data))
            elif output.channel == CellChannel.STDERR:
                stderr_messages.append(str(output.data))
        cleaned_stdout_messages = clean_output(stdout_messages)
        cleaned_stderr_messages = clean_output(stderr_messages)
        return MarimoCellConsoleOutputs(
            stdout=cleaned_stdout_messages, stderr=cleaned_stderr_messages
        )
class ToolBase(Generic[ArgsT, OutT], ABC):
    """
    Minimal base class for dual-registered tools.

    Subclasses MUST set:
      - name: str (optional; defaults to class name)
      - description: str (optional; defaults to class docstring)
      - Args: Type (input schema type)
      - Output: Type (output schema type)
      - Args and Output must be set via generics, e.g. ToolBase[ArgsModel, OutputModel]
    """

    # Override in subclass, or rely on fallbacks below
    name: str = ""
    description: str = ""
    guidelines: Optional[ToolGuidelines] = None
    Args: type[ArgsT]
    Output: type[OutT]
    context: ToolContext

    def __init_subclass__(cls, **kwargs: object) -> None:
        """Grab ToolBase[...] type parameters and set Args/Output on the subclass."""
        super().__init_subclass__(**kwargs)
        # Find the ToolBase[...] in the subclass' original bases
        for base in getattr(cls, "__orig_bases__", ()):
            if get_origin(base) is ToolBase:
                a, o = get_args(base)
                cls.Args = a  # type: ignore[assignment]
                cls.Output = o  # type: ignore[assignment]
                break
        # If not provided via generics and not manually set, fail early
        if not hasattr(cls, "Args") or not hasattr(cls, "Output"):
            raise TypeError(
                f"{cls.__name__} must specify type arguments, e.g. "
                f"class {cls.__name__}(ToolBase[ArgsModel, OutputModel]): ..."
            )

    def __init__(self, context: ToolContext) -> None:
        """Store the context and derive name/description fallbacks."""
        self.context = context
        # get name from class name
        if self.name == "":
            self.name = to_snake_case(self.__class__.__name__)
        # get description from class docstring
        if self.description == "":
            base_description = (self.__class__.__doc__ or "").strip()
            # If guidelines exist, append them
            if self.guidelines is not None:
                self.description = self._format_with_guidelines(
                    base_description, self.guidelines
                )
            else:
                self.description = base_description

    async def __call__(self, args: ArgsT) -> OutT:
        """
        Unified runner: coerces args, calls handle(), and awaits the result
        if it is a coroutine. Adapters should always use this.
        """
        try:
            coerced_args = self._coerce_args(args)
        except Exception as e:
            raise ToolExecutionError(
                f"Bad arguments: {args}",
                code="BAD_ARGUMENTS",
                is_retryable=False,
                suggested_fix="Try again with valid arguments.",
                meta={"args": args},
            ) from e
        try:
            result = self.handle(coerced_args)
            if inspect.isawaitable(result):
                awaited = await result  # type: ignore[no-any-return]
                return cast(OutT, awaited)
            return cast(OutT, result)  # type: ignore[redundant-cast]
        except ToolExecutionError:
            # Let intentional tool errors propagate unchanged
            raise
        except Exception as e:
            # Standardize unexpected failures
            LOGGER.error(f"Unexpected error in tool {self.name}: {e}")
            raise ToolExecutionError(
                self._default_error_message(),
                code=self._default_error_code(),
                is_retryable=self._default_is_retryable(),
                suggested_fix=self._default_suggested_fix(),
                meta=self._error_context(coerced_args),
            ) from e

    @abstractmethod
    def handle(self, args: ArgsT) -> OutT:
        """Actual tool function."""
        ...

    # adapters

    def as_mcp_tool_fn(self) -> Callable[[ArgsT], Awaitable[OutT]]:
        """Return a typed, annotated callable suitable for MCP registration."""
        Args = self.Args
        Output = self.Output

        async def handler(args: ArgsT) -> OutT:  # type: ignore[type-var]
            result = await self.__call__(args)
            # Ensure JSON-serializable output for MCP
            if is_dataclass(result):
                # Some MCP clients expect dicts only
                return cast(OutT, asdict(result))  # type: ignore[arg-type]
            return result

        # name/doc metadata (guard for None types)
        handler_any = cast(Any, handler)
        handler_any.__name__ = self.name or handler_any.__name__
        handler_any.__doc__ = self.description or handler_any.__doc__
        # help static consumers and schema tools
        handler_any.__annotations__ = {"args": Args, "return": Output}
        # Advertise intended signature for mypy/tests
        # Keep in try/except to avoid breaking tool registration
        try:
            handler_any.__signature__ = inspect.Signature(
                parameters=[
                    inspect.Parameter(
                        "args",
                        inspect.Parameter.POSITIONAL_OR_KEYWORD,
                        annotation=Args,
                    )
                ],
                return_annotation=Output,
            )
        except Exception:
            # Best-effort only; safe to skip if inspect behavior changes
            pass

        return handler

    def as_backend_tool(
        self, mode: list[CopilotMode]
    ) -> tuple[ToolDefinition, ValidationFunction]:
        """Convert the tool to a ToolDefinition for backend use."""
        # convert the args to python dict
        converter = PythonTypeToOpenAPI(name_overrides={}, camel_case=False)
        converted_args = converter.convert(self.Args, processed_classes={})
        # get tool_definition
        tool_definition = ToolDefinition(
            name=self.name,
            description=self.description,
            parameters=converted_args,
            source="backend",
            mode=mode,
        )
        # get validation_function
        validation_function = self._create_validation_function(self.Args)
        return tool_definition, validation_function

    # helpers

    def _coerce_args(self, args: Any) -> ArgsT:  # type: ignore[override]
        """If Args is a dataclass and args is a dict, construct it; else pass through."""
        if is_dataclass(args):
            # Already parsed
            return args  # type: ignore[return-value]
        return parse_raw(args, self.Args)

    def _format_with_guidelines(
        self, description: str, guidelines: ToolGuidelines
    ) -> str:
        """Combine description with structured guidelines."""
        parts = [description] if description else []
        if guidelines.when_to_use:
            parts.append("\n## When to use:")
            parts.extend(f"- {item}" for item in guidelines.when_to_use)
        if guidelines.avoid_if:
            parts.append("\n## Avoid if:")
            parts.extend(f"- {item}" for item in guidelines.avoid_if)
        if guidelines.prerequisites:
            parts.append("\n## Prerequisites:")
            parts.extend(f"- {item}" for item in guidelines.prerequisites)
        if guidelines.side_effects:
            parts.append("\n## Side effects:")
            parts.extend(f"- {item}" for item in guidelines.side_effects)
        if guidelines.additional_info:
            parts.append("\n## Additional info:")
            parts.append(guidelines.additional_info)
        return "\n".join(parts)

    # error defaults/hooks

    def _default_error_code(self) -> str:
        """Error code attached to unexpected (non-ToolExecutionError) failures."""
        return "UNEXPECTED_ERROR"

    def _default_error_message(self) -> str:
        """Human-readable message for unexpected failures."""
        return f"{self.name or self.__class__.__name__} failed"

    def _default_is_retryable(self) -> bool:
        """Whether unexpected failures are marked retryable."""
        return True

    def _default_suggested_fix(self) -> Optional[str]:
        """Optional suggested fix for unexpected failures; override per tool."""
        return None

    def _error_context(self, _args: Any) -> dict[str, Any]:
        """Extra metadata for unexpected failures; override per tool."""
        return {}

    def _create_validation_function(
        self, args_type: type[Any]
    ) -> ValidationFunction:
        """Create a validator using parse_raw against the tool's Args type."""

        def validation_function(
            arguments: FunctionArgs,
        ) -> Optional[tuple[bool, str]]:
            try:
                # Will raise on bad types/required fields
                parse_raw(arguments, args_type)
                return True, ""
            except Exception as e:
                return False, f"Invalid arguments: {e}"

        return validation_function
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_tools/base.py",
"license": "Apache License 2.0",
"lines": 424,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_ai/_tools/tools/cells.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import json
from dataclasses import dataclass, field
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional
from marimo._ai._tools.base import ToolBase
from marimo._ai._tools.types import (
MarimoCellConsoleOutputs,
MarimoErrorDetail,
SuccessResult,
ToolGuidelines,
)
from marimo._ai._tools.utils.exceptions import ToolExecutionError
from marimo._ast.models import CellData
from marimo._messaging.cell_output import CellChannel
from marimo._messaging.errors import Error
from marimo._messaging.notification import (
CellNotification,
VariableValue,
)
from marimo._types.ids import CellId_t, SessionId
if TYPE_CHECKING:
from marimo._ast.models import CellData
from marimo._session import Session
class SupportedCellType(str, Enum):
    """Cell categories distinguished in the lightweight cell map."""

    CODE = "code"
    MARKDOWN = "markdown"
    SQL = "sql"
@dataclass
class GetLightweightCellMapArgs:
    """Arguments for the GetLightweightCellMap tool."""

    session_id: SessionId
    # Number of leading lines to show per cell; the handler clamps this
    # to the range [1, 50].
    preview_lines: int = 3
@dataclass
class LightweightCellInfo:
    """Per-cell summary entry in the lightweight cell map."""

    cell_id: str
    # First few lines of the cell's code (joined with newlines).
    preview: str
    line_count: int
    cell_type: SupportedCellType
    # None when the cell has never been executed.
    runtime_state: Optional[str] = None
    has_output: bool = False
    has_console_output: bool = False
    has_errors: bool = False
@dataclass
class GetLightweightCellMapOutput(SuccessResult):
    """Result payload for the GetLightweightCellMap tool."""

    session_id: str = ""
    notebook_name: str = ""
    cells: list[LightweightCellInfo] = field(default_factory=list)
    total_cells: int = 0
    # Echo of the (clamped) preview_lines value actually used.
    preview_lines: int = 3
@dataclass
class CellRuntimeMetadata:
    """Runtime metadata for a single cell."""

    # String form of the runtime state (see marimo._ast.cell.RuntimeStateType);
    # keep as str for py39/Pydantic compatibility and to avoid Literal/Enum
    # validation issues in models.
    runtime_state: Optional[str] = None
    # Duration of the last execution in milliseconds.
    # Only populated when runtime_state is "idle"; null otherwise.
    execution_time: Optional[float] = None
# Mapping of variable name -> VariableValue for a single cell.
CellVariables = dict[str, VariableValue]
@dataclass
class GetCellRuntimeDataData:
    """Runtime data for one cell: code, errors, metadata, and variables."""

    session_id: str
    cell_id: str
    code: Optional[str] = None
    errors: Optional[list[MarimoErrorDetail]] = None
    metadata: Optional[CellRuntimeMetadata] = None
    variables: Optional[CellVariables] = None
@dataclass
class GetCellRuntimeDataArgs:
    """Arguments selecting which cells to fetch runtime data for."""

    session_id: SessionId
    cell_ids: list[CellId_t] = field(default_factory=list)
@dataclass
class GetCellRuntimeDataOutput(SuccessResult):
    """Result payload wrapping per-cell runtime data entries."""

    data: list[GetCellRuntimeDataData] = field(default_factory=list)
@dataclass
class CellVisualOutput:
    """Visual from a cell execution."""

    # Rendered output payload; visual_mimetype describes its encoding.
    visual_output: Optional[str] = None
    visual_mimetype: Optional[str] = None
@dataclass
class CellOutputData:
    """Visual and console outputs for a single cell."""

    cell_id: str
    visual_output: CellVisualOutput = field(default_factory=CellVisualOutput)
    console_outputs: MarimoCellConsoleOutputs = field(
        default_factory=MarimoCellConsoleOutputs
    )
@dataclass
class GetCellOutputArgs:
    """Arguments selecting which cells to fetch outputs for."""

    session_id: SessionId
    cell_ids: list[CellId_t] = field(default_factory=list)
@dataclass
class GetCellOutputOutput(SuccessResult):
    """Result payload wrapping per-cell output data."""

    cells: list[CellOutputData] = field(default_factory=list)
class GetLightweightCellMap(
    ToolBase[GetLightweightCellMapArgs, GetLightweightCellMapOutput]
):
    """Get a lightweight map of cells showing the first few lines of each cell.
    This tool provides an overview of notebook structure for initial navigation,
    showing a preview of each cell's content without full code or outputs.
    Each cell includes a runtime_state field with one of the following values:
    - "idle": cell has executed and is quiescent (includes cells with errors)
    - "running": cell is actively executing
    - "queued": cell is waiting on a running dependency
    - "disabled-transitively": cell is disabled because a parent cell is disabled
    - null: cell has never been executed
    Args:
        session_id: The session ID of the notebook from get_active_notebooks
        preview_lines: Number of lines to show per cell (default: 3)
    Returns:
        A success result containing lightweight cell previews and navigation info.
    """

    guidelines = ToolGuidelines(
        when_to_use=[
            "To get an overview of notebook structure and all cell IDs",
            "When navigating a notebook before making targeted changes",
        ],
        prerequisites=[
            "You must have a valid session id from an active notebook",
        ],
    )

    def handle(
        self, args: GetLightweightCellMapArgs
    ) -> GetLightweightCellMapOutput:
        session = self.context.get_session(args.session_id)
        manager = session.app_file_manager.app.cell_manager
        view = session.session_view
        notebook_name = (
            session.app_file_manager.filename or "untitled_notebook.py"
        )
        # Clamp the requested preview size into [1, 50].
        n_preview = max(1, min(50, args.preview_lines))
        infos = [
            self._summarize_cell(cell_data, view, n_preview)
            for cell_data in manager.cell_data()
        ]
        return GetLightweightCellMapOutput(
            status="success",
            session_id=args.session_id,
            notebook_name=notebook_name,
            cells=infos,
            total_cells=len(infos),
            preview_lines=n_preview,
            next_steps=[
                "Use cell_id to get full cell content or execute specific cells",
                "Identify key sections based on cell types and previews",
                "Focus on import cells first to understand dependencies",
            ],
            message=(
                "Refer to cells ordinally in the following format: @[cell:1]. "
                "Do _not_ use cell_id when discussing with users."
            ),
        )

    # helper methods
    def _summarize_cell(
        self, cell_data: CellData, session_view: Any, preview_lines: int
    ) -> LightweightCellInfo:
        """Build the lightweight preview record for a single cell."""
        source_lines = cell_data.code.split("\n")
        state: Optional[str] = None
        has_output = False
        has_console = False
        has_errors = False
        notif = session_view.cell_notifications.get(cell_data.cell_id)
        if notif is not None:
            if notif.status is not None:
                state = notif.status
            out = notif.output
            # An output on the error channel counts as an error, not output.
            has_errors = (
                out is not None and out.channel == CellChannel.MARIMO_ERROR
            )
            has_output = (
                not has_errors and out is not None and out.data is not None
            )
            has_console = bool(notif.console)
        return LightweightCellInfo(
            cell_id=cell_data.cell_id,
            preview="\n".join(source_lines[:preview_lines]),
            line_count=len(source_lines),
            cell_type=self._get_cell_type(cell_data),
            runtime_state=state,
            has_output=has_output,
            has_console_output=has_console,
            has_errors=has_errors,
        )

    def _is_markdown_cell(self, code: str) -> bool:
        # Heuristic: a cell whose source begins with mo.md( renders markdown.
        return code.lstrip().startswith("mo.md(")

    def _get_cell_type(self, cell_data: CellData) -> SupportedCellType:
        """Classify a cell as sql, markdown, or code."""
        markdown_or_code = (
            SupportedCellType.MARKDOWN
            if self._is_markdown_cell(cell_data.code)
            else SupportedCellType.CODE
        )
        if cell_data.cell is None:
            # No compiled cell available; rely on the source heuristic.
            return markdown_or_code
        language = cell_data.cell._cell.language
        if language == "sql":
            return SupportedCellType.SQL
        if language == "python":
            return markdown_or_code
        return SupportedCellType.CODE
class GetCellRuntimeData(
    ToolBase[GetCellRuntimeDataArgs, GetCellRuntimeDataOutput]
):
    """Get runtime data for one or more cells including code, errors, and variables.
    This tool provides detailed runtime information for the given cells,
    including source code, any execution errors, and the variables
    defined or modified in each cell.
    Each cell's metadata includes a runtime_state field with one of the following values:
    - "idle": cell has executed and is quiescent (includes cells with errors)
    - "running": cell is actively executing
    - "queued": cell is waiting on a running dependency
    - "disabled-transitively": cell is disabled because a parent cell is disabled
    - null: cell has never been executed
    The execution_time field contains the duration of the last execution in
    milliseconds. It is only populated when runtime_state is "idle"; it is
    null while the cell is running, queued, or has not been executed.
    Args:
        session_id: The session ID of the notebook from get_active_notebooks
        cell_ids: A list of cell IDs to get runtime data for from get_lightweight_cell_map.
            If an empty list is provided, it will return data for all cells.
    Returns:
        A success result containing cell runtime data including code, errors, and variables for each cell.
    """

    guidelines = ToolGuidelines(
        when_to_use=[
            "When inspecting one or more cells' code, errors, or variables",
            "After identifying cells of interest from the cell map",
        ],
        prerequisites=[
            "You must have a valid session id from an active notebook",
            "You must have valid cell ids from an active notebook",
        ],
    )

    def handle(self, args: GetCellRuntimeDataArgs) -> GetCellRuntimeDataOutput:
        session = self.context.get_session(args.session_id)
        # An empty id list is shorthand for "every cell in the notebook".
        target_ids = args.cell_ids or [
            cd.cell_id
            for cd in session.app_file_manager.app.cell_manager.cell_data()
        ]
        collected = [
            self._collect_one(session, args.session_id, cell_id)
            for cell_id in target_ids
        ]
        return GetCellRuntimeDataOutput(
            data=collected,
            next_steps=[
                "Review cell code for understanding the implementation",
                "Check errors to identify any execution issues",
                "Examine variables to understand cell outputs and state",
            ],
        )

    # helper methods
    def _collect_one(
        self, session: Session, session_id: SessionId, cell_id: CellId_t
    ) -> GetCellRuntimeDataData:
        """Assemble code, errors, metadata, and variables for one cell."""
        cell_data = self._get_cell_data(session, session_id, cell_id)
        return GetCellRuntimeDataData(
            session_id=session_id,
            cell_id=cell_id,
            code=cell_data.code,
            errors=self.context.get_cell_errors(session_id, cell_id),
            metadata=self._get_cell_metadata(session, cell_id),
            variables=self._get_cell_variables(session, cell_data),
        )

    def _get_cell_data(
        self, session: Session, session_id: SessionId, cell_id: CellId_t
    ) -> CellData:
        """Look up a cell's data, raising a structured error when absent."""
        found = session.app_file_manager.app.cell_manager.get_cell_data(
            cell_id
        )
        if found is not None:
            return found
        raise ToolExecutionError(
            f"Cell {cell_id} not found in session {session_id}",
            code="CELL_NOT_FOUND",
            is_retryable=False,
            suggested_fix="Use get_lightweight_cell_map to find valid cell IDs",
        )

    def _get_cell_metadata(
        self, session: Session, cell_id: CellId_t
    ) -> CellRuntimeMetadata:
        """Get cell runtime metadata including status and execution info."""
        notif = session.session_view.cell_notifications.get(cell_id)
        state: Optional[str] = None
        if notif and notif.status is not None:
            state = notif.status
        # The stored timing value is only a duration (ms) once the cell is
        # idle; while running it is the start timestamp (epoch), which would
        # be confusing for consumers, so it is suppressed.
        duration = (
            session.session_view.last_execution_time.get(cell_id)
            if state == "idle"
            else None
        )
        return CellRuntimeMetadata(
            runtime_state=state, execution_time=duration
        )

    def _get_cell_variables(
        self, session: Session, cell_data: Optional[CellData]
    ) -> CellVariables:
        """Get variables defined by a specific cell and their values."""
        if not cell_data or not cell_data.cell:
            return {}
        known = session.session_view.variable_values
        # Restrict the session-wide variable map to this cell's definitions.
        return {
            name: known[name]
            for name in cell_data.cell._cell.defs
            if name in known
        }
class GetCellOutputs(ToolBase[GetCellOutputArgs, GetCellOutputOutput]):
    """Get cell execution outputs including visual display and console streams.
    Args:
        session_id: The session ID of the notebook from get_active_notebooks
        cell_ids: A list of cell IDs from get_lightweight_cell_map.
            If an empty list is provided, it will return outputs for all cells.
    Returns:
        Visual output (HTML, charts, tables, etc.) with mimetype and console streams (stdout/stderr) for each cell.
    """

    guidelines = ToolGuidelines(
        when_to_use=[
            "When you need to see what one or more cells displayed or printed",
            "To review charts, visualizations, markdown, HTML, or console output from cells",
        ],
        prerequisites=[
            "You must have a valid session id from an active notebook",
            "You must have valid cell ids from an active notebook",
        ],
    )

    def handle(self, args: GetCellOutputArgs) -> GetCellOutputOutput:
        session = self.context.get_session(args.session_id)
        view = session.session_view
        # An empty id list is shorthand for "every cell in the notebook".
        target_ids = args.cell_ids or [
            cd.cell_id
            for cd in session.app_file_manager.app.cell_manager.cell_data()
        ]
        collected: list[CellOutputData] = []
        for cell_id in target_ids:
            notif = view.cell_notifications.get(cell_id)
            if notif is None:
                raise ToolExecutionError(
                    f"Cell {cell_id} not found in session {args.session_id}",
                    code="CELL_NOT_FOUND",
                    is_retryable=False,
                    suggested_fix="Use get_lightweight_cell_map to find valid cell IDs",
                )
            output_data, output_mimetype = self._get_visual_output(notif)
            collected.append(
                CellOutputData(
                    cell_id=cell_id,
                    visual_output=CellVisualOutput(
                        visual_output=output_data,
                        visual_mimetype=output_mimetype,
                    ),
                    console_outputs=self.context.get_cell_console_outputs(
                        notif
                    ),
                )
            )
        return GetCellOutputOutput(
            cells=collected,
            next_steps=[
                "Review visual_output to see what was displayed to the user",
                "Check stdout/stderr for print statements and warnings",
            ],
        )

    def _get_visual_output(
        self, cell_notif: CellNotification
    ) -> tuple[Optional[str], Optional[str]]:
        """Extract (data, mimetype) from a cell's visual output, if any."""
        output = cell_notif.output
        if not output:
            return None, None
        if output.channel == CellChannel.MARIMO_ERROR:
            # Errors are surfaced as structured JSON rather than raw reprs.
            return self._get_error_output_data(output.data), "application/json"
        return self._get_str_output_data(output.data), output.mimetype

    def _get_error_output_data(
        self, data: str | list[Error] | dict[str, Any]
    ) -> str:
        """Convert error output data to structured JSON."""
        if not isinstance(data, list):
            return str(data)
        structured: list[dict[str, Any]] = []
        for item in data:
            if isinstance(item, dict):
                entry = {
                    "type": item.get("type", "UnknownError"),
                    "message": item.get("msg", str(item)),
                }
            else:
                # Error objects expose a type attribute and a describe()
                # method; fall back to class name / str() when missing.
                describe = getattr(item, "describe", None)
                entry = {
                    "type": getattr(item, "type", type(item).__name__),
                    "message": str(
                        describe() if callable(describe) else str(item)
                    ),
                }
            structured.append(entry)
        return json.dumps(structured)

    def _get_str_output_data(
        self, data: str | list[Error] | dict[str, Any]
    ) -> str:
        """Coerce non-error output data to a string."""
        return data if isinstance(data, str) else str(data)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_tools/tools/cells.py",
"license": "Apache License 2.0",
"lines": 434,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_ai/_tools/tools/notebooks.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass, field
from marimo._ai._tools.base import ToolBase
from marimo._ai._tools.types import (
EmptyArgs,
MarimoNotebookInfo,
SuccessResult,
ToolGuidelines,
)
@dataclass
class SummaryInfo:
    """Aggregate counts shown alongside the notebook list."""

    total_notebooks: int
    active_connections: int
@dataclass
class GetActiveNotebooksData:
    """Summary statistics plus one entry per active notebook session."""

    summary: SummaryInfo
    notebooks: list[MarimoNotebookInfo]
def _default_active_notebooks_data() -> GetActiveNotebooksData:
    """Factory for an empty result: zero notebooks, zero connections."""
    empty_summary = SummaryInfo(total_notebooks=0, active_connections=0)
    return GetActiveNotebooksData(summary=empty_summary, notebooks=[])
@dataclass
class GetActiveNotebooksOutput(SuccessResult):
    """Result payload for GetActiveNotebooks."""

    data: GetActiveNotebooksData = field(
        default_factory=_default_active_notebooks_data
    )
class GetActiveNotebooks(ToolBase[EmptyArgs, GetActiveNotebooksOutput]):
    """List currently active marimo notebooks and a summary block.
    Returns:
        A success result containing summary statistics and notebook details.
    """

    guidelines = ToolGuidelines(
        when_to_use=[
            "At the start of marimo notebook interactions to get session IDs",
            "When receiving session-related errors",
        ],
        additional_info="Use the file paths returned by this tool to directly edit a notebook.",
    )

    def handle(self, args: EmptyArgs) -> GetActiveNotebooksOutput:
        del args  # takes no input
        notebooks = self.context.get_active_sessions_internal()
        connection_count = (
            self.context.session_manager.get_active_connection_count()
        )
        return GetActiveNotebooksOutput(
            data=GetActiveNotebooksData(
                summary=SummaryInfo(
                    total_notebooks=len(notebooks),
                    active_connections=connection_count,
                ),
                notebooks=notebooks,
            ),
            next_steps=[
                "Use the `get_lightweight_cell_map` tool to get the content of a notebook",
                "Use the `get_notebook_errors` tool to help debug errors in the notebook",
            ],
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_tools/tools/notebooks.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_ai/_tools/types.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Literal, Optional
from marimo._types.ids import CellId_t, SessionId
# helper classes
# Overall result status a tool may report.
StatusValue = Literal["success", "error", "warning"]


@dataclass
class SuccessResult:
    """Base class for tool results: status plus optional guidance fields."""

    status: StatusValue = "success"
    auth_required: bool = False
    # Suggested follow-up actions for the calling assistant.
    next_steps: Optional[list[str]] = None
    action_url: Optional[str] = None
    message: Optional[str] = None
    meta: Optional[dict[str, Any]] = None
@dataclass
class EmptyArgs:
    """Placeholder argument type for tools that take no input."""

    pass
@dataclass
class ToolGuidelines:
    """Structured guidance for AI assistants on when and how to use a tool."""

    when_to_use: Optional[list[str]] = None
    avoid_if: Optional[list[str]] = None
    prerequisites: Optional[list[str]] = None
    side_effects: Optional[list[str]] = None
    # Free-form notes not covered by the structured fields above.
    additional_info: Optional[str] = None
@dataclass
class MarimoNotebookInfo:
    """Identity of an active notebook session."""

    name: str
    path: str
    session_id: SessionId
@dataclass
class MarimoCellErrors:
    """Errors (and optional stderr) attributed to one cell."""

    cell_id: CellId_t
    # MarimoErrorDetail is defined below; the forward reference resolves
    # lazily thanks to `from __future__ import annotations`.
    errors: list[MarimoErrorDetail] = field(default_factory=list)
    stderr: list[str] = field(default_factory=list)
@dataclass
class MarimoErrorDetail:
    """A single error: type name, message, and traceback lines."""

    type: str
    message: str
    traceback: list[str]
@dataclass
class MarimoCellConsoleOutputs:
    """Captured console streams for one cell."""

    stdout: list[str] = field(default_factory=list)
    stderr: list[str] = field(default_factory=list)
@dataclass
class ListSessionsResult:
    """Flat list of active notebook sessions."""

    sessions: list[MarimoNotebookInfo] = field(default_factory=list)
@dataclass
class CodeExecutionResult:
    """Outcome of executing code: output, console streams, and errors."""

    success: bool
    output: Optional[str] = None
    stdout: list[str] = field(default_factory=list)
    stderr: list[str] = field(default_factory=list)
    errors: list[str] = field(default_factory=list)
    error: Optional[str] = None
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_tools/types.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_ai/_tools/utils/exceptions.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import asdict, dataclass
from typing import Any, Optional
@dataclass
class ToolExecutionError(Exception):
    """Raise this from a tool to signal a descriptive, structured failure."""

    message: str
    code: str = "TOOL_ERROR"
    status: int = 400
    is_retryable: bool = False
    suggested_fix: Optional[str] = None
    meta: Optional[dict[str, Any]] = None

    def __post_init__(self) -> None:
        # Initialize base Exception with the structured JSON message
        # Necessary since some MCP client (e.g. Cursor) only logs the original message
        super().__init__(self._create_structured_message())

    @property
    def original_message(self) -> str:
        """The human-readable message, without the JSON envelope."""
        return self.message

    def _payload(self) -> dict[str, Any]:
        """Return the dataclass fields as a dict with ``meta`` normalized.

        Bug fix: the previous ``payload.get("meta", {})`` was a no-op —
        ``asdict`` always includes the "meta" key (possibly None), so a None
        meta leaked through as ``null`` instead of the intended ``{}``.
        """
        payload = asdict(self)
        payload["meta"] = payload["meta"] or {}
        return payload

    def _create_structured_message(self) -> str:
        """Create a message that includes all structured error information."""
        import json

        return json.dumps(self._payload(), separators=(",", ":"))

    def to_dict(self) -> dict[str, Any]:
        """Return a dictionary representation for testing."""
        return self._payload()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_tools/utils/exceptions.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ai/tools/test_base.py | from __future__ import annotations
import inspect
from dataclasses import dataclass
from typing import Any
import pytest
from marimo._ai._tools.base import ToolBase, ToolContext
from marimo._ai._tools.utils.exceptions import ToolExecutionError
@dataclass
class _Args:
    """Minimal argument payload for the test tools."""

    value: int
@dataclass
class _Out:
    """Minimal output payload for the test tools."""

    doubled: int
class _EchoTool(ToolBase[_Args, _Out]):
    """Dummy tool for testing base adapter behavior."""

    def handle(self, args: _Args) -> _Out:
        # Trivial deterministic transform so tests can assert on the result.
        return _Out(doubled=args.value * 2)
class _ErrorTool(ToolBase[_Args, _Out]):
    """Tool that raises errors for testing."""

    def handle(self, args: _Args) -> _Out:
        value = args.value
        if value < 0:
            # Structured failure path.
            raise ToolExecutionError(
                "Negative values not allowed", code="NEGATIVE_VALUE"
            )
        if value == 0:
            # Unstructured failure path.
            raise ValueError("Zero is not allowed")
        return _Out(doubled=value * 2)
def test_as_mcp_tool_fn_returns_async_callable() -> None:
    """The MCP adapter must expose an async entry point."""
    adapter = _EchoTool(ToolContext()).as_mcp_tool_fn()
    assert inspect.iscoroutinefunction(adapter)
def test_handler_annotations_and_signature() -> None:
    """The adapter preserves the tool's argument and return annotations."""
    handler = _EchoTool(ToolContext()).as_mcp_tool_fn()
    hints: dict[str, Any] = getattr(handler, "__annotations__", {})
    assert hints.get("args") is _Args
    assert hints.get("return") is _Out
    signature = getattr(handler, "__signature__", None)
    assert signature is not None
    parameters = list(signature.parameters.values())
    assert len(parameters) == 1
    assert parameters[0].name == "args"
    assert signature.return_annotation is _Out
def test_name_and_description_defaults() -> None:
    """Name derives from the class name; description from the docstring."""
    tool = _EchoTool(ToolContext())
    assert tool.name == "_echo_tool"
    assert "Dummy tool" in (tool.description or "")
async def test_tool_call_with_valid_args() -> None:
    """Test __call__ method with valid arguments."""
    out = await _EchoTool(ToolContext())(_Args(value=5))
    assert out.doubled == 10
async def test_tool_call_handles_tool_execution_error() -> None:
    """Test __call__ properly propagates ToolExecutionError."""
    failing = _ErrorTool(ToolContext())
    with pytest.raises(ToolExecutionError) as caught:
        await failing(_Args(value=-1))
    assert caught.value.code == "NEGATIVE_VALUE"
async def test_tool_call_wraps_unexpected_error() -> None:
    """Test __call__ wraps unexpected errors in ToolExecutionError."""
    failing = _ErrorTool(ToolContext())
    with pytest.raises(ToolExecutionError) as caught:
        await failing(_Args(value=0))
    assert caught.value.code == "UNEXPECTED_ERROR"
def test_tool_execution_error_basic() -> None:
    """Test basic ToolExecutionError functionality."""
    import json

    err = ToolExecutionError("Test error", code="TEST_CODE")
    assert err.message == "Test error"
    assert err.code == "TEST_CODE"
    assert err.is_retryable is False
    # str(err) carries the structured payload and must be valid JSON.
    json.loads(str(err))  # Should not raise
def test_as_backend_tool() -> None:
    """Test as_backend_tool method."""
    definition, validate = _EchoTool(ToolContext()).as_backend_tool(["ask"])
    assert definition.name == "_echo_tool"
    assert definition.source == "backend"
    assert definition.mode == ["ask"]
    # Well-formed arguments pass validation.
    ok, reason = validate({"value": 42})
    assert ok is True
    assert reason == ""
    # Unknown fields are rejected with a descriptive message.
    ok, reason = validate({"invalid": "field"})
    assert ok is False
    assert "Invalid arguments" in reason
# test ToolContext methods
def test_get_notebook_errors_orders_by_cell_manager():
    """Test errors follow cell_manager order, not alphabetical."""
    from unittest.mock import Mock
    from marimo._messaging.cell_output import CellChannel
    from marimo._types.ids import CellId_t, SessionId
    context = ToolContext()
    # Mock error cell_notification
    error_op = Mock()
    error_op.output = Mock()
    error_op.output.channel = CellChannel.MARIMO_ERROR
    error_op.output.data = [{"type": "Error", "msg": "test", "traceback": []}]
    error_op.console = None
    # Mock session with cells c1, c2, c3
    session = Mock()
    session_view = Mock()
    # The same error notification object is reused for every cell id.
    session_view.cell_notifications = {
        CellId_t("c1"): error_op,
        CellId_t("c2"): error_op,
        CellId_t("c3"): error_op,
    }
    session.session_view = session_view
    # Cell manager returns in order: c3, c2, c1 (not alphabetical)
    cell_data = [
        Mock(cell_id=CellId_t("c3")),
        Mock(cell_id=CellId_t("c2")),
        Mock(cell_id=CellId_t("c1")),
    ]
    session.app_file_manager.app.cell_manager.cell_data.return_value = (
        cell_data
    )
    context.get_session = Mock(return_value=session)
    errors = context.get_notebook_errors(
        SessionId("test"), include_stderr=False
    )
    # Should be c3, c2, c1 (not c1, c2, c3)
    assert errors[0].cell_id == CellId_t("c3")
    assert errors[1].cell_id == CellId_t("c2")
    assert errors[2].cell_id == CellId_t("c1")
def test_get_cell_errors_extracts_from_output():
    """Test get_cell_errors extracts error details from cell output."""
    from unittest.mock import Mock
    from marimo._messaging.cell_output import CellChannel
    from marimo._types.ids import CellId_t, SessionId
    context = ToolContext()
    # Mock cell_notification with error
    cell_notification = Mock()
    cell_notification.output = Mock()
    cell_notification.output.channel = CellChannel.MARIMO_ERROR
    cell_notification.output.data = [
        {"type": "ValueError", "msg": "bad value", "traceback": ["line 1"]}
    ]
    # Passing the notification directly avoids the need for a mock session.
    errors = context.get_cell_errors(
        SessionId("test"),
        CellId_t("c1"),
        maybe_cell_notif=cell_notification,
    )
    assert len(errors) == 1
    assert errors[0].type == "ValueError"
    assert errors[0].message == "bad value"
    assert errors[0].traceback == ["line 1"]
def test_get_cell_console_outputs_separates_stdout_stderr():
    """Test get_cell_console_outputs separates stdout and stderr."""
    from unittest.mock import Mock
    from marimo._messaging.cell_output import CellChannel
    context = ToolContext()
    # Mock cell_notification with stdout and stderr
    stdout_output = Mock()
    stdout_output.channel = CellChannel.STDOUT
    stdout_output.data = "hello"
    stderr_output = Mock()
    stderr_output.channel = CellChannel.STDERR
    stderr_output.data = "warning"
    cell_notification = Mock()
    cell_notification.console = [stdout_output, stderr_output]
    result = context.get_cell_console_outputs(cell_notification)
    # Each stream ends up in its own bucket.
    assert len(result.stdout) == 1
    assert "hello" in result.stdout[0]
    assert len(result.stderr) == 1
    assert "warning" in result.stderr[0]
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ai/tools/test_base.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_ai/tools/tools/test_cells.py | from __future__ import annotations
from dataclasses import dataclass, field
from unittest.mock import Mock
import pytest
from marimo._ai._tools.base import ToolContext
from marimo._ai._tools.tools.cells import (
CellRuntimeMetadata,
CellVariables,
GetCellOutputArgs,
GetCellOutputs,
GetCellRuntimeData,
GetCellRuntimeDataArgs,
GetLightweightCellMap,
GetLightweightCellMapArgs,
)
from marimo._ai._tools.types import MarimoCellConsoleOutputs
from marimo._ai._tools.utils.exceptions import ToolExecutionError
from marimo._messaging.notification import VariableValue
from marimo._types.ids import CellId_t, SessionId
from tests._ai.tools.test_utils import MockSession, MockSessionView
@dataclass
class MockCellNotification:
    """Stand-in for CellNotification with just the fields the tools read."""

    output: object | None = None
    console: object | None = None
    status: object | None = None
@dataclass
class MockOutput:
    """Stand-in for a cell output (channel, data, mimetype)."""

    channel: object = None
    data: object = None
    mimetype: object = None
@dataclass
class MockConsoleOutput:
    """Stand-in for a single console output entry."""

    channel: object
    data: object
@dataclass
class MockError:
    """Stand-in for a marimo error object exposing describe()."""

    type: str
    _message: str
    traceback: list[str] = field(default_factory=list)

    def describe(self) -> str:
        # Mirrors the error API consumed by _get_error_output_data.
        return self._message
def test_is_markdown_cell():
    """Only cells whose source starts with mo.md( count as markdown."""
    tool = GetLightweightCellMap(ToolContext())
    assert tool._is_markdown_cell('mo.md("hi")') is True
    assert tool._is_markdown_cell("print('x')") is False
def test_get_cell_metadata_basic():
    """An idle cell reports its recorded execution duration."""
    tool = GetCellRuntimeData(ToolContext())
    cell_notification = MockCellNotification(status="idle")
    session = MockSession(
        _session_view=MockSessionView(
            cell_notifications={"c1": cell_notification},
            last_execution_time={"c1": 42.5},
        )
    )
    result = tool._get_cell_metadata(session, CellId_t("c1"))
    assert result == CellRuntimeMetadata(
        runtime_state="idle", execution_time=42.5
    )
def test_get_cell_metadata_no_cell_notification():
    """A cell with no notification yields empty metadata."""
    tool = GetCellRuntimeData(ToolContext())
    empty_session = MockSession(_session_view=MockSessionView())
    metadata = tool._get_cell_metadata(empty_session, CellId_t("missing"))
    assert metadata == CellRuntimeMetadata(
        runtime_state=None, execution_time=None
    )
def test_get_cell_variables():
    """Only variables defined by the cell itself are returned."""
    tool = GetCellRuntimeData(ToolContext())
    cell = Mock()
    cell._cell = Mock()
    cell._cell.defs = {"x", "y"}
    cell_data = Mock()
    cell_data.cell = cell
    var_x = VariableValue("x", "42", "int")
    var_y = VariableValue("y", "hi", "str")
    var_z = VariableValue("z", "[1]", "list")
    session = MockSession(
        _session_view=MockSessionView(
            variable_values={"x": var_x, "y": var_y, "z": var_z}
        )
    )
    result = tool._get_cell_variables(session, cell_data)
    expected: CellVariables = {
        "x": VariableValue(
            name="x", value=var_x.value, datatype=var_x.datatype
        ),
        "y": VariableValue(
            name="y", value=var_y.value, datatype=var_y.datatype
        ),
    }
    assert result == expected
    # z is defined in the session but not by this cell, so it is excluded.
    assert "z" not in result
def test_get_cell_type_sql():
    """Test _get_cell_type for SQL cells."""
    tool = GetLightweightCellMap(ToolContext())
    # Mock cell with SQL language
    cell_mock = Mock()
    cell_mock._cell = Mock()
    cell_mock._cell.language = "sql"
    cell_data = Mock()
    cell_data.cell = cell_mock
    cell_data.code = "SELECT * FROM table"
    # The compiled cell's language takes precedence over source heuristics.
    result = tool._get_cell_type(cell_data)
    assert result == "sql"
def test_get_cell_runtime_data_invalid_cell():
    """Test GetCellRuntimeData with invalid cell ID."""
    tool = GetCellRuntimeData(ToolContext())
    # Mock cell manager that returns None
    mock_cell_manager = Mock()
    mock_cell_manager.get_cell_data.return_value = None
    mock_session = Mock()
    mock_session.app_file_manager.app.cell_manager = mock_cell_manager
    context = Mock(spec=ToolContext)
    context.get_session.return_value = mock_session
    tool.context = context
    args = GetCellRuntimeDataArgs(
        session_id=SessionId("test"), cell_ids=[CellId_t("invalid")]
    )
    # A missing cell surfaces as a structured CELL_NOT_FOUND error.
    with pytest.raises(ToolExecutionError) as exc_info:
        tool.handle(args)
    assert exc_info.value.code == "CELL_NOT_FOUND"
def test_get_cell_runtime_data_empty_cell_ids():
    """Test GetCellRuntimeData with empty cell_ids returns all cells."""
    tool = GetCellRuntimeData(ToolContext())
    cell_data_1 = Mock()
    cell_data_1.cell_id = CellId_t("c1")
    cell_data_1.code = "x = 1"
    cell_data_1.cell = None
    cell_data_2 = Mock()
    cell_data_2.cell_id = CellId_t("c2")
    cell_data_2.code = "y = 2"
    cell_data_2.cell = None
    mock_cell_manager = Mock()
    mock_cell_manager.cell_data.return_value = [cell_data_1, cell_data_2]
    mock_cell_manager.get_cell_data.side_effect = lambda cid: (
        cell_data_1 if cid == "c1" else cell_data_2
    )
    mock_session = Mock()
    mock_session.app_file_manager.app.cell_manager = mock_cell_manager
    mock_session.session_view = MockSessionView()
    context = Mock(spec=ToolContext)
    context.get_session.return_value = mock_session
    context.get_cell_errors.return_value = []
    tool.context = context
    args = GetCellRuntimeDataArgs(session_id=SessionId("test"), cell_ids=[])
    result = tool.handle(args)
    # Results come back in cell-manager order.
    assert len(result.data) == 2
    assert result.data[0].cell_id == "c1"
    assert result.data[1].cell_id == "c2"
def test_get_cell_outputs_empty_cell_ids():
    """Test GetCellOutputs with empty cell_ids returns all cells."""
    tool = GetCellOutputs(ToolContext())
    cell_data_1 = Mock()
    cell_data_1.cell_id = CellId_t("c1")
    cell_data_2 = Mock()
    cell_data_2.cell_id = CellId_t("c2")
    mock_cell_manager = Mock()
    mock_cell_manager.cell_data.return_value = [cell_data_1, cell_data_2]
    # c1 has a plain-text output; c2 produced nothing.
    notif_c1 = MockCellNotification(
        output=MockOutput(data="42", mimetype="text/plain"),
        console=None,
    )
    notif_c2 = MockCellNotification(output=None, console=None)
    mock_session = Mock()
    mock_session.app_file_manager.app.cell_manager = mock_cell_manager
    mock_session.session_view = MockSessionView(
        cell_notifications={"c1": notif_c1, "c2": notif_c2}
    )
    context = Mock(spec=ToolContext)
    context.get_session.return_value = mock_session
    context.get_cell_console_outputs.return_value = MarimoCellConsoleOutputs()
    tool.context = context
    args = GetCellOutputArgs(session_id=SessionId("test"), cell_ids=[])
    result = tool.handle(args)
    assert len(result.cells) == 2
    assert result.cells[0].cell_id == "c1"
    assert result.cells[0].visual_output.visual_output == "42"
    assert result.cells[1].cell_id == "c2"
    assert result.cells[1].visual_output.visual_output is None
def test_get_visual_output_with_html():
    """Non-error outputs pass data and mimetype through unchanged."""
    tool = GetCellOutputs(ToolContext())
    notif = MockCellNotification(
        output=MockOutput(data="<div>test</div>", mimetype="text/html")
    )
    data, mime = tool._get_visual_output(notif)  # type: ignore[arg-type]
    assert data == "<div>test</div>"
    assert mime == "text/html"
def test_get_visual_output_no_output():
    """A notification without output yields (None, None)."""
    tool = GetCellOutputs(ToolContext())
    notif = MockCellNotification(output=None)
    data, mime = tool._get_visual_output(notif)  # type: ignore[arg-type]
    assert data is None
    assert mime is None
def test_get_visual_output_with_error():
    """Test that error output is returned as structured JSON."""
    import json
    tool = GetCellOutputs(ToolContext())
    error = MockError(type="NameError", _message="name 'x' is not defined")
    output = MockOutput(
        channel="marimo-error",
        data=[error],
        mimetype="application/vnd.marimo+error",
    )
    cell_notification = MockCellNotification(output=output)
    visual_output, mimetype = tool._get_visual_output(cell_notification)  # type: ignore[arg-type]
    # The original mimetype is replaced by application/json for errors.
    assert mimetype == "application/json"
    parsed = json.loads(visual_output)
    assert len(parsed) == 1
    assert parsed[0]["type"] == "NameError"
    assert parsed[0]["message"] == "name 'x' is not defined"
def test_lightweight_cell_map_includes_runtime_info():
    """LightweightCellInfo carries runtime_state, has_output,
    has_console_output, and has_errors from cell notifications."""
    tool = GetLightweightCellMap(ToolContext())

    # Four uncompiled mock cells; c3 deliberately gets no notification.
    cell_specs = (
        ("c1", "x = 1"),
        ("c2", "print('hello')"),
        ("c3", "y = 2"),
        ("c4", "z = bad_var"),
    )
    all_cell_data = []
    for cid, code in cell_specs:
        data = Mock()
        data.cell_id = cid
        data.code = code
        data.cell = None  # no compiled cell
        all_cell_data.append(data)

    cell_manager = Mock()
    cell_manager.cell_data.return_value = all_cell_data

    notifications = {
        # c1: finished, plain-text output, no console.
        "c1": MockCellNotification(
            status="idle",
            output=MockOutput(data="42", mimetype="text/plain"),
            console=None,
        ),
        # c2: still running, console output only.
        "c2": MockCellNotification(
            status="running",
            output=None,
            console=[MockConsoleOutput(channel="stdout", data="hello")],
        ),
        # c4: finished with a NameError.
        "c4": MockCellNotification(
            status="idle",
            output=MockOutput(
                channel="marimo-error",
                data=[
                    MockError(
                        type="NameError",
                        _message="name 'bad_var' is not defined",
                    )
                ],
                mimetype="application/vnd.marimo+error",
            ),
            console=None,
        ),
    }

    session = Mock()
    session.app_file_manager.app.cell_manager = cell_manager
    session.app_file_manager.filename = "test.py"
    session.session_view = MockSessionView(cell_notifications=notifications)

    ctx = Mock(spec=ToolContext)
    ctx.get_session.return_value = session
    tool.context = ctx

    result = tool.handle(
        GetLightweightCellMapArgs(session_id=SessionId("test"), preview_lines=3)
    )

    # (cell_id, runtime_state, has_output, has_console_output, has_errors)
    expected = [
        ("c1", "idle", True, False, False),
        ("c2", "running", False, True, False),
        ("c3", None, False, False, False),
        ("c4", "idle", False, False, True),
    ]
    assert len(result.cells) == len(expected)
    for cell, (cid, state, out, console, errs) in zip(result.cells, expected):
        assert cell.cell_id == cid
        assert cell.runtime_state == state
        assert cell.has_output is out
        assert cell.has_console_output is console
        assert cell.has_errors is errs
def test_get_cell_runtime_data_batched():
    """GetCellRuntimeData returns per-cell metadata for multiple cell ids."""
    tool = GetCellRuntimeData(ToolContext())

    cells = {}
    for cid, code in (("c1", "x = 1"), ("c2", "y = 2")):
        data = Mock()
        data.code = code
        data.cell = None
        cells[cid] = data

    cell_manager = Mock()
    cell_manager.get_cell_data.side_effect = lambda cid: cells[cid]

    session = Mock()
    session.app_file_manager.app.cell_manager = cell_manager
    session.session_view = MockSessionView(
        cell_notifications={
            "c1": MockCellNotification(status="idle"),
            "c2": MockCellNotification(status="running"),
        },
        last_execution_time={"c1": 10.0, "c2": 20.0},
    )

    ctx = Mock(spec=ToolContext)
    ctx.get_session.return_value = session
    ctx.get_cell_errors.return_value = []
    tool.context = ctx

    result = tool.handle(
        GetCellRuntimeDataArgs(
            session_id=SessionId("test"),
            cell_ids=[CellId_t("c1"), CellId_t("c2")],
        )
    )

    first, second = result.data
    assert (first.cell_id, first.code) == ("c1", "x = 1")
    assert first.metadata.runtime_state == "idle"  # type: ignore[union-attr]
    assert first.metadata.execution_time == 10.0  # type: ignore[union-attr]
    assert (second.cell_id, second.code) == ("c2", "y = 2")
    assert second.metadata.runtime_state == "running"  # type: ignore[union-attr]
    # execution_time is only populated once a cell is idle.
    assert second.metadata.execution_time is None  # type: ignore[union-attr]
def test_get_cell_outputs_batched():
    """GetCellOutputs returns the visual output for each requested cell."""
    tool = GetCellOutputs(ToolContext())

    session = Mock()
    session.session_view = MockSessionView(
        cell_notifications={
            "c1": MockCellNotification(
                output=MockOutput(data="<b>hi</b>", mimetype="text/html"),
                console=None,
            ),
            "c2": MockCellNotification(output=None, console=None),
        }
    )

    ctx = Mock(spec=ToolContext)
    ctx.get_session.return_value = session
    ctx.get_cell_console_outputs.return_value = MarimoCellConsoleOutputs()
    tool.context = ctx

    result = tool.handle(
        GetCellOutputArgs(
            session_id=SessionId("test"),
            cell_ids=[CellId_t("c1"), CellId_t("c2")],
        )
    )

    with_output, without_output = result.cells
    assert with_output.cell_id == "c1"
    assert with_output.visual_output.visual_output == "<b>hi</b>"
    assert with_output.visual_output.visual_mimetype == "text/html"
    assert without_output.cell_id == "c2"
    assert without_output.visual_output.visual_output is None
def test_get_cell_outputs_invalid_cell():
    """Requesting an unknown cell id raises a CELL_NOT_FOUND tool error."""
    tool = GetCellOutputs(ToolContext())

    session = Mock()
    session.session_view = MockSessionView(cell_notifications={})

    ctx = Mock(spec=ToolContext)
    ctx.get_session.return_value = session
    tool.context = ctx

    args = GetCellOutputArgs(
        session_id=SessionId("test"),
        cell_ids=[CellId_t("missing")],
    )
    with pytest.raises(ToolExecutionError) as exc_info:
        tool.handle(args)
    assert exc_info.value.code == "CELL_NOT_FOUND"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ai/tools/tools/test_cells.py",
"license": "Apache License 2.0",
"lines": 386,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_ai/tools/tools/test_notebooks.py | from __future__ import annotations
import os
from dataclasses import dataclass
from unittest.mock import Mock
import pytest
from marimo._ai._tools.base import ToolContext
from marimo._ai._tools.tools.notebooks import GetActiveNotebooks
from marimo._ai._tools.types import EmptyArgs, MarimoNotebookInfo
from marimo._session.model import ConnectionState
from marimo._types.ids import SessionId
@dataclass
class MockAppFileManager:
    """Minimal stand-in for AppFileManager: only the file-identity fields
    read by the notebook tools under test."""

    # Filename as supplied by the user; None for an unsaved notebook.
    filename: str | None
    # Path on disk (absolute in these fixtures); None for an unsaved notebook.
    path: str | None
@dataclass
class MockSession:
    """Minimal stand-in for a Session: exposes a fixed connection state and
    an app-file manager, which is all the tools under test consume."""

    # Backing value returned by connection_state().
    _connection_state: ConnectionState
    app_file_manager: MockAppFileManager

    def connection_state(self) -> ConnectionState:
        """Return the configured (static) connection state."""
        return self._connection_state
@dataclass
class MockSessionManager:
    """Minimal stand-in for SessionManager: holds sessions and counts the
    ones whose connection is still live."""

    sessions: dict[str, MockSession]

    def get_active_connection_count(self) -> int:
        """Count sessions whose connection state is OPEN or ORPHANED."""
        live_states = (ConnectionState.OPEN, ConnectionState.ORPHANED)
        return sum(
            1
            for session in self.sessions.values()
            if session.connection_state() in live_states
        )
@pytest.fixture
def tool() -> GetActiveNotebooks:
    """Provide a GetActiveNotebooks tool backed by a fresh ToolContext."""
    return GetActiveNotebooks(ToolContext())
@pytest.fixture
def mock_context() -> Mock:
    """Provide a Mock ToolContext that keeps the real
    get_active_sessions_internal implementation."""
    ctx = Mock(spec=ToolContext)
    ctx.get_active_sessions_internal = ToolContext.get_active_sessions_internal
    return ctx
def test_get_active_sessions_internal_empty(mock_context: Mock):
    """With no sessions registered, no notebooks are reported."""
    mock_context.session_manager = MockSessionManager(sessions={})
    assert mock_context.get_active_sessions_internal(mock_context) == []
def test_get_active_sessions_internal_open_session(mock_context: Mock):
    """An OPEN session is reported with its name, absolute path, and id."""
    expected_path = os.path.abspath("/path/to/notebook.py")
    open_session = MockSession(
        _connection_state=ConnectionState.OPEN,
        app_file_manager=MockAppFileManager(
            filename="/path/to/notebook.py",
            path=expected_path,
        ),
    )
    mock_context.session_manager = MockSessionManager(
        sessions={"session1": open_session}
    )
    (info,) = mock_context.get_active_sessions_internal(mock_context)
    assert info.name == "notebook.py"
    assert info.path == expected_path
    assert info.session_id == "session1"
def test_get_active_sessions_internal_orphaned_session(mock_context: Mock):
    """ORPHANED sessions still count as active and are reported."""
    orphaned = MockSession(
        _connection_state=ConnectionState.ORPHANED,
        app_file_manager=MockAppFileManager(
            filename="/path/to/test.py",
            path=os.path.abspath("/path/to/test.py"),
        ),
    )
    mock_context.session_manager = MockSessionManager(
        sessions={"session2": orphaned}
    )
    (info,) = mock_context.get_active_sessions_internal(mock_context)
    assert info.name == "test.py"
def test_get_active_sessions_internal_closed_session(mock_context: Mock):
    """CLOSED sessions are filtered out entirely."""
    closed = MockSession(
        _connection_state=ConnectionState.CLOSED,
        app_file_manager=MockAppFileManager(
            filename="/path/to/closed.py",
            path=os.path.abspath("/path/to/closed.py"),
        ),
    )
    mock_context.session_manager = MockSessionManager(
        sessions={"session3": closed}
    )
    assert mock_context.get_active_sessions_internal(mock_context) == []
def test_get_active_sessions_internal_no_filename(mock_context: Mock):
    """An unsaved notebook is reported with placeholder name and path."""
    unsaved = MockSession(
        _connection_state=ConnectionState.OPEN,
        app_file_manager=MockAppFileManager(filename=None, path=None),
    )
    mock_context.session_manager = MockSessionManager(sessions={"s4": unsaved})
    (info,) = mock_context.get_active_sessions_internal(mock_context)
    assert info.name == "new notebook"
    assert info.path == "(unsaved notebook - save to disk to get file path)"
    assert info.session_id == "s4"
def test_get_active_sessions_internal_multiple_sessions(mock_context: Mock):
    """Only OPEN and ORPHANED sessions survive the active-session filter."""

    def session_for(state: ConnectionState, filename: str) -> MockSession:
        # Build a session whose path mirrors the filename, as real sessions do.
        return MockSession(
            state, MockAppFileManager(filename, os.path.abspath(filename))
        )

    mock_context.session_manager = MockSessionManager(
        sessions={
            "s1": session_for(ConnectionState.OPEN, "/path/first.py"),
            "s2": session_for(ConnectionState.CLOSED, "/path/closed.py"),
            "s3": session_for(ConnectionState.ORPHANED, "/path/third.py"),
        }
    )
    result = mock_context.get_active_sessions_internal(mock_context)
    assert sorted(info.session_id for info in result) == ["s1", "s3"]
def test_get_active_notebooks_handle(tool: GetActiveNotebooks):
    """GetActiveNotebooks.handle() reports one open notebook end-to-end."""
    open_session = MockSession(
        ConnectionState.OPEN,
        MockAppFileManager(
            "/test/notebook.py", os.path.abspath("/test/notebook.py")
        ),
    )

    ctx = Mock(spec=ToolContext)
    ctx.session_manager = MockSessionManager(
        sessions={"session1": open_session}
    )
    ctx.get_active_sessions_internal = Mock(
        return_value=[
            MarimoNotebookInfo(
                name="notebook.py",
                path="/test/notebook.py",
                session_id=SessionId("session1"),
            )
        ]
    )
    tool.context = ctx

    result = tool.handle(EmptyArgs())
    assert result.status == "success"
    summary = result.data.summary
    assert summary.total_notebooks == 1
    assert summary.active_connections == 1
    assert len(result.data.notebooks) == 1
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ai/tools/tools/test_notebooks.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_ipc/connection.py | # Copyright 2026 Marimo. All rights reserved.
"""ZeroMQ connection management for inter-process communication."""
from __future__ import annotations
import dataclasses
import queue
import sys
import typing
from marimo import _loggers
from marimo._ipc.queue_proxy import PushQueue, start_receiver_thread
from marimo._ipc.types import ConnectionInfo
from marimo._session.queue import QueueType
if typing.TYPE_CHECKING:
import zmq
from marimo._messaging.types import KernelMessage
from marimo._runtime.commands import (
BatchableCommand,
CodeCompletionCommand,
CommandMessage,
)
# Module-level logger for IPC connection events.
LOGGER = _loggers.marimo_logger()

# All sockets bind/connect on loopback only; port numbers are chosen at bind
# time (see Connection.create).
ADDR = "tcp://127.0.0.1"

# Payload type carried by a Channel.
T = typing.TypeVar("T")
@dataclasses.dataclass
class Channel(typing.Generic[T]):
    """A typed communication channel pairing a ZeroMQ socket with a queue.

    Two kinds exist:
    - "push": send-only; the queue is a PushQueue that sends immediately
      through the socket.
    - "pull": receive-only; the queue buffers messages read off the socket.
    """

    kind: typing.Literal["push", "pull"]
    socket: zmq.Socket[bytes]
    queue: QueueType[T]

    @classmethod
    def Push(
        cls, context: zmq.Context[zmq.Socket[bytes]], *, maxsize: int = 0
    ) -> Channel[T]:
        """Create a send-only channel.

        Note: maxsize is accepted for interface parity but ignored; ZeroMQ
        performs its own buffering for push sockets.
        """
        import zmq

        push_socket = context.socket(zmq.PUSH)
        send_queue: QueueType[T] = PushQueue(push_socket, maxsize=maxsize)
        return cls(kind="push", socket=push_socket, queue=send_queue)

    @classmethod
    def Pull(
        cls, context: zmq.Context[zmq.Socket[bytes]], *, maxsize: int = 0
    ) -> Channel[T]:
        """Create a receive-only channel.

        Args:
            context: ZeroMQ context used to create the socket.
            maxsize: Maximum buffered messages (0 means unlimited).
        """
        import zmq

        pull_socket = context.socket(zmq.PULL)
        return cls(
            kind="pull",
            socket=pull_socket,
            queue=queue.Queue(maxsize=maxsize),
        )
@dataclasses.dataclass
class Connection:
    """Owns every ZeroMQ socket used for marimo IPC communication."""

    context: zmq.Context[zmq.Socket[bytes]]
    control: Channel[CommandMessage]
    ui_element: Channel[BatchableCommand]
    completion: Channel[CodeCompletionCommand]
    win32_interrupt: Channel[bool] | None
    input: Channel[str]
    stream: Channel[KernelMessage]

    def __post_init__(self) -> None:
        """Start a single receiver thread draining every pull channel."""
        pull_map: dict[zmq.Socket[bytes], QueueType[typing.Any]] = {}
        for channel in (
            self.control,
            self.ui_element,
            self.completion,
            self.win32_interrupt,  # may be None off-Windows
            self.input,
            self.stream,
        ):
            if channel is not None and channel.kind == "pull":
                pull_map[channel.socket] = channel.queue
        self._stop_event, self._receiver_thread = start_receiver_thread(
            pull_map
        )

    @classmethod
    def create(cls) -> tuple[Connection, ConnectionInfo]:
        """Create the host side, binding every socket to a random port.

        Returns:
            Tuple of (Connection instance, ConnectionInfo with port numbers).
        """
        import zmq

        context = zmq.Context()
        conn = cls(
            context=context,
            control=Channel.Push(context),
            ui_element=Channel.Push(context),
            completion=Channel.Push(context),
            # The interrupt side-channel only exists on Windows.
            win32_interrupt=(
                Channel.Push(context) if sys.platform == "win32" else None
            ),
            input=Channel.Push(context, maxsize=1),
            stream=Channel.Pull(context),
        )
        win32_port = (
            conn.win32_interrupt.socket.bind_to_random_port(ADDR)
            if conn.win32_interrupt
            else None
        )
        info = ConnectionInfo(
            control=conn.control.socket.bind_to_random_port(ADDR),
            ui_element=conn.ui_element.socket.bind_to_random_port(ADDR),
            completion=conn.completion.socket.bind_to_random_port(ADDR),
            input=conn.input.socket.bind_to_random_port(ADDR),
            stream=conn.stream.socket.bind_to_random_port(ADDR),
            win32_interrupt=win32_port,
        )
        return conn, info

    @classmethod
    def connect(cls, connection_info: ConnectionInfo) -> Connection:
        """Create the kernel side and attach to the host's bound ports.

        Args:
            connection_info: Port numbers published by the host.

        Returns:
            Connected Connection instance.
        """
        import zmq

        context = zmq.Context()
        conn = cls(
            context=context,
            control=Channel.Pull(context),
            ui_element=Channel.Pull(context),
            completion=Channel.Pull(context),
            win32_interrupt=(
                Channel.Pull(context)
                if connection_info.win32_interrupt
                else None
            ),
            input=Channel.Pull(context, maxsize=1),
            stream=Channel.Push(context),
        )
        conn.control.socket.connect(f"{ADDR}:{connection_info.control}")
        conn.ui_element.socket.connect(f"{ADDR}:{connection_info.ui_element}")
        conn.completion.socket.connect(f"{ADDR}:{connection_info.completion}")
        if conn.win32_interrupt:
            conn.win32_interrupt.socket.connect(
                f"{ADDR}:{connection_info.win32_interrupt}"
            )
        conn.input.socket.connect(f"{ADDR}:{connection_info.input}")
        conn.stream.socket.connect(f"{ADDR}:{connection_info.stream}")
        return conn

    def close(self) -> None:
        """Stop the receiver thread, then close all sockets."""
        self._stop_event.set()
        if self._receiver_thread.is_alive():
            self._receiver_thread.join(timeout=1)
        # destroy() closes every socket created on this context and finally
        # terminates the context itself.
        self.context.destroy()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ipc/connection.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_ipc/launch_kernel.py | # Copyright 2026 Marimo. All rights reserved.
"""Standalone kernel server entry point for IPC (using ZeroMQ)."""
from __future__ import annotations
import sys
from marimo._ipc.queue_manager import QueueManager
from marimo._ipc.types import KernelArgs
from marimo._runtime import runtime
def main() -> None:
    """Entry point for a kernel subprocess communicating over ZeroMQ.

    Reads JSON-encoded KernelArgs from stdin, connects the ZeroMQ queues,
    signals readiness by printing "KERNEL_READY" to stdout, and then runs
    the kernel loop.

    Typically invoked via:

        python -m marimo._ipc.launch_kernel

    IMPORTANT: The module path "marimo._ipc.launch_kernel" is a public API
    used by external consumers (e.g., marimo-lsp). Changing this path is a
    BREAKING CHANGE and should be done with care and proper deprecation.
    """
    kernel_args = KernelArgs.decode_json(sys.stdin.buffer.read())
    queues = QueueManager.connect(kernel_args.connection_info)

    # Handshake: tell the parent process we are ready to receive commands.
    sys.stdout.write("KERNEL_READY\n")
    sys.stdout.flush()

    runtime.launch_kernel(
        set_ui_element_queue=queues.set_ui_element_queue,
        interrupt_queue=queues.win32_interrupt_queue,
        completion_queue=queues.completion_queue,
        control_queue=queues.control_queue,
        input_queue=queues.input_queue,
        app_metadata=kernel_args.app_metadata,
        log_level=kernel_args.log_level,
        user_config=kernel_args.user_config,
        configs=kernel_args.configs,
        profile_path=kernel_args.profile_path,
        # Virtual files require a web server to serve file URLs. Since we're
        # not running one, content must be embedded as data URLs instead.
        # NOTE(review): kernel_args.virtual_files_supported is ignored here —
        # presumably intentional for the reason above; confirm.
        virtual_files_supported=False,
        stream_queue=queues.stream_queue,
        socket_addr=None,
        # IPC kernels are always subprocesses (is_ipc=True) but may be in
        # edit or run mode based on is_run_mode.
        is_edit_mode=not kernel_args.is_run_mode,
        is_ipc=True,
        redirect_console_to_browser=kernel_args.redirect_console_to_browser,
    )
# Allow direct invocation: `python -m marimo._ipc.launch_kernel`.
if __name__ == "__main__":
    main()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ipc/launch_kernel.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_ipc/queue_manager.py | # Copyright 2026 Marimo. All rights reserved.
"""Queue manager for generic inter-process communication."""
from __future__ import annotations
import dataclasses
import typing
from marimo._ipc.connection import Connection
from marimo._ipc.types import ConnectionInfo
if typing.TYPE_CHECKING:
from marimo._messaging.types import KernelMessage
from marimo._runtime.commands import (
BatchableCommand,
CodeCompletionCommand,
CommandMessage,
)
from marimo._session.queue import QueueType
@dataclasses.dataclass
class QueueManager:
    """High-level facade over the IPC connection's typed queues.

    Usage:
        # Host side - create and bind
        host_manager, connection_info = QueueManager.create()

        # Kernel side - connect
        kernel_manager = QueueManager.connect(connection_info)

        # Send/receive messages through queues
        host_manager.control_queue.put(request)
        response = kernel_manager.stream_queue.get()
    """

    conn: Connection

    @property
    def control_queue(self) -> QueueType[CommandMessage]:
        """Control requests (execute, interrupt, etc.)."""
        return self.conn.control.queue

    @property
    def set_ui_element_queue(self) -> QueueType[BatchableCommand]:
        """Batchable commands: UI element updates and model commands."""
        return self.conn.ui_element.queue

    @property
    def completion_queue(self) -> QueueType[CodeCompletionCommand]:
        """Code completion requests."""
        return self.conn.completion.queue

    @property
    def win32_interrupt_queue(self) -> typing.Union[QueueType[bool], None]:
        """Windows interrupt signals; None on non-Windows platforms."""
        channel = self.conn.win32_interrupt
        if channel is None:
            return None
        return channel.queue

    @property
    def input_queue(self) -> QueueType[str]:
        """User input responses."""
        return self.conn.input.queue

    @property
    def stream_queue(self) -> QueueType[KernelMessage]:
        """Kernel output messages."""
        return self.conn.stream.queue

    def close_queues(self) -> None:
        """Close all queues and release the underlying sockets."""
        self.conn.close()

    @classmethod
    def create(cls) -> tuple[QueueManager, ConnectionInfo]:
        """Create the host-side manager with all sockets bound.

        Returns:
            Tuple of (QueueManager instance, ConnectionInfo for the kernel).
        """
        conn, info = Connection.create()
        return cls(conn=conn), info

    @classmethod
    def connect(cls, connection_info: ConnectionInfo) -> QueueManager:
        """Create the kernel-side manager connected to the host.

        Args:
            connection_info: Connection details published by the host.

        Returns:
            Connected QueueManager instance.
        """
        return cls(conn=Connection.connect(connection_info))
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ipc/queue_manager.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_ipc/queue_proxy.py | # Copyright 2026 Marimo. All rights reserved.
"""Queue proxy for ZeroMQ sockets."""
from __future__ import annotations
import pickle
import threading
import typing
from marimo import _loggers
from marimo._session.queue import QueueType
# Module-level logger for the receiver thread.
LOGGER = _loggers.marimo_logger()

# Payload type carried through a queue.
T = typing.TypeVar("T")

# zmq is an optional dependency; import only for type checking here.
if typing.TYPE_CHECKING:
    import zmq
class PushQueue(QueueType[T]):
    """Send-only queue view over a ZeroMQ PUSH socket.

    put() pickles the object and sends it immediately; all receive-side
    operations are unsupported and raise NotImplementedError.
    """

    def __init__(
        self,
        socket: zmq.Socket[bytes],
        *,
        maxsize: int = 0,
    ) -> None:
        self.socket = socket
        # Kept for interface parity with queue.Queue; not enforced here.
        self.maxsize = maxsize

    def put(
        self,
        obj: T,
        block: bool = True,  # noqa: ARG002
        timeout: float | None = None,  # noqa: ARG002
    ) -> None:
        """Pickle *obj* and send it over the socket."""
        payload = pickle.dumps(obj)
        self.socket.send(payload)

    def put_nowait(self, obj: T) -> None:
        """Same as put(); sending does not block on this side."""
        self.put(obj, block=False)

    def get(self, block: bool = True, timeout: float | None = None) -> T:  # noqa: FBT001, FBT002
        """Unsupported: PushQueue is send-only."""
        msg = "PushQueue does not support get operations"
        raise NotImplementedError(msg)

    def get_nowait(self) -> T:
        """Unsupported: PushQueue is send-only."""
        msg = "PushQueue does not support get operations"
        raise NotImplementedError(msg)

    def empty(self) -> bool:
        """Unsupported: PushQueue is send-only."""
        msg = "PushQueue does not support empty() operation"
        raise NotImplementedError(msg)
def start_receiver_thread(
    receivers: dict[zmq.Socket[bytes], QueueType[typing.Any]],
) -> tuple[threading.Event, threading.Thread]:
    """Start a daemon thread that drains ZeroMQ sockets into queues.

    Args:
        receivers: Mapping from each socket to the queue that should receive
            its unpickled messages.

    Returns:
        Tuple of (stop event, receiver thread). Set the event to ask the
        thread to exit; it checks the flag between poll intervals.
    """
    import zmq

    def receive_loop(
        receivers: dict[zmq.Socket[bytes], QueueType[typing.Any]],
        stop_event: threading.Event,
    ) -> None:
        """Receive messages from sockets and put them in queues using polling."""
        poller = zmq.Poller()
        for socket in receivers:
            poller.register(socket, zmq.POLLIN)
        while not stop_event.is_set():
            try:
                # Poll with 100ms timeout
                socks = dict(poller.poll(100))
                for socket, event in socks.items():
                    if event & zmq.POLLIN:
                        msg = socket.recv(flags=zmq.NOBLOCK)
                        obj = pickle.loads(msg)
                        receivers[socket].put(obj)
            except zmq.Again:
                # No message available despite POLLIN; retry. NOTE: zmq.Again
                # is a subclass of ZMQError in pyzmq, so this clause must stay
                # before the general handler below.
                continue
            except zmq.ZMQError as e:
                # Socket-level failure (e.g. closed during shutdown): exit loop.
                LOGGER.debug(f"ZeroMQ socket error in receiver thread: {e}")
                break
            except Exception as e:
                # Unexpected errors (e.g. unpicklable payload) are logged and
                # skipped so one bad message does not kill the receiver.
                LOGGER.warning(
                    f"Unexpected error in ZeroMQ receiver thread: {e}",
                    exc_info=True,
                )
                continue

    stop_event = threading.Event()
    # Daemon thread: must not block interpreter shutdown.
    thread = threading.Thread(
        target=receive_loop,
        args=(receivers, stop_event),
        daemon=True,
    )
    thread.start()
    return stop_event, thread
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ipc/queue_proxy.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_ipc/types.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import typing
import msgspec
import msgspec.json
from marimo._ast.cell import CellConfig
from marimo._config.config import MarimoConfig
from marimo._messaging.msgspec_encoder import encode_json_bytes
from marimo._runtime.commands import AppMetadata
from marimo._types.ids import CellId_t
class ConnectionInfo(msgspec.Struct):
    """ZeroMQ socket connection info.

    Each field holds the TCP port number a socket was bound to on the host;
    the kernel side connects to these ports.
    """

    # Control requests (execute, interrupt, ...).
    control: int
    # Batchable UI element / model commands.
    ui_element: int
    # Code completion requests.
    completion: int
    # Windows-only interrupt side channel; None on other platforms.
    win32_interrupt: typing.Union[int, None]
    # User input responses.
    input: int
    # Kernel output messages.
    stream: int
class KernelArgs(msgspec.Struct):
    """Args to send to the kernel (serialized as JSON, e.g. over stdin)."""

    # Per-cell configuration, keyed by cell id.
    configs: dict[CellId_t, CellConfig]
    app_metadata: AppMetadata
    user_config: MarimoConfig
    # Logging level for the kernel process.
    log_level: int
    # Profiler output path; None presumably disables profiling (passed
    # through to runtime.launch_kernel).
    profile_path: typing.Union[str, None]
    # Ports of the host's ZeroMQ sockets to connect to.
    connection_info: ConnectionInfo

    # Whether to use run-mode config (autorun) vs edit-mode config (lazy)
    is_run_mode: bool = False

    # Runtime behavior flags
    virtual_files_supported: bool = True
    redirect_console_to_browser: bool = True

    def encode_json(self) -> bytes:
        """Serialize to JSON bytes (counterpart of decode_json)."""
        return encode_json_bytes(self)

    @classmethod
    def decode_json(cls, buf: bytes) -> KernelArgs:
        """Parse JSON bytes produced by encode_json."""
        return msgspec.json.decode(buf, type=cls)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ipc/types.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ipc/test_kernel_communication.py | """Tests for ZeroMQ-based kernel communication."""
from __future__ import annotations
import json
import queue
import subprocess
import sys
import time
import pytest
from dirty_equals import IsFloat, IsList, IsUUID
from marimo._ast.app_config import _AppConfig
from marimo._ast.cell import CellConfig
from marimo._config.config import DEFAULT_CONFIG
from marimo._config.settings import GLOBAL_SETTINGS
from marimo._dependencies.dependencies import DependencyManager
from marimo._runtime.commands import (
AppMetadata,
ExecuteCellsCommand,
)
from marimo._types.ids import CellId_t
# These tests require the optional pyzmq dependency.
HAS_DEPS = DependencyManager.has("zmq")
@pytest.mark.skipif(not HAS_DEPS, reason="optional dependencies not installed")
@pytest.mark.skip(reason="TODO: fix this test. Currently flaky on CI.")
def test_kernel_launch_and_execute_cells():
    """Test launching a kernel and executing cells with stdout/stderr."""
    from marimo._ipc import KernelArgs, QueueManager

    # One cell that writes to stdout/stderr and defines a variable.
    execute_request = ExecuteCellsCommand(
        cell_ids=[CellId_t("cell1")],
        codes=[
            """\
import sys
print("stdout")
print("stderr", file=sys.stderr)
x = 42"""
        ],
    )

    queue_manager, connection_info = QueueManager.create()
    kernel_args = KernelArgs(
        connection_info=connection_info,
        profile_path=None,
        configs={cid: CellConfig() for cid in execute_request.cell_ids},
        user_config=DEFAULT_CONFIG,
        log_level=GLOBAL_SETTINGS.LOG_LEVEL,
        app_metadata=AppMetadata(
            query_params={}, cli_args={}, app_config=_AppConfig()
        ),
    )

    # IMPORTANT: The module path "marimo._ipc.launch_kernel" is a public API
    # used by external consumers (e.g., marimo-lsp). Changing this path is a
    # BREAKING CHANGE and should be done with care and proper deprecation.
    process = subprocess.Popen(
        [sys.executable, "-m", "marimo._ipc.launch_kernel"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Hand the kernel its args over stdin, then close stdin to signal EOF.
    assert process.stdin is not None
    process.stdin.write(kernel_args.encode_json())
    process.stdin.flush()
    process.stdin.close()

    assert process.stdout is not None
    assert process.stderr is not None
    # Wait for the readiness handshake before sending any commands; surface
    # the subprocess's stderr in the failure message if it never arrives.
    ready_line = process.stdout.readline().decode("utf-8").strip()
    if ready_line != "KERNEL_READY":
        exit_code = process.poll()
        stderr_content = process.stderr.read().decode("utf-8")
        if exit_code is not None and exit_code != 0:
            raise RuntimeError(
                f"Kernel process failed with exit code {exit_code}. Stderr: {stderr_content}"
            )
        else:
            raise RuntimeError(
                f"Expected KERNEL_READY, got: '{ready_line}'. Stderr: {stderr_content}"
            )

    queue_manager.control_queue.put(execute_request)

    # Drain the stream queue until "completed-run" is seen, then keep
    # collecting for a short grace period to catch trailing console events.
    messages = []
    seen_completed = False
    extra_collection_start = None
    while True:
        try:
            encoded = queue_manager.stream_queue.get(timeout=0.01)
            decoded = json.loads(encoded)
            messages.append(decoded)
            if decoded["op"] == "completed-run":
                seen_completed = True
                extra_collection_start = time.time()
        except queue.Empty:
            if seen_completed and extra_collection_start is not None:
                # FIXME: stdin/stdout are flushed every 10ms, so wait 100ms
                # (after "completed-run") to ensure all related events.
                if time.time() - extra_collection_start >= 0.1:
                    break
            # If we haven't seen completed-run yet, continue waiting
            continue

    # Full expected event sequence for one successful cell run.
    assert messages == [
        {
            "op": "variables",
            "variables": IsList(
                {
                    "declared_by": [
                        "cell1",
                    ],
                    "name": "x",
                    "used_by": [],
                },
                {
                    "declared_by": [
                        "cell1",
                    ],
                    "name": "sys",
                    "used_by": [],
                },
                check_order=False,
            ),
        },
        {
            "cell_id": "cell1",
            "console": None,
            "op": "cell-op",
            "output": None,
            "run_id": IsUUID(),
            "serialization": None,
            "stale_inputs": None,
            "status": "queued",
            "timestamp": IsFloat(),
        },
        {
            "cell_id": "cell1",
            "op": "remove-ui-elements",
        },
        {
            "cell_id": "cell1",
            "console": [],
            "op": "cell-op",
            "output": None,
            "run_id": IsUUID(),
            "serialization": None,
            "stale_inputs": None,
            "status": "running",
            "timestamp": IsFloat(),
        },
        {
            "op": "variable-values",
            "variables": IsList(
                {
                    "datatype": "int",
                    "name": "x",
                    "value": "42",
                },
                {
                    "datatype": "module",
                    "name": "sys",
                    "value": "sys",
                },
                check_order=False,
            ),
        },
        {
            "cell_id": "cell1",
            "console": None,
            "op": "cell-op",
            "output": {
                "channel": "output",
                "data": "",
                "mimetype": "text/plain",
                "timestamp": IsFloat(),
            },
            "run_id": IsUUID(),
            "serialization": None,
            "stale_inputs": None,
            "status": None,
            "timestamp": IsFloat(),
        },
        {
            "cell_id": "cell1",
            "console": None,
            "op": "cell-op",
            "output": None,
            "run_id": None,
            "serialization": None,
            "stale_inputs": None,
            "status": "idle",
            "timestamp": IsFloat(),
        },
        {
            "op": "completed-run",
        },
        {
            "cell_id": "cell1",
            "console": {
                "channel": "stdout",
                "data": "stdout\n",
                "mimetype": "text/plain",
                "timestamp": IsFloat(),
            },
            "op": "cell-op",
            "output": None,
            "run_id": None,
            "serialization": None,
            "stale_inputs": None,
            "status": None,
            "timestamp": IsFloat(),
        },
        {
            "cell_id": "cell1",
            "console": {
                "channel": "stderr",
                "data": "stderr\n",
                "mimetype": "text/plain",
                "timestamp": IsFloat(),
            },
            "op": "cell-op",
            "output": None,
            "run_id": None,
            "serialization": None,
            "stale_inputs": None,
            "status": None,
            "timestamp": IsFloat(),
        },
    ]

    process.terminate()
    process.wait(timeout=2)
    queue_manager.close_queues()
@pytest.mark.skipif(not HAS_DEPS, reason="optional dependencies not installed")
def test_queue_manager_connection():
    """Test creating and connecting queue managers."""
    from marimo._ipc import QueueManager

    host, info = QueueManager.create()
    client = QueueManager.connect(info)

    # Control messages flow host -> client.
    request = ExecuteCellsCommand(
        cell_ids=[CellId_t("cell1")],
        codes=["print('test')"],
    )
    host.control_queue.put(request)
    assert client.control_queue.get(timeout=1) == request

    # Stream messages flow client -> host.
    message = ("test-op", b'{"data": "test"}')
    client.stream_queue.put(message)
    assert host.stream_queue.get(timeout=1) == message

    host.close_queues()
    client.close_queues()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ipc/test_kernel_communication.py",
"license": "Apache License 2.0",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_mcp/server/exceptions.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Optional
@dataclass
class ToolErrorDetails:
    """Structured representation of tool execution error details."""

    message: str
    code: str = "TOOL_ERROR"
    status: int = 400
    is_retryable: bool = False
    suggested_fix: Optional[str] = None
    meta: Optional[dict[str, Any]] = None

    def to_dict(self) -> dict[str, Any]:
        """Serialize the error details into a plain dictionary."""
        payload: dict[str, Any] = {
            "message": self.message,
            "code": self.code,
            "status": self.status,
            "is_retryable": self.is_retryable,
            "suggested_fix": self.suggested_fix,
        }
        # Normalize an unset/empty meta to an empty dict for consumers.
        payload["meta"] = self.meta or {}
        return payload
class ToolExecutionError(Exception):
    """Raise this from a tool to signal a descriptive, structured failure."""

    def __init__(
        self,
        message: str,
        *,
        code: str = "TOOL_ERROR",
        status: int = 400,
        is_retryable: bool = False,
        suggested_fix: Optional[str] = None,
        meta: Optional[dict[str, Any]] = None,
    ):
        # Collect all arguments into the ToolErrorDetails dataclass so the
        # structured payload travels with the exception instance.
        self.details = ToolErrorDetails(
            message=message,
            code=code,
            status=status,
            is_retryable=is_retryable,
            suggested_fix=suggested_fix,
            meta=meta,
        )
        # The exception text is the JSON-encoded details, so str(error)
        # exposes the full structured payload.
        super().__init__(self._create_structured_message())

    @property
    def original_message(self) -> str:
        """The human-readable message passed to the constructor."""
        return self.details.message

    @property
    def code(self) -> str:
        """Machine-readable error code."""
        return self.details.code

    @property
    def status(self) -> int:
        """HTTP-style status code for the failure."""
        return self.details.status

    @property
    def is_retryable(self) -> bool:
        """Whether the caller may retry the operation."""
        return self.details.is_retryable

    @property
    def suggested_fix(self) -> Optional[str]:
        """Optional remediation hint for the caller."""
        return self.details.suggested_fix

    @property
    def meta(self) -> dict[str, Any]:
        """Extra metadata; never None (empty dict when unset)."""
        return self.details.meta or {}

    def _create_structured_message(self) -> str:
        """JSON-encode the structured details into a compact string."""
        import json

        payload = self.details.to_dict()
        return json.dumps(payload, separators=(",", ":"))

    def to_dict(self) -> dict[str, Any]:
        """Return a dictionary representation of the error details for testing."""
        return self.details.to_dict()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_mcp/server/exceptions.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_mcp/server/lifespan.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import contextlib
from typing import TYPE_CHECKING
from marimo._loggers import marimo_logger
LOGGER = marimo_logger()
if TYPE_CHECKING:
from collections.abc import AsyncIterator
from starlette.applications import Starlette
@contextlib.asynccontextmanager
async def mcp_server_lifespan(app: Starlette) -> AsyncIterator[None]:
    """Lifespan for MCP server functionality (exposing marimo as MCP server).

    Yields exactly once on every path so the surrounding Starlette lifespan
    continues to work even when the MCP server is missing or fails to start.
    """
    try:
        # setup_mcp_server stores the FastMCP instance on app.state.mcp;
        # a missing attribute lands in the generic Exception handler below.
        mcp_app = app.state.mcp
        if mcp_app is None:
            LOGGER.warning("MCP server not found in app state")
            yield
            return
        # Session manager owns request lifecycle during app run
        async with mcp_app.session_manager.run():
            LOGGER.info("MCP server session manager started")
            yield
    except ImportError as e:
        # NOTE(review): if an exception is thrown into the generator AFTER
        # the first yield (i.e. during shutdown), these handlers yield a
        # second time, which asynccontextmanager rejects — confirm intended.
        LOGGER.warning(f"MCP server dependencies not available: {e}")
        yield
        return
    except Exception as e:
        LOGGER.error(f"Failed to start MCP server: {e}")
        yield
        return
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_mcp/server/lifespan.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_mcp/server/main.py | # Copyright 2026 Marimo. All rights reserved.
"""
MCP (Model Context Protocol) Server Implementation for Marimo
This module implements an MCP server that provides LLMs with access to marimo
notebook context and functionality.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from marimo._ai._tools.base import ToolContext
from marimo._ai._tools.tools_registry import SUPPORTED_BACKEND_AND_MCP_TOOLS
from marimo._cli.errors import MarimoCLIMissingDependencyError
from marimo._dependencies.dependencies import DependencyManager
from marimo._loggers import marimo_logger
LOGGER = marimo_logger()
if TYPE_CHECKING:
from starlette.applications import Starlette
from starlette.types import Receive, Scope, Send
def setup_mcp_server(app: Starlette, allow_remote: bool = False) -> None:
    """Create and configure an MCP server and mount it on the marimo app.

    Mounts the MCP streamable-HTTP app under ``/mcp`` and stores the FastMCP
    instance on ``app.state.mcp`` so the lifespan handler can start its
    session manager.

    Args:
        app: Starlette application instance for accessing marimo state
        allow_remote: If True, disable DNS rebinding protection to allow
            remote access behind proxies.

    Raises:
        MarimoCLIMissingDependencyError: if the optional ``mcp`` extra is
            not installed.
    """
    if not DependencyManager.mcp.has():
        raise MarimoCLIMissingDependencyError(
            "MCP dependencies not available.",
            "marimo[mcp]",
        )

    from mcp.server.fastmcp import FastMCP
    from starlette.middleware.base import BaseHTTPMiddleware
    from starlette.responses import JSONResponse
    from starlette.routing import Mount

    from marimo._mcp.server._prompts.registry import (
        SUPPORTED_MCP_PROMPTS,
    )

    transport_security = None
    if allow_remote:
        from mcp.server.transport_security import TransportSecuritySettings

        # Disabling DNS-rebinding protection is required when the server is
        # reached through a proxy with a different Host header.
        transport_security = TransportSecuritySettings(
            enable_dns_rebinding_protection=False,
        )

    mcp = FastMCP(
        "marimo-mcp-server",
        stateless_http=True,
        log_level="WARNING",
        # Change base path from /mcp to /server
        streamable_http_path="/server",
        transport_security=transport_security,
    )

    # Create context for tools and prompts
    context = ToolContext(app=app)

    # Register all tools
    for tool in SUPPORTED_BACKEND_AND_MCP_TOOLS:
        tool_with_context = tool(context)
        mcp.tool()(tool_with_context.as_mcp_tool_fn())

    # Register all prompts
    for prompt in SUPPORTED_MCP_PROMPTS:
        prompt_with_context = prompt(context)
        mcp.prompt()(prompt_with_context.as_mcp_prompt_fn())

    # Initialize streamable HTTP app
    mcp_app = mcp.streamable_http_app()

    # Middleware to reject any request whose auth lacks the "edit" scope.
    class RequiresEditMiddleware(BaseHTTPMiddleware):
        async def __call__(
            self, scope: Scope, receive: Receive, send: Send
        ) -> None:
            auth = scope.get("auth")
            if auth is None or "edit" not in auth.scopes:
                response = JSONResponse(
                    {"detail": "Forbidden"},
                    status_code=403,
                )
                return await response(scope, receive, send)
            return await self.app(scope, receive, send)

    mcp_app.add_middleware(RequiresEditMiddleware)

    # Add to the top of the routes to avoid conflicts with other routes
    app.routes.insert(0, Mount("/mcp", mcp_app))
    app.state.mcp = mcp
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_mcp/server/main.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_mcp/server/responses.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Literal, Optional
@dataclass(kw_only=True)
class SuccessResult:
    # Base result payload returned by marimo MCP server tools.
    # Overall outcome; defaults to "success" but error/warning are
    # representable so callers can downgrade the status.
    status: Literal["success", "error", "warning"] = "success"
    # presumably True when the client must authenticate first — confirm
    # against tool implementations.
    auth_required: bool = False
    # Optional suggested follow-up actions for the caller.
    next_steps: Optional[list[str]] = None
    # URL the user can visit to complete an action (e.g. login) — TODO confirm
    action_url: Optional[str] = None
    # Optional human-readable summary.
    message: Optional[str] = None
    # Arbitrary extra metadata.
    meta: Optional[dict[str, Any]] = None
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_mcp/server/responses.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_mcp/server/test_exceptions.py | """Tests for marimo._mcp.server.exceptions module."""
import json
import pytest
# Skip all MCP tests if Python < 3.10 or MCP not available
pytest.importorskip("mcp", reason="MCP requires Python 3.10+")
from marimo._mcp.server.exceptions import ToolExecutionError
def test_tool_execution_error_basic():
    """Defaults are applied when only a message is given."""
    err = ToolExecutionError("Something went wrong")

    assert err.original_message == "Something went wrong"
    assert err.code == "TOOL_ERROR"
    assert err.status == 400
    assert err.is_retryable is False
    assert err.suggested_fix is None
    assert err.meta == {}
def test_tool_execution_error_with_all_params():
    """Every keyword parameter is surfaced via the properties."""
    extra = {"session_id": "test123", "cell_id": "abc"}
    err = ToolExecutionError(
        "Custom error message",
        code="CUSTOM_ERROR",
        status=500,
        is_retryable=True,
        suggested_fix="Try restarting the server",
        meta=extra,
    )

    assert err.original_message == "Custom error message"
    assert err.code == "CUSTOM_ERROR"
    assert err.status == 500
    assert err.is_retryable is True
    assert err.suggested_fix == "Try restarting the server"
    assert err.meta == extra
def test_tool_execution_error_structured_message():
    """str(error) is valid JSON carrying every detail field."""
    err = ToolExecutionError(
        "Test error",
        code="TEST_CODE",
        status=422,
        is_retryable=True,
        suggested_fix="Fix the test",
        meta={"key": "value"},
    )

    decoded = json.loads(str(err))

    assert decoded["message"] == "Test error"
    assert decoded["code"] == "TEST_CODE"
    assert decoded["status"] == 422
    assert decoded["is_retryable"] is True
    assert decoded["suggested_fix"] == "Fix the test"
    assert decoded["meta"] == {"key": "value"}
def test_tool_execution_error_to_dict():
    """to_dict round-trips all constructor arguments."""
    err = ToolExecutionError(
        "Dict test",
        code="DICT_ERROR",
        status=404,
        is_retryable=False,
        suggested_fix="Check the ID",
        meta={"test": True},
    )

    assert err.to_dict() == {
        "code": "DICT_ERROR",
        "message": "Dict test",
        "status": 404,
        "is_retryable": False,
        "suggested_fix": "Check the ID",
        "meta": {"test": True},
    }
def test_tool_execution_error_inheritance():
    """ToolExecutionError behaves like a regular Exception."""
    err = ToolExecutionError("Test inheritance")
    assert isinstance(err, Exception)

    # It can be raised and caught like any other exception.
    with pytest.raises(ToolExecutionError) as exc_info:
        raise err
    assert exc_info.value.original_message == "Test inheritance"
def test_tool_execution_error_none_meta():
    """A None meta argument is normalized to an empty dict."""
    assert ToolExecutionError("Test", meta=None).meta == {}
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_mcp/server/test_exceptions.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_mcp/server/test_responses.py | """Tests for marimo._mcp.server.responses module."""
from typing import TypedDict
import pytest
# Skip all MCP tests if Python < 3.10 or MCP not available
pytest.importorskip("mcp", reason="MCP requires Python 3.10+")
from marimo._mcp.server.responses import (
SuccessResult,
)
class SampleData(TypedDict):
    # NOTE(review): not referenced by the tests in this module — presumably
    # kept as a sample payload shape; confirm before removing.
    message: str
    count: int
def test_success_result_basic():
    """A bare SuccessResult carries only its defaults."""
    res = SuccessResult()

    assert res.status == "success"
    assert res.auth_required is False
    assert res.next_steps is None
    assert res.action_url is None
    assert res.message is None
    assert res.meta is None
def test_success_result_with_all_params():
    """Every optional field is stored exactly as passed."""
    steps = ["Step 1", "Step 2"]
    res = SuccessResult(
        status="warning",
        auth_required=True,
        next_steps=steps,
        action_url="https://example.com",
        message="Custom message",
        meta={"key": "value"},
    )

    assert res.status == "warning"
    assert res.auth_required is True
    assert res.next_steps == steps
    assert res.action_url == "https://example.com"
    assert res.message == "Custom message"
    assert res.meta == {"key": "value"}
def test_success_result_with_meta():
    """Meta dicts are stored without transformation."""
    extra = {"key": "value", "count": 100}
    assert SuccessResult(meta=extra).meta == extra
def test_success_result_type_structure():
    """All expected attributes exist on a default instance."""
    res = SuccessResult()
    for attr in (
        "status",
        "auth_required",
        "next_steps",
        "action_url",
        "message",
        "meta",
    ):
        assert hasattr(res, attr)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_mcp/server/test_responses.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_messaging/msgspec_encoder.py | # Copyright 2026 Marimo. All rights reserved.
"""Msgspec encoder with custom type support for marimo."""
from __future__ import annotations
import collections
import datetime
import decimal
import fractions
import uuid
from math import isnan
from pathlib import PurePath
from typing import Any
import msgspec
import msgspec.json
from marimo import _loggers
from marimo._dependencies.dependencies import DependencyManager
from marimo._plugins.core.media import io_to_data_url
from marimo._utils.methods import getcallable
LOGGER = _loggers.marimo_logger()
def enc_hook(obj: Any) -> Any:
    """Custom encoding hook for marimo types.

    Invoked by msgspec for values it cannot serialize natively. Checks
    marimo's serialization protocols first, then well-known stdlib types,
    then already-imported third-party types, and finally falls back to
    ``repr(obj)``.
    """
    # marimo serialization protocols take precedence over everything else.
    if serialize := getcallable(obj, "_marimo_serialize_"):
        return serialize()
    if mime := getcallable(obj, "_mime_"):
        mimetype, data = mime()
        return {"mimetype": mimetype, "data": data}
    if isinstance(obj, range):
        return list(obj)
    if isinstance(
        obj,
        (complex, fractions.Fraction, decimal.Decimal, PurePath, uuid.UUID),
    ):
        return str(obj)

    # Third-party handlers only run when the library is already imported;
    # a type from an unimported library cannot be present.
    if DependencyManager.numpy.imported():
        import numpy as np

        if isinstance(
            obj, (np.datetime64, np.timedelta64, np.complexfloating)
        ):
            return str(obj)
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.bool_):
            return bool(obj)
        if isinstance(obj, (np.bytes_, np.str_)):
            return str(obj)
        if isinstance(obj, np.ndarray):
            # Stringify dtypes JSON cannot represent before list conversion.
            if any(
                np.issubdtype(obj.dtype, dtype)
                for dtype in (
                    np.datetime64,
                    np.timedelta64,
                    np.complexfloating,
                )
            ):
                return obj.astype(str).tolist()
            return obj.tolist()
        if isinstance(obj, np.dtype):
            return str(obj)

    if DependencyManager.pandas.imported():
        import pandas as pd

        if isinstance(obj, pd.DataFrame):
            return obj.to_dict("records")
        if isinstance(obj, pd.Series):
            return obj.to_list()
        if isinstance(obj, pd.Categorical):
            return obj.tolist()
        if isinstance(
            obj,
            (
                pd.CategoricalDtype,
                pd.Timestamp,
                pd.Timedelta,
                pd.Interval,
                pd.Period,
            ),
        ):
            return str(obj)
        if obj is pd.NaT:
            return str(obj)
        if isinstance(
            obj,
            (
                pd.TimedeltaIndex,
                pd.DatetimeIndex,
                pd.IntervalIndex,
                pd.PeriodIndex,
            ),
        ):
            return obj.astype(str).tolist()
        if isinstance(obj, pd.MultiIndex):
            return obj.to_list()
        if isinstance(obj, pd.Index):
            return obj.to_list()

        # Catch-all for other pandas objects
        try:
            if isinstance(obj, pd.core.base.PandasObject):  # type: ignore
                import json

                return json.loads(obj.to_json(date_format="iso"))
        except AttributeError:
            pass

    # Handle shapely geometry objects from geopandas
    if DependencyManager.geopandas.imported():
        try:
            # shapely.geometry.base.BaseGeometry is the base class
            from shapely.geometry.base import BaseGeometry  # type: ignore

            if isinstance(obj, BaseGeometry):
                # Convert to WKT (Well-Known Text) string representation
                return str(obj)
        except (ImportError, AttributeError):
            pass

    if DependencyManager.polars.imported():
        import polars as pl

        if isinstance(obj, pl.DataFrame):
            return obj.to_dict()
        if isinstance(obj, pl.LazyFrame):
            return obj.collect().to_dict()
        if isinstance(obj, pl.Series):
            return obj.to_list()
        # Handle Polars data types
        if hasattr(pl, "datatypes") and hasattr(obj, "__class__"):
            if hasattr(pl.datatypes, "DataType") and isinstance(
                obj, pl.datatypes.DataType
            ):
                return str(obj)

    # Handle Pillow images
    if DependencyManager.pillow.imported():
        try:
            from PIL import Image

            if isinstance(obj, Image.Image):
                return io_to_data_url(obj, "image/png")
        except Exception:
            LOGGER.debug("Unable to convert image to data URL", exc_info=True)

    # Handle Matplotlib figures and axes
    if DependencyManager.matplotlib.imported():
        try:
            import matplotlib.figure
            from matplotlib.axes import Axes

            from marimo._output.formatting import as_html
            from marimo._plugins.stateless.flex import vstack

            # Fix: the Figure branch previously rendered the HTML but never
            # returned it (only the Axes branch returned), so figures fell
            # through to the generic __dict__ serialization below. Both
            # figures and axes now return the rendered payload.
            if isinstance(obj, (matplotlib.figure.Figure, Axes)):
                html = as_html(vstack([str(obj), obj]))
                mimetype, data = html._mime_()
                return {"mimetype": mimetype, "data": data}
        except Exception:
            LOGGER.debug(
                "Error converting matplotlib figures to HTML",
                exc_info=True,
            )

    # Handle objects with __slots__.
    # Check on type(obj) to avoid triggering __getattr__ on objects that
    # implement it.
    slots = getattr(type(obj), "__slots__", None)
    if slots is not None:
        try:
            slots = iter(slots)
        except TypeError:
            pass  # Fall through to __dict__ handling
        else:
            # Convert to dict using msgspec.to_builtins, which recursively
            # applies this hook to nested attribute values.
            result = {}
            for slot in slots:
                if hasattr(obj, slot):
                    attr_value = getattr(obj, slot)
                    result[slot] = msgspec.to_builtins(
                        attr_value, enc_hook=enc_hook
                    )
            return result

    # Handle custom objects with `__dict__`
    if hasattr(obj, "__dict__"):
        return msgspec.to_builtins(obj.__dict__, enc_hook=enc_hook)

    # Handle collection types (subclasses of the builtins end up here)
    if isinstance(obj, (list, tuple, set, frozenset)):
        return [enc_hook(item) for item in obj]
    if isinstance(obj, collections.deque):
        return [enc_hook(item) for item in obj]

    # Handle dict and dict-like types
    if isinstance(
        obj,
        (
            dict,
            collections.defaultdict,
            collections.OrderedDict,
            collections.Counter,
        ),
    ):
        return {enc_hook(k): enc_hook(v) for k, v in obj.items()}

    # Handle float('inf'), float('nan'), float('-inf'): non-finite numbers
    # are not valid JSON, so encode them as strings.
    if isinstance(obj, float):
        if obj == float("inf"):
            return "Infinity"
        if obj == float("-inf"):
            return "-Infinity"
        if isnan(obj):
            return "NaN"
        return obj

    # Handle bytes objects
    if isinstance(obj, memoryview):
        obj = obj.tobytes()
    if isinstance(obj, bytes):
        try:
            return obj.decode("utf-8")
        except UnicodeDecodeError:
            # Fallback to latin1, which decodes any byte sequence.
            return obj.decode("latin1")

    # Handle primitive types
    if isinstance(obj, (int, str, bool)):
        return obj

    # Handle datetime types
    if isinstance(
        obj,
        (datetime.datetime, datetime.timedelta, datetime.date, datetime.time),
    ):
        return str(obj)

    # Handle None
    if obj is None:
        return None

    # Last resort: a repr is better than raising inside the encoder.
    return repr(obj)
# Shared module-level encoder; decimal_format="number" emits Decimals as
# JSON numbers rather than strings.
_encoder = msgspec.json.Encoder(enc_hook=enc_hook, decimal_format="number")
def encode_json_bytes(obj: Any) -> bytes:
    """
    Encode an object as JSON and return the result as bytes.

    Uses the shared module-level encoder, so marimo-specific types are
    handled through ``enc_hook``.
    """
    return _encoder.encode(obj)
def encode_json_str(obj: Any) -> str:
    """
    Encode an object as JSON and return the result as a UTF-8 string.

    Equivalent to ``encode_json_bytes`` followed by UTF-8 decoding.
    """
    return _encoder.encode(obj).decode("utf-8")
def asdict(obj: msgspec.Struct) -> dict[str, Any]:
    """
    Convert a msgspec.Struct into a dict of builtin Python types.

    Uses `msgspec.to_builtins` with `enc_hook` to handle unsupported values,
    so nested marimo-specific types are converted the same way as in
    ``encode_json_bytes``.
    """
    return msgspec.to_builtins(obj, enc_hook=enc_hook)  # type: ignore[no-any-return]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_messaging/msgspec_encoder.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/responses.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
import starlette.responses
from marimo._messaging.msgspec_encoder import encode_json_bytes
if TYPE_CHECKING:
import msgspec
class StructResponse(starlette.responses.Response):
    """Starlette response whose body is a JSON-encoded msgspec.Struct."""

    media_type = "application/json"

    def __init__(self, struct: msgspec.Struct) -> None:
        # encode_json_bytes handles marimo-specific types via its enc_hook.
        super().__init__(content=encode_json_bytes(struct))
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/responses.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_messaging/mocks.py | from __future__ import annotations
from typing import Any, Optional
from marimo._messaging.mimetypes import ConsoleMimeType
from marimo._messaging.notification import (
CellNotification,
NotificationMessage,
)
from marimo._messaging.serde import deserialize_kernel_message
from marimo._messaging.types import KernelMessage, Stderr, Stream
class MockStream(Stream):
    """Test double for Stream that records every written kernel message."""

    def __init__(self, stream: Optional[Stream] = None) -> None:
        self.messages: list[KernelMessage] = []
        # Share the message buffer with an existing stream so both views
        # observe the same writes.
        if stream is not None and hasattr(stream, "messages"):
            self.messages = stream.messages

    def write(self, data: KernelMessage) -> None:
        """Record the message; raises if it does not deserialize."""
        self.messages.append(data)
        # Attempt to deserialize the message to ensure it is valid
        deserialize_kernel_message(data)

    @property
    def operations(self) -> list[dict[str, Any]]:
        """Recorded messages decoded as raw JSON dicts."""
        import json

        return [json.loads(op_data) for op_data in self.messages]

    @property
    def parsed_operations(self) -> list[NotificationMessage]:
        """Recorded messages decoded into notification objects."""
        return [
            deserialize_kernel_message(op_data) for op_data in self.messages
        ]

    @property
    def cell_notifications(self) -> list[CellNotification]:
        """Only the CellNotification entries among parsed operations."""
        return [
            op
            for op in self.parsed_operations
            if isinstance(op, CellNotification)
        ]
class MockStderr(Stderr):
    """Test double for Stderr that records writes and ignores mimetypes."""

    def __init__(self, stream: Optional[Stderr] = None) -> None:
        self.messages: list[str] = []
        # Share the buffer with an existing stderr stream when provided.
        if stream is not None and hasattr(stream, "messages"):
            self.messages = stream.messages

    def _write_with_mimetype(
        self, data: str, mimetype: ConsoleMimeType
    ) -> int:
        # Mimetype is irrelevant for the mock; only the text is recorded.
        del mimetype
        self.messages.append(data)
        return len(data)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_messaging/mocks.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_server/ai/tools/types.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Literal, Optional, TypeVar
from marimo._config.config import CopilotMode
# Type aliases for tool system
FunctionArgs = dict[str, Any]
ValidationFunction = Callable[[FunctionArgs], Optional[tuple[bool, str]]]
ToolSource = Literal["mcp", "backend", "frontend"]
@dataclass
class ToolDefinition:
    """Tool definition compatible with ai-sdk-ui format."""

    name: str
    description: str
    parameters: dict[str, Any]
    source: ToolSource
    mode: list[CopilotMode]  # tools can be available in multiple modes

    def __str__(self) -> str:
        # Compact human-readable summary used for logging/debugging.
        return "Tool(name={}, description={})".format(
            self.name, self.description
        )
@dataclass
class ToolCallResult:
    """Represents the result of a tool invocation."""

    # Name of the tool that was invoked.
    tool_name: str
    # Whatever the tool returned; the shape depends on the tool.
    result: Any
    # Human-readable error description; None on success.
    error: Optional[str] = None
T = TypeVar("T")
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/ai/tools/types.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ast/codegen_data/test_get_alias_import.py |
import marimo as mo
__generated_with = "0.14.11"
app = mo.App()
@app.cell
def one():
x: int = 0
return (x,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ast/codegen_data/test_get_alias_import.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/ipython/img_mimebundle.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def _():
    from io import BytesIO

    from PIL import Image, ImageDraw

    class BlueOnGray:
        """Exposes both PNG and HTML outputs via ``_repr_mimebundle_``."""

        def __init__(self, text, retina=False):
            self.text = text
            self.retina = retina

        def _repr_mimebundle_(self, include=None, exclude=None):
            w, h = 200, 80
            # When retina, the metadata reports display dimensions at half
            # the rendered pixel size.
            f = 2 if self.retina else 1
            img = Image.new("RGB", (w, h), color="lightgray")
            metadata = {"image/png": {"width": w // f, "height": h // f}}
            draw = ImageDraw.Draw(img)
            draw.text((10, 30), self.text, fill="blue")
            buffer = BytesIO()
            img.save(buffer, format="PNG")
            # Returns (bundle, metadata), the two-tuple form of the
            # _repr_mimebundle_ protocol.
            return {
                "image/png": buffer.getvalue(),
                "text/html": f"<span style='color: blue; background-color: gray'>{self.text}</span>",
            }, metadata

    BlueOnGray("Blue text on gray background")
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/ipython/img_mimebundle.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_utils/timer.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import functools
import time
from typing import Any, Callable
def timer(func: Callable[..., Any]) -> Callable[..., Any]:
    """
    A decorator that measures and prints the execution time of a function.

    This should only be used for manual debugging.
    """

    @functools.wraps(func)
    def timed(*args: Any, **kwargs: Any) -> Any:
        started = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - started
        # Printed (not logged) on purpose: this is a manual debugging aid.
        print(f"{func.__name__} took {elapsed:.4f} seconds to execute")  # noqa: T201
        return result

    return timed
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/timer.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_server/codes.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from enum import IntEnum
class WebSocketCodes(IntEnum):
    """WebSocket close codes used by marimo's connection handlers."""

    # 1003 is RFC 6455 "unsupported data"; reused here to signal that a
    # session is already connected.
    ALREADY_CONNECTED = 1003
    NORMAL_CLOSE = 1000
    FORBIDDEN = 1008
    # 3000-3999 is the registered-use range per RFC 6455.
    UNAUTHORIZED = 3000
    UNEXPECTED_ERROR = 1011
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/codes.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_utils/cell_matching.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from marimo._types.ids import CellId_t
if TYPE_CHECKING:
from collections.abc import Sequence
def similarity_score(s1: str, s2: str) -> float:
    """Fast dissimilarity score based on common prefix and suffix.

    Returns a lower score for more similar strings (0.0 for equal strings).
    """
    shared_prefix = 0
    for a, b in zip(s1, s2):
        if a != b:
            break
        shared_prefix += 1

    # Only look for a common suffix when the strings diverge somewhere in
    # the middle; otherwise prefix and suffix would double-count characters.
    shared_suffix = 0
    if shared_prefix < min(len(s1), len(s2)):
        for a, b in zip(reversed(s1), reversed(s2)):
            if a != b:
                break
            shared_suffix += 1

    # Inverse similarity: a shorter shared affix yields a higher score.
    return len(s1) + len(s2) - 2.0 * (shared_prefix + shared_suffix)
def group_lookup(
    ids: Sequence[CellId_t], codes: Sequence[str]
) -> dict[str, list[tuple[int, CellId_t]]]:
    """Group (position, cell id) pairs by their cell code."""
    grouped: dict[str, list[tuple[int, CellId_t]]] = {}
    for position, (cell_id, code) in enumerate(zip(ids, codes)):
        bucket = grouped.setdefault(code, [])
        bucket.append((position, cell_id))
    return grouped
def extract_order(
    codes: list[str], lookup: dict[str, list[tuple[int, CellId_t]]]
) -> list[list[int]]:
    """Assign consecutive slot indices to each code.

    Each code receives as many consecutive indices as it has entries in
    ``lookup``; offsets accumulate across the whole list.

    Args:
        codes: cell codes in order.
        lookup: code -> duplicate entries, as produced by ``group_lookup``.

    Returns:
        One list of slot indices per input code.

    Raises:
        KeyError: if a code is missing from ``lookup``.
    """
    # Build by appending rather than `[[]] * len(codes)`: multiplying a list
    # of lists creates N aliases of one shared list — harmless in the old
    # code only because every slot was reassigned, but a latent trap.
    offset = 0
    order: list[list[int]] = []
    for code in codes:
        dupes = len(lookup[code])
        order.append(list(range(offset, offset + dupes)))
        offset += dupes
    return order
def get_unique(
    codes: Sequence[str], available: dict[str, list[tuple[int, CellId_t]]]
) -> list[str]:
    """Return the codes present in ``available``, deduplicated in order.

    Order matters, hence the manual loop instead of ``set()``.
    """
    # Pre-seed the exclusion set with codes absent from the lookup so they
    # never make it into the result.
    excluded = set(codes) - set(available)
    unique: list[str] = []
    for code in codes:
        if code in excluded:
            continue
        excluded.add(code)
        unique.append(code)
    return unique
def pop_local(available: list[tuple[int, CellId_t]], idx: int) -> CellId_t:
    """Remove and return the cell id whose recorded index is nearest idx.

    Equidistant entries resolve to the earliest one, matching ``min``'s
    tie-breaking behavior.
    """
    distances = [abs(position - idx) for position, _ in available]
    closest = distances.index(min(distances))
    return available.pop(closest)[1]
def _hungarian_algorithm(scores: list[list[float]]) -> list[int]:
    """Implements the Hungarian algorithm to find the best matching.

    In general this class of problem is known as the assignment problem and is
    pretty well studied. This is a textbook implementation to avoid additional
    dependencies. Links:

    - https://en.wikipedia.org/wiki/Hungarian_algorithm

    Args:
        scores: square cost matrix; scores[i][j] is the cost of assigning
            row i to column j (lower is better).

    Returns:
        A list where result[j] = i means column j is assigned row i, and
        -1 marks an unassigned column.
    """
    # Work on a copy so the caller's matrix is untouched.
    score_matrix = [row[:] for row in scores]
    n = len(score_matrix)

    # Step 1: Subtract row minima
    for i in range(n):
        min_value = min(score_matrix[i])
        for j in range(n):
            score_matrix[i][j] -= min_value

    # Step 2: Subtract column minima
    for j in range(n):
        min_value = min(score_matrix[i][j] for i in range(n))
        for i in range(n):
            score_matrix[i][j] -= min_value

    # Step 3: Find initial assignment
    row_assignment = [-1] * n
    col_assignment = [-1] * n

    # Find independent zeros
    for i in range(n):
        for j in range(n):
            if (
                score_matrix[i][j] == 0
                and row_assignment[i] == -1
                and col_assignment[j] == -1
            ):
                row_assignment[i] = j
                col_assignment[j] = i

    # Step 4: Improve assignment iteratively
    # NOTE(review): this step uses simple uncovered-value adjustment rather
    # than the full augmenting-path / line-cover method; the only exits are
    # a complete assignment or the inf guard below. Confirm termination and
    # optimality on adversarial inputs.
    while True:
        assigned_count = sum(1 for x in row_assignment if x != -1)
        if assigned_count == n:
            break

        # Find minimum uncovered value
        min_uncovered = float("inf")
        for i in range(n):
            for j in range(n):
                if row_assignment[i] == -1 and col_assignment[j] == -1:
                    min_uncovered = min(min_uncovered, score_matrix[i][j])

        # No uncovered cells left: nothing more can be improved.
        if min_uncovered == float("inf"):
            break

        # Update matrix
        for i in range(n):
            for j in range(n):
                if row_assignment[i] == -1 and col_assignment[j] == -1:
                    score_matrix[i][j] -= min_uncovered
                elif row_assignment[i] != -1 and col_assignment[j] != -1:
                    score_matrix[i][j] += min_uncovered

        # Try to find new assignments
        for i in range(n):
            if row_assignment[i] == -1:
                for j in range(n):
                    if score_matrix[i][j] == 0 and col_assignment[j] == -1:
                        row_assignment[i] = j
                        col_assignment[j] = i
                        break

    # Convert to result format: invert row->col into col->row.
    result = [-1] * n
    for i in range(n):
        if row_assignment[i] != -1:
            result[row_assignment[i]] = i
    return result
def _match_cell_ids_by_similarity(
    prev_ids: Sequence[CellId_t],
    prev_codes: Sequence[str],
    next_ids: Sequence[CellId_t],
    next_codes: Sequence[str],
) -> list[CellId_t]:
    """Match cell IDs based on code similarity.

    Args:
        prev_ids: Ids of the previous notebook's cells.
        prev_codes: Code for each previous cell (parallel to ``prev_ids``).
        next_ids: Ids of the next notebook's cells.
        next_codes: Code for each next cell (parallel to ``next_ids``).

    Returns:
        One id per entry of ``next_codes``: a previous id where the code
        matches exactly or closely, otherwise an unused id from
        ``next_ids``.
    """
    assert len(prev_codes) == len(prev_ids)
    assert len(next_codes) == len(next_ids)
    # ids that are not in prev_ids but in next_ids
    id_pool = set(next_ids) - set(prev_ids)

    def get_next_available_id(idx: int) -> CellId_t:
        # Prefer keeping next_ids[idx] itself when unused; otherwise hand
        # out an arbitrary unused id from the pool.
        cell_id = next_ids[idx]
        # Use the id from the pool if available
        if cell_id in id_pool:
            id_pool.remove(cell_id)
        elif id_pool:
            # Otherwise just use the next available id
            cell_id = id_pool.pop()
        else:
            # If no ids are available, we could generate a new one
            # but this should never run.
            raise RuntimeError(
                "No available IDs left to assign. This should not happen."
            )
        return cell_id

    def filter_and_backfill() -> list[CellId_t]:
        # Fill any still-unmatched positions from the unused-id pool.
        for idx, _ in enumerate(next_ids):
            if result[idx] is None:
                # If we have a None, we need to fill it with an available ID
                result[idx] = get_next_available_id(idx)
        # Only needed to appease the type checker. We just filled all None
        # values.
        return [_id for _id in result if _id is not None]

    # Hash matching to capture permutations
    # covers next is a subset of prev (i.e. next - prev == {})
    previous_lookup = group_lookup(prev_ids, prev_codes)
    next_lookup = group_lookup(next_ids, next_codes)
    result: list[Optional[CellId_t]] = [None] * len(next_codes)
    filled = 0
    for idx, code in enumerate(next_codes):
        if code in previous_lookup:
            # If we have an exact match, use it
            filled += 1
            # pop_local picks the candidate whose original position is
            # closest to idx, so duplicated code keeps its relative order.
            result[idx] = pop_local(previous_lookup[code], idx)
            if not previous_lookup[code]:
                del previous_lookup[code]
            # Clean up the next_lookup match too.
            if code in next_lookup:
                pop_local(next_lookup[code], idx)
                if not next_lookup[code]:
                    del next_lookup[code]
    # If we filled all positions, return the result
    # or if prev is a subset of next, then prev has been dequeued and emptied,
    # we can just backfill and return.
    if filled == len(next_codes) or not previous_lookup:
        return filter_and_backfill()
    # The remaining case is (next - prev) is not empty.
    # Establish specific order of remaining unique codes so we can match them
    added_code = get_unique(next_codes, next_lookup)
    deleted_code = get_unique(prev_codes, previous_lookup)
    # Build order mappings for the Hungarian algorithm
    # NOTE(review): extract_order presumably maps each unique code to the
    # matrix slots occupied by its duplicates — confirm against the helper.
    next_order = extract_order(added_code, next_lookup)
    prev_order = extract_order(deleted_code, previous_lookup)
    # grab indices for lookup
    next_inverse = {code: i for i, code in enumerate(added_code)}
    # and inverse mapping for prev
    inverse_order = {
        idx: i for i, idxs in enumerate(prev_order) for idx in idxs
    }
    # Pad the scores matrix to ensure it is square
    n = max(len(next_codes) - filled, len(prev_codes) - filled)
    scores = [[0.0] * n for _ in range(n)]
    # Fill matrix, accounting for dupes
    for i, code in enumerate(added_code):
        for j, prev_code in enumerate(deleted_code):
            score = similarity_score(prev_code, code)
            for x in next_order[i]:
                for y in prev_order[j]:
                    # NB. transposed indices for Hungarian
                    scores[y][x] = score
    # Use Hungarian algorithm to find the best matching
    matches = _hungarian_algorithm(scores)
    for idx, code in enumerate(next_codes):
        if result[idx] is None:
            # Consume this code's next matrix slot and map it back to the
            # previous code chosen by the assignment, if any.
            match_idx = next_order[next_inverse[code]].pop(0)
            if match_idx != -1 and matches[match_idx] in inverse_order:
                prev_idx = inverse_order[matches[match_idx]]
                prev_code = deleted_code[prev_idx]
                result[idx] = pop_local(previous_lookup[prev_code], idx)
    return filter_and_backfill()
def match_cell_ids_by_similarity(
    prev_data: dict[CellId_t, str], next_data: dict[CellId_t, str]
) -> dict[CellId_t, CellId_t]:
    """Match cell IDs based on code similarity.

    NB. There is similar code in the front end that matches session results to
    cells, but there are a few caveats for why the logic is different:

     - Session matching is inherent order dependent. If the order is wrong,
       there is no match. Moreover, the code must be an exact match for a
       session to be paired.
     - Cell matching in this context is not order dependent, we assume the
       notebook can be totally scrambled and we still want to match. Loose
       cell matching is also allowed.

    As such, in the frontend case a Levenshtein edit is used to match cells to
    session results based on code.
    While here we can naively use a direct match, and non-matching cells are
    still attempted to match based on some similarity metric.

    Args:
        prev_data: Mapping of previous cell IDs to code
        next_data: Mapping of next cell IDs to code

    Returns:
        A map of old ids to new ids, using prev_ids where possible
    """
    # Guard against empty notebooks: unpacking `zip(*{}.items())` below
    # raises ValueError ("not enough values to unpack"), so handle the
    # trivial cases up front.
    if not next_data:
        return {}
    if not prev_data:
        # Nothing to match against; every next id maps to itself (this is
        # what backfilling would produce, since all next ids are unused).
        return {cell_id: cell_id for cell_id in next_data}
    prev_ids, prev_codes = zip(*prev_data.items())
    next_ids, next_codes = zip(*next_data.items())
    sorted_ids = _match_cell_ids_by_similarity(
        prev_ids,
        prev_codes,
        next_ids,
        next_codes,
    )
    return dict(zip(sorted_ids, next_ids))
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/cell_matching.py",
"license": "Apache License 2.0",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_utils/xdg.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
from pathlib import Path
def home_path() -> Path:
    """Resolve the user's home directory, with a /tmp fallback.

    Returns:
        Path: The resolved home directory, or /tmp when Path.home()
        raises RuntimeError (home cannot be determined).
    """
    try:
        home = Path.home().resolve()
    except RuntimeError:
        # No resolvable home directory; fall back to the temp directory.
        home = Path("/tmp")
    return home
def xdg_config_home() -> Path:
    """Get XDG config home directory.

    Returns $XDG_CONFIG_HOME if set and non-blank, otherwise ~/.config
    """
    configured = os.getenv("XDG_CONFIG_HOME")
    if not (configured and configured.strip()):
        return home_path() / ".config"
    return Path(configured)
def xdg_cache_home() -> Path:
    """Get XDG cache home directory.

    Returns $XDG_CACHE_HOME if set and non-blank, otherwise ~/.cache
    """
    configured = os.getenv("XDG_CACHE_HOME")
    if not (configured and configured.strip()):
        return home_path() / ".cache"
    return Path(configured)
def xdg_state_home() -> Path:
    """Get XDG state home directory.

    On POSIX, returns $XDG_STATE_HOME if set and non-blank, otherwise
    ~/.local/state. On other platforms, returns the home directory.
    """
    if os.name != "posix":
        return home_path()
    configured = os.getenv("XDG_STATE_HOME")
    if configured and configured.strip():
        return Path(configured)
    return home_path() / ".local" / "state"
def marimo_config_path() -> Path:
    """Get marimo config file path using XDG specification.

    $XDG_CONFIG_HOME/marimo/marimo.toml if set, otherwise
    ~/.config/marimo/marimo.toml
    """
    config_root = xdg_config_home()
    return config_root / "marimo" / "marimo.toml"
def marimo_cache_dir() -> Path:
    """Get marimo cache directory using XDG specification.

    $XDG_CACHE_HOME/marimo if set, otherwise ~/.cache/marimo
    """
    cache_root = xdg_cache_home()
    return cache_root / "marimo"
def marimo_state_dir() -> Path:
    """Get marimo state directory using XDG specification.

    On Linux/macOS/Unix, returns:
        $XDG_STATE_HOME/marimo if set, otherwise ~/.local/state/marimo

    On Windows, returns:
        ~/.marimo
    """
    if os.name != "posix":
        return home_path() / ".marimo"
    return xdg_state_home() / "marimo"
def marimo_log_dir() -> Path:
    """Get marimo log directory using XDG specification.

    $XDG_CACHE_HOME/marimo/logs if set, otherwise ~/.cache/marimo/logs
    """
    log_root = marimo_cache_dir()
    return log_root / "logs"
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/xdg.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:tests/_utils/test_xdg.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
import tempfile
from pathlib import Path
from unittest.mock import patch
import pytest
from marimo._utils.platform import is_windows
from marimo._utils.xdg import (
home_path,
marimo_cache_dir,
marimo_config_path,
marimo_log_dir,
marimo_state_dir,
xdg_cache_home,
xdg_config_home,
xdg_state_home,
)
class TestHomePathFunction:
    """Test home_path function behavior."""

    def test_home_path_normal(self) -> None:
        """Test home_path returns home directory when available."""
        fake_home = Path("/home/user")
        with patch("pathlib.Path.home") as mock_home:
            mock_home.return_value = fake_home
            assert home_path() == fake_home.resolve()

    def test_home_path_runtime_error_fallback(self) -> None:
        """Test home_path falls back to /tmp on RuntimeError."""
        with patch("pathlib.Path.home") as mock_home:
            mock_home.side_effect = RuntimeError(
                "Unable to get home directory"
            )
            assert home_path() == Path("/tmp")
class TestXDGBasicFunctions:
    """Test basic XDG directory functions."""

    # NOTE: stacked @patch decorators are applied bottom-up; only the
    # plain @patch(...) decorators inject a mock argument, @patch.dict
    # does not.
    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {}, clear=True)
    def test_xdg_config_home_default(self, mock_home_path) -> None:
        """Test default XDG config home returns ~/.config."""
        mock_home_path.return_value = Path("/home/user")
        result = xdg_config_home()
        assert result == Path("/home/user/.config")

    @patch.dict(os.environ, {"XDG_CONFIG_HOME": "/custom/config"})
    def test_xdg_config_home_env_set(self) -> None:
        """Test XDG config home respects XDG_CONFIG_HOME environment variable."""
        result = xdg_config_home()
        assert result == Path("/custom/config")

    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {}, clear=True)
    def test_xdg_cache_home_default(self, mock_home_path) -> None:
        """Test default XDG cache home returns ~/.cache."""
        mock_home_path.return_value = Path("/home/user")
        result = xdg_cache_home()
        assert result == Path("/home/user/.cache")

    @patch.dict(os.environ, {"XDG_CACHE_HOME": "/custom/cache"})
    def test_xdg_cache_home_env_set(self) -> None:
        """Test XDG cache home respects XDG_CACHE_HOME environment variable."""
        result = xdg_cache_home()
        assert result == Path("/custom/cache")

    # patch("os.name", "posix") forces the POSIX branch of
    # xdg_state_home even though the skipif already excludes Windows.
    @pytest.mark.skipif(is_windows(), reason="POSIX-specific test")
    @patch("os.name", "posix")
    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {}, clear=True)
    def test_xdg_state_home_posix_default(self, mock_home_path) -> None:
        """Test default XDG state home on POSIX systems."""
        mock_home_path.return_value = Path("/home/user")
        result = xdg_state_home()
        assert result == Path("/home/user/.local/state")

    @pytest.mark.skipif(is_windows(), reason="POSIX-specific test")
    @patch("os.name", "posix")
    @patch.dict(os.environ, {"XDG_STATE_HOME": "/custom/state"})
    def test_xdg_state_home_posix_env_set(self) -> None:
        """Test XDG state home on POSIX respects XDG_STATE_HOME environment variable."""
        result = xdg_state_home()
        assert result == Path("/custom/state")

    @pytest.mark.skipif(not is_windows(), reason="Windows-specific test")
    def test_xdg_state_home_non_posix(self) -> None:
        """Test XDG state home on non-POSIX systems returns home directory."""
        with patch("marimo._utils.xdg.home_path") as mock_home_path:
            mock_home_path.return_value = Path.home()
            result = xdg_state_home()
            # On Windows, should return home directory directly
            assert result == Path.home()
class TestMarimoSpecificFunctions:
    """Test marimo-specific XDG functions."""

    # NOTE: stacked @patch decorators are applied bottom-up; only the
    # plain @patch(...) decorators inject a mock argument.
    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {}, clear=True)
    def test_marimo_config_path_default(self, mock_home_path) -> None:
        """Test marimo config path with default XDG config home."""
        mock_home_path.return_value = Path("/home/user")
        result = marimo_config_path()
        assert result == Path("/home/user/.config/marimo/marimo.toml")

    @patch.dict(os.environ, {"XDG_CONFIG_HOME": "/custom/config"})
    def test_marimo_config_path_custom_xdg(self) -> None:
        """Test marimo config path with custom XDG_CONFIG_HOME."""
        result = marimo_config_path()
        assert result == Path("/custom/config/marimo/marimo.toml")

    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {}, clear=True)
    def test_marimo_cache_dir_default(self, mock_home_path) -> None:
        """Test marimo cache directory with default XDG cache home."""
        mock_home_path.return_value = Path("/home/user")
        result = marimo_cache_dir()
        assert result == Path("/home/user/.cache/marimo")

    @patch.dict(os.environ, {"XDG_CACHE_HOME": "/custom/cache"})
    def test_marimo_cache_dir_custom_xdg(self) -> None:
        """Test marimo cache directory with custom XDG_CACHE_HOME."""
        result = marimo_cache_dir()
        assert result == Path("/custom/cache/marimo")

    @pytest.mark.skipif(is_windows(), reason="POSIX-specific test")
    @patch("os.name", "posix")
    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {}, clear=True)
    def test_marimo_state_dir_posix_default(self, mock_home_path) -> None:
        """Test marimo state directory on POSIX with default XDG state home."""
        mock_home_path.return_value = Path("/home/user")
        result = marimo_state_dir()
        assert result == Path("/home/user/.local/state/marimo")

    @pytest.mark.skipif(is_windows(), reason="POSIX-specific test")
    @patch("os.name", "posix")
    @patch.dict(os.environ, {"XDG_STATE_HOME": "/custom/state"})
    def test_marimo_state_dir_posix_custom_xdg(self) -> None:
        """Test marimo state directory on POSIX with custom XDG_STATE_HOME."""
        result = marimo_state_dir()
        assert result == Path("/custom/state/marimo")

    @pytest.mark.skipif(not is_windows(), reason="Windows-specific test")
    def test_marimo_state_dir_non_posix(self) -> None:
        """Test marimo state directory on non-POSIX systems."""
        with patch("marimo._utils.xdg.home_path") as mock_home_path:
            mock_home_path.return_value = Path.home()
            result = marimo_state_dir()
            # On Windows, should return home/.marimo
            assert result == Path.home() / ".marimo"

    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {}, clear=True)
    def test_marimo_log_dir_default(self, mock_home_path) -> None:
        """Test marimo log directory with default XDG cache home."""
        mock_home_path.return_value = Path("/home/user")
        result = marimo_log_dir()
        assert result == Path("/home/user/.cache/marimo/logs")

    @patch.dict(os.environ, {"XDG_CACHE_HOME": "/custom/cache"})
    def test_marimo_log_dir_custom_xdg(self) -> None:
        """Test marimo log directory with custom XDG_CACHE_HOME."""
        result = marimo_log_dir()
        assert result == Path("/custom/cache/marimo/logs")
class TestEnvironmentVariableHandling:
    """Test environment variable handling edge cases."""

    # An empty string env var should be treated as unset (fall back to
    # the default directory).
    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {"XDG_CONFIG_HOME": ""})
    def test_empty_xdg_config_home(self, mock_home_path) -> None:
        """Test behavior with empty XDG_CONFIG_HOME."""
        mock_home_path.return_value = Path("/home/user")
        result = xdg_config_home()
        assert result == Path("/home/user/.config")

    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {"XDG_CACHE_HOME": ""})
    def test_empty_xdg_cache_home(self, mock_home_path) -> None:
        """Test behavior with empty XDG_CACHE_HOME."""
        mock_home_path.return_value = Path("/home/user")
        result = xdg_cache_home()
        assert result == Path("/home/user/.cache")

    @pytest.mark.skipif(is_windows(), reason="POSIX-specific test")
    @patch("os.name", "posix")
    @patch("marimo._utils.xdg.home_path")
    @patch.dict(os.environ, {"XDG_STATE_HOME": ""})
    def test_empty_xdg_state_home_posix(self, mock_home_path) -> None:
        """Test behavior with empty XDG_STATE_HOME on POSIX."""
        mock_home_path.return_value = Path("/home/user")
        result = xdg_state_home()
        assert result == Path("/home/user/.local/state")

    # Relative and whitespace-padded values are passed through verbatim;
    # these tests pin that (non-XDG-spec) behavior.
    @patch.dict(os.environ, {"XDG_CONFIG_HOME": "relative/path"})
    def test_relative_paths_in_env_vars(self) -> None:
        """Test behavior with relative paths in environment variables."""
        result = xdg_config_home()
        assert result == Path("relative/path")

    @patch.dict(os.environ, {"XDG_CONFIG_HOME": " /path/with/spaces "})
    def test_whitespace_in_env_vars(self) -> None:
        """Test behavior with whitespace in environment variables."""
        result = xdg_config_home()
        assert result == Path(" /path/with/spaces ")
class TestIntegration:
    """Integration tests using temporary directories."""

    def test_with_temp_directories(self) -> None:
        """Test XDG functions work with real temporary directories."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            config_dir = temp_path / "config"
            cache_dir = temp_path / "cache"
            state_dir = temp_path / "state"
            # Create directories
            config_dir.mkdir()
            cache_dir.mkdir()
            state_dir.mkdir()
            with patch.dict(
                os.environ,
                {
                    "XDG_CONFIG_HOME": str(config_dir),
                    "XDG_CACHE_HOME": str(cache_dir),
                    "XDG_STATE_HOME": str(state_dir),
                },
            ):
                # Test basic functions
                assert xdg_config_home() == config_dir
                assert xdg_cache_home() == cache_dir
                # State-home lookups are POSIX-only, so force the POSIX
                # branch when not on Windows.
                if not is_windows():
                    with patch("os.name", "posix"):
                        assert xdg_state_home() == state_dir
                # Test marimo functions
                assert (
                    marimo_config_path()
                    == config_dir / "marimo" / "marimo.toml"
                )
                assert marimo_cache_dir() == cache_dir / "marimo"
                assert marimo_log_dir() == cache_dir / "marimo" / "logs"
                if not is_windows():
                    with patch("os.name", "posix"):
                        assert marimo_state_dir() == state_dir / "marimo"

    @patch.dict(os.environ, {"XDG_CONFIG_HOME": "/test/config"})
    def test_path_composition(self) -> None:
        """Test that Path objects compose correctly."""
        config_path = marimo_config_path()
        # Test that we can use Path methods
        assert config_path.parent == Path("/test/config/marimo")
        assert config_path.name == "marimo.toml"
        assert config_path.suffix == ".toml"
        # Test that we can compose new paths
        backup_path = config_path.with_suffix(".toml.bak")
        assert backup_path == Path("/test/config/marimo/marimo.toml.bak")

    @patch.dict(os.environ, {})
    def test_return_types(self) -> None:
        """Test that all functions return the correct types."""
        # Basic XDG functions should return Path objects
        assert isinstance(xdg_config_home(), Path)
        assert isinstance(xdg_cache_home(), Path)
        assert isinstance(xdg_state_home(), Path)
        # Marimo functions should return Path objects
        assert isinstance(marimo_config_path(), Path)
        assert isinstance(marimo_cache_dir(), Path)
        assert isinstance(marimo_state_dir(), Path)
        assert isinstance(marimo_log_dir(), Path)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_xdg.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_runtime/test_dataflow_cases.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from functools import partial
from typing import TYPE_CHECKING, Optional, Union
import pytest
from marimo._ast import compiler
from marimo._dependencies.dependencies import DependencyManager
from marimo._runtime import dataflow
from marimo._types.ids import CellId_t
parse_cell = partial(compiler.compile_cell, cell_id=CellId_t("0"))
HAS_DUCKDB = DependencyManager.duckdb.has()
if TYPE_CHECKING:
from collections.abc import Iterable
class KnownFailure(Exception):
    """Raised when a test case is expected (and known) to fail."""
@dataclass
class GraphTestCase:
    """A test case for dataflow graph operations.

    Bundles a small notebook (cell id -> code) with the graph structure
    and ref/def sets expected after registering the cells.
    """

    # Test description
    name: str
    # Code to create and register, keyed by cell id
    code: dict[str, str]
    # Expected graph structure
    expected_parents: Optional[dict[str, Iterable[str]]] = None
    expected_children: Optional[dict[str, Iterable[str]]] = None
    expected_stale: Optional[Iterable[str]] = None
    # Expected refs/defs
    expected_refs: Optional[dict[str, Iterable[str]]] = None
    expected_defs: Optional[dict[str, Iterable[str]]] = None
    # If enabled
    enabled: bool = True
    xfail: Union[bool, str] = False

    def __post_init__(self) -> None:
        # Normalize every "expected" collection to sets so comparisons
        # are order-insensitive.
        for attr in (
            "expected_parents",
            "expected_children",
            "expected_refs",
            "expected_defs",
        ):
            mapping = getattr(self, attr)
            if mapping is not None:
                setattr(
                    self,
                    attr,
                    {key: set(values) for key, values in mapping.items()},
                )
        if self.expected_stale is not None:
            self.expected_stale = set(self.expected_stale)
# Pure-Python dataflow cases: each GraphTestCase pins the expected
# parent/child edges and per-cell ref/def sets for a small notebook,
# keyed by string cell ids.
PYTHON_CASES = [
    # Basic Python Cases
    GraphTestCase(
        name="single node",
        code={"0": "x = 0"},
        expected_parents={"0": []},
        expected_children={"0": []},
        expected_refs={"0": []},
        expected_defs={"0": ["x"]},
    ),
    GraphTestCase(
        name="chain",
        code={"0": "x = 0", "1": "y = x", "2": "z = y\nzz = x"},
        expected_parents={"0": [], "1": ["0"], "2": ["0", "1"]},
        expected_children={"0": ["1", "2"], "1": ["2"], "2": []},
        expected_refs={"0": [], "1": ["x"], "2": ["x", "y"]},
        expected_defs={
            "0": ["x"],
            "1": ["y"],
            "2": ["z", "zz"],
        },
    ),
    # Mutually-referencing cells form a cycle in the graph.
    GraphTestCase(
        name="cycle",
        code={"0": "x = y", "1": "y = x"},
        expected_parents={"0": ["1"], "1": ["0"]},
        expected_children={"0": ["1"], "1": ["0"]},
        expected_refs={"0": ["y"], "1": ["x"]},
        expected_defs={"0": ["x"], "1": ["y"]},
    ),
    GraphTestCase(
        name="diamond",
        code={
            "0": "x = 0",
            "1": "y = x",
            "2": "z = y\nzz = x",
            "3": "a = z",
        },
        expected_parents={
            "0": [],
            "1": ["0"],
            "2": ["0", "1"],
            "3": ["2"],
        },
        expected_children={
            "0": ["1", "2"],
            "1": ["2"],
            "2": ["3"],
            "3": [],
        },
        expected_refs={
            "0": [],
            "1": ["x"],
            "2": ["x", "y"],
            "3": ["z"],
        },
        expected_defs={
            "0": ["x"],
            "1": ["y"],
            "2": ["z", "zz"],
            "3": ["a"],
        },
    ),
    # `del x` references x but defines nothing.
    GraphTestCase(
        name="variable del",
        code={"0": "x = 0", "1": "y = x", "2": "del x"},
        expected_parents={"0": [], "1": ["0"], "2": ["0", "1"]},
        expected_children={"0": ["1", "2"], "1": ["2"], "2": []},
        expected_refs={"0": [], "1": ["x"], "2": ["x"]},
        expected_defs={
            "0": ["x"],
            "1": ["y"],
            "2": [],
        },
    ),
]
SQL_CASES = [
GraphTestCase(
name="python -> sql",
enabled=HAS_DUCKDB,
code={
"0": "df = pd.read_csv('data.csv')",
"1": "result = mo.sql(f'FROM df WHERE name = {name}')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={"0": ["pd"], "1": ["df", "mo", "name"]},
expected_defs={"0": ["df"], "1": ["result"]},
),
GraphTestCase(
name="sql -> python via output",
enabled=HAS_DUCKDB,
code={
"0": "result = mo.sql(f'FROM my_table WHERE name = {name}')",
"1": "df = result.head()",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={"0": ["mo", "name", "my_table"], "1": ["result"]},
expected_defs={"0": ["result"], "1": ["df"]},
),
GraphTestCase(
name="sql -/> python when creating a table",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE my_table (name STRING)')",
"1": "my_table = df.head()",
},
expected_parents={"0": [], "1": []},
expected_children={"0": [], "1": []},
expected_refs={"0": ["mo"], "1": ["df"]},
expected_defs={"0": ["my_table"], "1": ["my_table"]},
),
GraphTestCase(
name="sql redefinition",
enabled=HAS_DUCKDB,
code={
"0": "df = pd.read_csv('data.csv')",
"1": "df = mo.sql(f'FROM df')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={"0": ["pd"], "1": ["df", "mo"]},
expected_defs={"0": ["df"], "1": ["df"]},
),
GraphTestCase(
name="sql should not reference python variables when schema",
enabled=HAS_DUCKDB,
code={
"0": "df = pd.read_csv('data.csv')",
"1": "result = mo.sql(f'FROM my_schema.df')",
},
expected_parents={"0": [], "1": []},
expected_children={"0": [], "1": []},
expected_refs={"0": ["pd"], "1": ["mo", "my_schema.df"]},
expected_defs={"0": ["df"], "1": ["result"]},
),
GraphTestCase(
name="sql should not reference python variables when schema",
enabled=HAS_DUCKDB,
code={
"0": "my_schema = 100",
"1": "_ = mo.sql(f'FROM my_schema.df')",
},
expected_parents={"0": [], "1": []},
expected_children={"0": [], "1": []},
expected_refs={"0": [], "1": ["mo", "my_schema.df"]},
expected_defs={"0": ["my_schema"], "1": []},
),
GraphTestCase(
name="sql should not reference python variables when catalog",
enabled=HAS_DUCKDB,
code={
"0": "my_catalog = 100",
"1": "_ = mo.sql(f'FROM my_catalog.my_schema.df')",
},
expected_parents={"0": [], "1": []},
expected_children={"0": [], "1": []},
expected_refs={"0": [], "1": ["mo", "my_catalog.my_schema.df"]},
expected_defs={"0": ["my_catalog"], "1": []},
),
GraphTestCase(
name="sql table reference resolves to table name even if created with schema",
enabled=HAS_DUCKDB,
code={
"0": "_df = mo.sql(f'CREATE TABLE my_schema.my_table (name STRING)')",
"1": "_df = mo.sql(f'FROM my_table SELECT *')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={"0": ["mo"], "1": ["my_table", "mo"]},
expected_defs={"0": ["my_table"], "1": []},
),
GraphTestCase(
name="sql table reference resolves to table name even if created with catalog and schema",
enabled=HAS_DUCKDB,
code={
"0": "_df = mo.sql(f'CREATE TABLE my_catalog.my_schema.my_table (name STRING)')",
"1": "_df = mo.sql(f'FROM my_table SELECT *')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={"0": ["mo"], "1": ["my_table", "mo"]},
expected_defs={"0": ["my_table"], "1": []},
),
GraphTestCase(
name="sql table created from another table reference",
enabled=HAS_DUCKDB,
code={
"0": "_df = mo.sql(f'CREATE TABLE schema_one.my_table (name STRING)')",
"1": "_df = mo.sql(f'CREATE TABLE schema_two.my_table_two AS SELECT * FROM schema_one.my_table')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={"0": ["mo"], "1": ["mo", "schema_one.my_table"]},
expected_defs={"0": ["my_table"], "1": ["my_table_two"]},
),
GraphTestCase(
name="sql table reference with catalog and schema",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE my_catalog.my_schema.my_table (name STRING)')",
"1": "_ = mo.sql(f'FROM my_catalog.my_schema.my_table SELECT *')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={
"0": ["mo"],
"1": ["my_catalog.my_schema.my_table", "mo"],
},
expected_defs={"0": ["my_table"], "1": []},
),
GraphTestCase(
name="different schemas with same table name",
enabled=HAS_DUCKDB,
code={
"0": "_df = mo.sql(f'CREATE TABLE schema_one.my_table (name STRING)')",
"1": "_df = mo.sql(f'CREATE TABLE schema_two.my_table (name STRING)')",
"2": "_df = mo.sql(f'FROM schema_one.my_table SELECT *')",
},
expected_parents={"0": [], "1": [], "2": ["0"]},
expected_children={"0": ["2"], "1": [], "2": []},
expected_refs={
"0": ["mo"],
"1": ["mo"],
"2": ["mo", "schema_one.my_table"],
},
expected_defs={"0": ["my_table"], "1": ["my_table"], "2": []},
),
GraphTestCase(
name="sql definitions with same name as qualified schema and table",
enabled=HAS_DUCKDB,
code={
"0": "my_table = mo.sql(f'CREATE TABLE schema_one.my_table (name STRING)')",
"1": "schema_one = mo.sql(f'CREATE TABLE schema_one.my_table (name STRING)')",
"2": "my_table",
"3": "schema_one",
},
expected_parents={"0": [], "1": [], "2": ["0"], "3": ["1"]},
expected_children={"0": ["2"], "1": ["3"], "2": [], "3": []},
expected_refs={
"0": ["mo"],
"1": ["mo"],
"2": ["my_table"],
"3": ["schema_one"],
},
expected_defs={
"0": ["my_table"],
"1": ["my_table", "schema_one"],
},
),
GraphTestCase(
name="sql catalog and schema with same name",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE my_db.my_db.my_table (name STRING)')",
"1": "_ = mo.sql(f'FROM my_db.my_db.my_table SELECT *')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "my_db.my_db.my_table"],
},
expected_defs={"0": ["my_table"], "1": []},
),
GraphTestCase(
name="sql view creation with schema reference",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE my_schema.base_table (id INT)')",
"1": "_ = mo.sql(f'CREATE VIEW my_schema.my_view AS SELECT * FROM my_schema.base_table')",
"2": "_ = mo.sql(f'FROM my_schema.my_view SELECT *')",
},
expected_parents={"0": [], "1": ["0"], "2": ["1"]},
expected_children={"0": ["1"], "1": ["2"], "2": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "my_schema.base_table"],
"2": ["mo", "my_schema.my_view"],
},
expected_defs={"0": ["base_table"], "1": ["my_view"], "2": []},
),
GraphTestCase(
name="sql case insensitive schema matching",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE MY_SCHEMA.my_table (name STRING)')",
"1": "_ = mo.sql(f'FROM my_schema.my_table SELECT *')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "my_schema.my_table"],
},
expected_defs={"0": ["my_table"], "1": []},
),
GraphTestCase(
name="sql no reference to python variable when using catalog",
enabled=HAS_DUCKDB,
code={
"0": "my_catalog_var = 'test_catalog'",
"1": "_ = mo.sql(f'FROM my_catalog_var.schema.table SELECT *')",
},
expected_parents={"0": [], "1": []},
expected_children={"0": [], "1": []},
expected_refs={
"0": [],
"1": ["mo", "my_catalog_var.schema.table"],
},
expected_defs={"0": ["my_catalog_var"], "1": []},
),
GraphTestCase(
name="sql no reference to python variable when using schema",
enabled=HAS_DUCKDB,
code={
"0": "my_schema_var = 'test_schema'",
"1": "_ = mo.sql(f'FROM my_schema_var.table SELECT *')",
},
expected_parents={"0": [], "1": []},
expected_children={"0": [], "1": []},
expected_refs={
"0": [],
"1": ["mo", "my_schema_var.table"],
},
expected_defs={"0": ["my_schema_var"], "1": []},
),
GraphTestCase(
name="sql catalog.schema.table requires both catalog and schema to match",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE catalog_one.schema_one.my_table (name STRING)')",
"1": "_ = mo.sql(f'FROM catalog_one.schema_one.my_table SELECT *')",
"2": "_ = mo.sql(f'FROM catalog_two.schema_one.my_table SELECT *')",
"3": "_ = mo.sql(f'FROM catalog_one.schema_two.my_table SELECT *')",
},
expected_parents={"0": [], "1": ["0"], "2": [], "3": []},
expected_children={"0": ["1"], "1": [], "2": [], "3": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "catalog_one.schema_one.my_table"],
"2": ["mo", "catalog_two.schema_one.my_table"],
"3": ["mo", "catalog_one.schema_two.my_table"],
},
expected_defs={"0": ["my_table"], "1": [], "2": [], "3": []},
),
GraphTestCase(
name="sql table substring doesn't cause false positive",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE catalog_one.schema_one.my_table (name STRING)')",
"1": "_ = mo.sql(f'FROM catalog_one.schema_one.my_table_suffix SELECT *')",
"2": "_ = mo.sql(f'FROM catalog_one.schema_one.prefix_my_table SELECT *')",
},
expected_parents={"0": [], "1": [], "2": []},
expected_children={"0": [], "1": [], "2": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "catalog_one.schema_one.my_table_suffix"],
"2": ["mo", "catalog_one.schema_one.prefix_my_table"],
},
expected_defs={"0": ["my_table"], "1": [], "2": []},
),
GraphTestCase(
name="sql table schema substring doesn't cause false positive",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE catalog_one.schema_one.my_table (name STRING)')",
"1": "_ = mo.sql(f'FROM catalog_one.my_table.suffix SELECT *')",
},
expected_parents={"0": [], "1": []},
expected_children={"0": [], "1": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "catalog_one.my_table.suffix"],
},
expected_defs={"0": ["my_table"], "1": []},
),
GraphTestCase(
name="sql table attach statements, single definition",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f\"ATTACH 'my_db.db' as my_db\")",
"1": "_ = mo.sql(f'FROM my_db.main.my_table SELECT *')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "my_db.main.my_table"],
},
expected_defs={"0": ["my_db"], "1": []},
),
GraphTestCase(
name="sql table attach statements, multiple definitions",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f\"ATTACH 'my_db.db' as my_db\")",
"1": "_ = mo.sql(f'CREATE OR REPLACE TABLE my_db.my_table AS SELECT 1')",
"2": "_ = mo.sql(f'FROM my_db.main.my_table SELECT *')",
"3": "_ = mo.sql(f'FROM my_db.my_table SELECT *')",
},
expected_parents={"0": [], "1": [], "2": ["0", "1"], "3": ["0", "1"]},
expected_children={"0": ["2", "3"], "1": ["2", "3"], "2": [], "3": []},
expected_refs={
"0": ["mo"],
"1": ["mo"],
"2": ["mo", "my_db.main.my_table"],
"3": ["mo", "my_db.my_table"],
},
expected_defs={"0": ["my_db"], "1": ["my_table"], "2": [], "3": []},
),
GraphTestCase(
name="create table with the same name from a different schema",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE my_table AS SELECT * FROM schema_one.my_table')",
"1": "_ = mo.sql(f'SELECT * FROM my_table')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={
"0": ["mo", "schema_one.my_table"],
"1": ["mo", "my_table"],
},
expected_defs={"0": ["my_table"], "1": []},
),
GraphTestCase(
name="create table with the same name from catalog.schema hierarchy",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE my_table AS SELECT * FROM catalog_one.schema_one.my_table')",
"1": "_ = mo.sql(f'SELECT * FROM my_table')",
},
expected_parents={"0": [], "1": ["0"]},
expected_children={"0": ["1"], "1": []},
expected_refs={
"0": ["mo", "catalog_one.schema_one.my_table"],
"1": ["mo", "my_table"],
},
expected_defs={"0": ["my_table"], "1": []},
),
GraphTestCase(
name="create schema with the same name from a different catalog",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE SCHEMA my_schema')",
"1": "_ = mo.sql(f'CREATE TABLE my_schema.my_table AS SELECT * FROM catalog_one.my_schema.my_table')",
"2": "_ = mo.sql(f'SELECT * FROM my_schema.my_table')",
},
expected_parents={"0": [], "1": [], "2": ["1"]},
expected_children={"0": [], "1": ["2"], "2": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "catalog_one.my_schema.my_table"],
"2": ["mo", "my_schema.my_table"],
},
expected_defs={"0": ["my_schema"], "1": ["my_table"], "2": []},
),
GraphTestCase(
name="create table that references itself in join prevents self-loop",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE users AS SELECT 1 as id')",
"1": "_ = mo.sql(f'CREATE TABLE orders AS SELECT u.id FROM schema_one.users u JOIN schema_two.orders o ON u.id = o.user_id')",
"2": "_ = mo.sql(f'SELECT * FROM orders')",
},
expected_parents={"0": [], "1": ["0"], "2": ["1"]},
expected_children={"0": ["1"], "1": ["2"], "2": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "schema_one.users", "schema_two.orders"],
"2": ["mo", "orders"],
},
expected_defs={"0": ["users"], "1": ["orders"], "2": []},
),
GraphTestCase(
name="multiple tables with hierarchical self-reference patterns",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE table_a AS SELECT * FROM db1.table_a')",
"1": "_ = mo.sql(f'CREATE TABLE table_b AS SELECT * FROM db2.table_b')",
"2": "_ = mo.sql(f'SELECT * FROM table_a UNION ALL SELECT * FROM table_b')",
},
expected_parents={"0": [], "1": [], "2": ["0", "1"]},
expected_children={"0": ["2"], "1": ["2"], "2": []},
expected_refs={
"0": ["mo", "db1.table_a"],
"1": ["mo", "db2.table_b"],
"2": ["mo", "table_a", "table_b"],
},
expected_defs={"0": ["table_a"], "1": ["table_b"], "2": []},
),
GraphTestCase(
name="create table from hierarchical ref then reference it hierarchically",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE SCHEMA my_schema')",
"1": "_ = mo.sql(f'CREATE TABLE my_schema.data AS SELECT * FROM external.my_schema.data')",
"2": "_ = mo.sql(f'SELECT * FROM my_schema.data')",
},
expected_parents={"0": [], "1": [], "2": ["1"]},
expected_children={"0": [], "1": ["2"], "2": []},
expected_refs={
"0": ["mo"],
"1": ["mo", "external.my_schema.data"],
"2": ["mo", "my_schema.data"],
},
expected_defs={"0": ["my_schema"], "1": ["data"], "2": []},
),
GraphTestCase(
name="sql table multiple definitions, different order",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE OR REPLACE TABLE my_db.my_table AS SELECT 1')",
"1": "_ = mo.sql(f\"ATTACH 'my_db.db' as my_db\")",
"2": "_ = mo.sql(f'FROM my_db.main.my_table SELECT *')",
},
expected_parents={"0": [], "1": [], "2": ["0", "1"]},
expected_children={"0": ["2"], "1": ["2"], "2": []},
expected_refs={
"0": ["mo"],
"1": ["mo"],
"2": ["mo", "my_db.main.my_table"],
},
expected_defs={"0": ["my_table"], "1": ["my_db"], "2": []},
),
GraphTestCase(
name="sql table ordering doesn't cause false positives",
enabled=HAS_DUCKDB,
code={
"0": "_ = mo.sql(f'CREATE TABLE catalog_one.schema_one.my_table (name STRING)')",
"1": "_ = mo.sql(f'CREATE TABLE catalog_one.my_table.schema_one (name STRING)')",
"2": "_ = mo.sql(f'CREATE TABLE schema_one.my_table.catalog_one (name STRING)')",
"3": "_ = mo.sql(f'CREATE TABLE schema_one.catalog_one.my_table (name STRING)')",
"4": "_ = mo.sql(f'CREATE TABLE my_table.catalog_one.schema_one (name STRING)')",
"5": "_ = mo.sql(f'CREATE TABLE my_table.schema_one.catalog_one (name STRING)')",
},
expected_parents={
"0": [],
"1": [],
"2": [],
"3": [],
"4": [],
"5": [],
},
expected_children={
"0": [],
"1": [],
"2": [],
"3": [],
"4": [],
"5": [],
},
expected_refs={
"0": ["mo"],
"1": ["mo"],
"2": ["mo"],
"3": ["mo"],
"4": ["mo"],
"5": ["mo"],
},
expected_defs={
"0": ["my_table"],
"1": ["schema_one"],
"2": ["catalog_one"],
"3": ["my_table"],
"4": ["schema_one"],
"5": ["catalog_one"],
},
),
]
# All dataflow graph cases, Python and SQL combined.
CASES = PYTHON_CASES + SQL_CASES


@pytest.mark.parametrize("case", CASES)
def test_cases(case: GraphTestCase) -> None:
    """Register each cell of ``case`` into a fresh graph and verify the
    resulting refs/defs and parent/child edges against the expectations.

    Cases marked ``xfail`` are expected to fail their assertions; a passing
    assertion under ``xfail`` currently passes silently (no XPASS reporting).
    """
    print(f"Running {case.name}")
    graph = dataflow.DirectedGraph()
    if not case.enabled:
        pytest.skip(f"Skipping {case.name} because it's not enabled")
    for cell_id, code in case.code.items():
        cell = parse_cell(code)
        graph.register_cell(CellId_t(cell_id), cell)

    def make_assertions() -> None:
        # Per-cell refs/defs are only checked when the case specifies them.
        if case.expected_refs:
            for cell_id, refs in case.expected_refs.items():
                assert graph.cells[CellId_t(cell_id)].refs == refs, (
                    f"Cell {cell_id} has refs {graph.cells[CellId_t(cell_id)].refs}, expected {refs}"
                )
        if case.expected_defs:
            for cell_id, defs in case.expected_defs.items():
                assert graph.cells[CellId_t(cell_id)].defs == defs, (
                    f"Cell {cell_id} has defs {graph.cells[CellId_t(cell_id)].defs}, expected {defs}"
                )
        assert graph.parents == case.expected_parents, (
            f"Graph parents {graph.parents} do not match expected {case.expected_parents}"
        )
        assert graph.children == case.expected_children, (
            f"Graph children {graph.children} do not match expected {case.expected_children}"
        )

    if case.xfail:
        if isinstance(case.xfail, str):
            print(case.xfail)
        try:
            make_assertions()
        except AssertionError as e:
            # pytest.xfail() raises immediately, so nothing after this call
            # executes; the former `raise KnownFailure(str(e)) from e` that
            # followed it was unreachable and has been removed.
            pytest.xfail(str(e))
    else:
        make_assertions()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/test_dataflow_cases.py",
"license": "Apache License 2.0",
"lines": 667,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/ai/github_model_check.py | # /// script
# dependencies = [
# "marimo",
# "openai==1.99.9",
# ]
# [tool.marimo.runtime]
# auto_instantiate = false
# ///
import marimo

# Version of marimo that generated this notebook file.
__generated_with = "0.18.0"
app = marimo.App(width="medium")
@app.cell(hide_code=True)
def _():
    # Shared imports; only the returned names are visible to other cells
    # through marimo's dataflow graph. NOTE(review): `os` is imported but
    # not returned, so no downstream cell can use it.
    import marimo as mo
    import openai
    import os
    import httpx
    return httpx, mo, openai
@app.cell
def _(mo):
    # Text input for the GitHub token used to authenticate against the
    # GitHub Models inference endpoint.
    api_key = mo.ui.text(
        value="", label="GitHub API Key. Use `gh auth token`", full_width=True
    )
    api_key
    return (api_key,)
@app.cell
def _(mo):
    # Checkbox toggling whether Copilot-style headers are sent (see the
    # client cell below).
    with_headers = mo.ui.checkbox(label="With Headers")
    with_headers
    return (with_headers,)
@app.cell
def _(api_key, httpx, openai, with_headers):
    # NOTE(review): verify=False disables TLS certificate verification —
    # tolerable for a local smoke test, never for production code.
    client = openai.Client(
        base_url="https://models.github.ai/inference",
        api_key=api_key.value,
        http_client=httpx.Client(verify=False),
        # Copilot-style headers, sent only when the checkbox is ticked.
        default_headers={
            "editor-version": "vscode/1.95.0",
            "Copilot-Integration-Id": "vscode-chat",
        }
        if with_headers.value
        else {},
    )
    models = client.models.list().model_dump()
    return client, models
@app.cell
def _(models):
    # Flatten the model listing into just the model identifiers.
    ids = [item["id"] for item in models["data"]]
    ids
    return (ids,)
@app.cell
def _(mo):
    # Button to trigger API call
    run_button = mo.ui.run_button(label="Generate Response")
    run_button
    return (run_button,)
@app.cell
def _(client, ids, run_button):
    # Smoke-test every listed model with a trivial chat request; runs only
    # after the user clicks "Generate Response".
    results = {}
    if run_button.value:
        for model in ids:
            try:
                response = client.chat.completions.create(
                    model=model,
                    messages=[{"role": "user", "content": "hi"}],
                    max_tokens=100,
                )
                msg = (
                    "✅ Model "
                    + model
                    + " passed: "
                    + response.choices[0].message.content
                )
                print(msg)
                results[model] = msg
            except Exception:
                # Best-effort probe: any API error just marks the model as
                # failed. Was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                msg = "❌ Model " + model + " failed"
                print(msg)
                results[model] = msg
    return (results,)
@app.cell
def _(mo, results):
    # Render nothing until at least one model has been probed.
    mo.stop(not results)
    mo.ui.table(results, selection=None)
    return
# Allow running the notebook as a plain script.
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/ai/github_model_check.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/ai/model_check.py | # /// script
# requires-python = ">=3.12"
# dependencies = [
# "anthropic==0.64.0",
# "any-llm-sdk[anthropic]==0.12.1",
# "google-genai==1.30.0",
# "marimo",
# "polars==1.32.3",
# "protobuf==5.29.5",
# ]
# ///
import marimo

# Version of marimo that generated this notebook file.
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell(hide_code=True)
def _():
    # Inline CSV catalog of models to probe: display name, model id,
    # description, provider, supported roles, and whether the model exposes
    # thinking/reasoning. Parsed with polars in the next cell.
    models_csv = """
    name,model,description,provider,roles,thinking
    Claude 3 Haiku,claude-3-haiku-20240307,Fastest model optimized for speed and efficiency,anthropic,"chat, edit",False
    Claude 3.5 Haiku,claude-3-5-haiku-20241022,Fast and efficient model with excellent performance for everyday tasks,anthropic,"chat, edit",False
    Claude 3.5 Sonnet v2,claude-3-5-sonnet-20241022,High-performance model with advanced coding and reasoning capabilities,anthropic,"chat, edit, code, vision",False
    Claude Opus 4,claude-opus-4-20250514,World's best coding model with sustained performance on complex tasks,anthropic,"chat, edit, code, reasoning, vision",True
    Claude Opus 4.1,claude-opus-4-1-20250805,Latest flagship model with hybrid reasoning capabilities,anthropic,"chat, edit, reasoning, vision",True
    Claude Sonnet 3.7,claude-3-7-sonnet-20250219,Hybrid AI reasoning model with rapid or thoughtful responses,anthropic,"chat, edit, reasoning, vision",True
    Claude Sonnet 4,claude-sonnet-4-20250514,Superior coding and reasoning while responding precisely to instructions,anthropic,"chat, edit, code, reasoning, vision",True
    GPT-4.1,gpt-4.1,"Fast, highly intelligent model with largest context window",azure,"chat, edit, code",False
    GPT-4o,gpt-4o,"Fast, intelligent, flexible GPT model with multimodal capabilities",azure,"chat, edit, vision",False
    GPT-5,gpt-5,The best model for coding and agentic tasks across domains,azure,"chat, edit, code, agent",False
    GPT-5 Mini,gpt-5-mini,"A faster, cost-efficient version of GPT-5 for well-defined tasks",azure,"chat, edit",False
    o1-mini,o1-mini,Faster and cheaper reasoning model,azure,"reasoning, math",True
    o1-preview,o1-preview,Reasoning model with advanced problem solving capabilities,azure,"reasoning, math, code",True
    Amazon Nova Lite,amazon.nova-lite-v1:0,Fast and cost-effective multimodal model,bedrock,"chat, edit, vision",False
    Amazon Nova Micro,amazon.nova-micro-v1:0,Ultra-fast text-only model for simple tasks,bedrock,"chat, edit",False
    Amazon Nova Premier,amazon.nova-premier-v1:0,High-performance multimodal model for complex reasoning tasks,bedrock,"chat, edit, vision, reasoning",False
    Amazon Nova Pro,amazon.nova-pro-v1:0,Balanced multimodal model for general-purpose applications,bedrock,"chat, edit, vision",False
    Claude 3.5 Haiku,anthropic.claude-3-5-haiku-20241022-v1:0,Fast and efficient model for everyday tasks,bedrock,"chat, edit",False
    Claude 3.5 Sonnet v2,anthropic.claude-3-5-sonnet-20241022-v1:0,High-performance model with advanced coding capabilities,bedrock,"chat, edit, code, vision",False
    Claude Opus 4.1,anthropic.claude-opus-4-1-20250805-v1:0,Latest flagship model with hybrid reasoning capabilities,bedrock,"chat, edit, reasoning, vision",True
    Claude Sonnet 3.7,us.anthropic.claude-3-7-sonnet-20250219-v1:0,Hybrid reasoning model with rapid or thoughtful responses (uses inference profile),bedrock,"chat, edit, reasoning, vision",True
    Claude Sonnet 4,anthropic.claude-sonnet-4-20250514-v1:0,Superior coding and reasoning model,bedrock,"chat, edit, code, reasoning, vision",True
    DeepSeek R1,deepseek.deepseek-r1-671b-instruct-v1:0,Advanced reasoning model with step-by-step thinking,bedrock,"reasoning, math, code",True
    Meta Llama 3.2 90B Vision,meta.llama3-2-90b-instruct-v1:0,Large multimodal model with vision capabilities,bedrock,"chat, edit, vision",False
    Meta Llama 3.3 70B,meta.llama3-3-70b-instruct-v1:0,Latest Meta model with improved capabilities,bedrock,"chat, edit, code",False
    Gemini 1.5 Flash,gemini-1.5-flash,Fast and cost-effective model for high-volume applications,google,"chat, edit",False
    Gemini 1.5 Pro,gemini-1.5-pro,Advanced multimodal model with extensive context understanding,google,"chat, edit, vision, code",False
    Gemini 2.0 Flash,gemini-2.0-flash,"Next generation features, speed, and realtime streaming",google,"chat, edit, vision, code",False
    Gemini 2.5 Flash,gemini-2.5-flash,Efficient workhorse model with controllable thinking budget,google,"chat, edit, reasoning",True
    Gemini 2.5 Flash-Lite,gemini-2.5-flash-lite,Most cost-efficient model supporting high throughput,google,"chat, edit",False
    Gemini 2.5 Pro,gemini-2.5-pro,Most intelligent and capable AI model with 1M token context window,google,"chat, edit, reasoning, vision, code",True
    Codellama 34B,codellama:34b,Specialized model optimized for code generation and programming,ollama,"code, edit",False
    DeepSeek R1 70B,deepseek-r1:70b,Open reasoning model with performance approaching leading models,ollama,"reasoning, math, code",True
    DeepSeek R1 7B,deepseek-r1:7b,Smaller reasoning model with step-by-step thinking,ollama,"reasoning, math",True
    Gemma 3 27B,gemma3:27b,"Current, most capable Google model that runs on a single GPU",ollama,"chat, edit, code, vision",False
    Llama 3.1 70B,llama3.1:70b,State-of-the-art model from Meta with 70B parameters,ollama,"chat, edit, code",False
    Llama 3.2 Vision 11B,llama3.2-vision:11b,Multimodal model with vision capabilities,ollama,"chat, edit, vision",False
    Llama 3.2 Vision 90B,llama3.2-vision:90b,Large multimodal model with advanced vision understanding,ollama,"chat, edit, vision",False
    Llava 34B,llava:34b,Advanced vision-language understanding model,ollama,"chat, vision",False
    Mistral 7B,mistral:7b,Efficient and powerful model for general-purpose tasks,ollama,"chat, edit",False
    Qwen 3 32B,qwen3:32b,Latest generation model with dense and MoE architectures,ollama,"chat, edit, code",True
    gpt-oss 120B,gpt-oss:120b,Most powerful open-weight model from OpenAI with agentic capabilities,ollama,"chat, reasoning, agent, code",True
    gpt-oss 20B,gpt-oss:20b,Medium-sized open-weight model for low latency tasks,ollama,"chat, agent",True
    GPT-4.1,gpt-4.1,"Fast, highly intelligent model with largest context window (1 million tokens)",openai,"chat, edit, code",False
    GPT-4.1 Mini,gpt-4.1-mini,"Balanced for intelligence, speed, and cost",openai,"chat, edit",False
    GPT-4o,gpt-4o,"Fast, intelligent, flexible GPT model with multimodal capabilities",openai,"chat, edit, vision",False
    GPT-4o Mini,gpt-4o-mini,"Fast, affordable small model for focused tasks",openai,"chat, edit",False
    GPT-5,gpt-5,The best model for coding and agentic tasks across domains,openai,"chat, edit, code, agent",False
    GPT-5 Mini,gpt-5-mini,"A faster, cost-efficient version of GPT-5 for well-defined tasks",openai,"chat, edit",False
    GPT-5 Nano,gpt-5-nano,"Fastest, most cost-efficient version of GPT-5",openai,"chat, edit",False
    o3,o3,Our most powerful reasoning model with chain-of-thought capabilities,openai,"reasoning, math, code",True
    o3-mini,o3-mini,A small model alternative to o3 for reasoning tasks,openai,"reasoning, math",True
    o3-pro,o3-pro,Version of o3 designed to think longer and provide most reliable responses,openai,"reasoning, math, code",True
    o4-mini,o4-mini,"Faster, more affordable reasoning model optimized for math, coding, and visual tasks",openai,"reasoning, code, vision",True
    """
    return (models_csv,)
@app.cell
def _(models_csv):
    # Parse the inline CSV catalog into a polars DataFrame.
    import polars as pl
    import io
    df = pl.read_csv(io.BytesIO(models_csv.encode()))
    df
    return (df,)
@app.cell
def _():
    import any_llm
    import marimo as mo
    # NOTE(review): `anthropic` is imported but not returned — presumably
    # only to verify the dependency is installed; confirm before removing.
    import anthropic
    return any_llm, mo
@app.function
def get_key(provider):
    """Return the stored API key for *provider* from the marimo user config."""
    from marimo._config.manager import get_default_config_manager

    manager = get_default_config_manager(current_path=None)
    ai_config = manager.get_config(hide_secrets=False)["ai"]
    # The OpenAI section is spelled "open_ai" in the config file.
    section = "open_ai" if provider == "openai" else provider
    return ai_config.get(section, {}).get("api_key")
@app.cell
def _(any_llm, mo):
    # Cached single-message completion against the given provider/model;
    # persistent_cache avoids re-billing the same probe across runs.
    @mo.persistent_cache()
    def query(provider, model):
        key = get_key(provider)
        return any_llm.completion(
            model=f"{provider}/{model}",
            messages=[{"role": "user", "content": "hi"}],
            api_key=key,
        )
    return (query,)
@app.cell
def _(df, query):
    # Providers that need local setup or cloud credentials not available
    # in this smoke test.
    SKIP = {"azure", "bedrock", "ollama"}
    for record in df.to_dicts():
        provider = record["provider"]
        if provider in SKIP:
            continue
        model = record["model"]
        print(f"Testing {provider} / {model}")
        # Best-effort probe: print success or the failure reason and keep going.
        try:
            res = query(provider, model)
            print(f"✅ {provider} / {model}")
            print(res.choices[0].message.content)
        except Exception as e:
            print(f"❌ {e}")
    return
@app.cell
def _(query):
    # Extra (provider, model) pairs not present in the CSV catalog.
    others = [
        ["google", "codegemma"],
        ["google", "codegemma-2b"],
        ["google", "codegemma-7b"],
    ]
    for provider, model in others:
        print(f"Testing {provider} / {model}")
        try:
            res = query(provider, model)
            print(f"✅ {provider} / {model}")
            print(res.choices[0].message.content)
        except Exception as e:
            print(f"❌ {e}")
    return
@app.cell
def _():
    # List all Google GenAI models that support text generation.
    import google.genai as genai
    client = genai.Client(api_key=get_key("google"))
    for m in client.models.list():
        for action in m.supported_actions:
            if action == "generateContent":
                print(f"Model name: {m.name}")
                print(f"Display name: {m.display_name}")
                print(f"Description: {m.description}")
                print("-" * 80)
    return
# Allow running the notebook as a plain script.
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/ai/model_check.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_utils/test_formatter.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import sys
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from marimo._utils.formatter import (
BlackFormatter,
CellCodes,
DefaultFormatter,
FormatError,
Formatter,
RuffFormatter,
ruff,
)
class TestFormatter:
    """Test the base Formatter class."""
    async def test_base_formatter_returns_unchanged_codes(self) -> None:
        """Test that base Formatter returns codes unchanged."""
        formatter = Formatter(line_length=88)
        codes: CellCodes = {"cell1": "x = 1\ny = 2", "cell2": "z = 3"}
        result = await formatter.format(codes)
        # The base class is a no-op: it must hand back the very same dict
        # object, not a formatted copy.
        assert result == codes
        assert result is codes  # Should be the same object
class TestDefaultFormatter:
    """Test the DefaultFormatter class that tries ruff, then black.

    Note: stacked ``@patch`` decorators apply bottom-up, so the mock
    arguments arrive in reverse decorator order.
    """
    @patch("marimo._utils.formatter.RuffFormatter")
    @patch("marimo._dependencies.dependencies.DependencyManager.which")
    @patch("marimo._dependencies.dependencies.DependencyManager.ruff")
    async def test_uses_ruff_when_available(
        self,
        mock_ruff: MagicMock,
        mock_which: MagicMock,
        mock_ruff_formatter: MagicMock,
    ) -> None:
        """Test DefaultFormatter uses RuffFormatter when ruff is available."""
        # Mock ruff as available
        mock_ruff.has.return_value = True
        mock_which.return_value = "/usr/bin/ruff"
        # Mock RuffFormatter
        mock_instance = AsyncMock()
        mock_ruff_formatter.return_value = mock_instance
        mock_instance.format.return_value = {"cell1": "formatted_code"}
        formatter = DefaultFormatter(line_length=88)
        codes: CellCodes = {"cell1": "x=1"}
        result = await formatter.format(codes)
        # The configured line length must be forwarded to RuffFormatter.
        mock_ruff_formatter.assert_called_once_with(88)
        mock_instance.format.assert_called_once_with(codes)
        assert result == {"cell1": "formatted_code"}
    @patch("marimo._utils.formatter.BlackFormatter")
    @patch("marimo._dependencies.dependencies.DependencyManager.black")
    @patch("marimo._dependencies.dependencies.DependencyManager.which")
    @patch("marimo._dependencies.dependencies.DependencyManager.ruff")
    async def test_uses_black_when_ruff_unavailable_but_black_available(
        self,
        mock_ruff: MagicMock,
        mock_which: MagicMock,
        mock_black: MagicMock,
        mock_black_formatter: MagicMock,
    ) -> None:
        """Test DefaultFormatter uses BlackFormatter when ruff unavailable but black available."""
        # Mock ruff as unavailable, black as available
        mock_ruff.has.return_value = False
        mock_which.return_value = None
        mock_black.has.return_value = True
        # Mock BlackFormatter
        mock_instance = AsyncMock()
        mock_black_formatter.return_value = mock_instance
        mock_instance.format.return_value = {"cell1": "formatted_code"}
        formatter = DefaultFormatter(line_length=88)
        codes: CellCodes = {"cell1": "x=1"}
        result = await formatter.format(codes)
        mock_black_formatter.assert_called_once_with(88)
        mock_instance.format.assert_called_once_with(codes)
        assert result == {"cell1": "formatted_code"}
    @patch("marimo._dependencies.dependencies.DependencyManager.black")
    @patch("marimo._dependencies.dependencies.DependencyManager.which")
    @patch("marimo._dependencies.dependencies.DependencyManager.ruff")
    async def test_raises_module_not_found_when_no_formatters_available(
        self,
        mock_ruff: MagicMock,
        mock_which: MagicMock,
        mock_black: MagicMock,
    ) -> None:
        """Test DefaultFormatter raises ModuleNotFoundError when no formatters available."""
        # Mock both ruff and black as unavailable
        mock_ruff.has.return_value = False
        mock_which.return_value = None
        mock_black.has.return_value = False
        formatter = DefaultFormatter(line_length=88)
        codes: CellCodes = {"cell1": "x=1"}
        with pytest.raises(ModuleNotFoundError) as exc_info:
            await formatter.format(codes)
        # The error message should name both acceptable formatters.
        assert "ruff or black" in str(exc_info.value)
        assert exc_info.value.name == "ruff"
class TestRuffFormatter:
    """Test the RuffFormatter class."""
    @patch("marimo._utils.formatter.ruff")
    async def test_ruff_formatter_calls_ruff_function(
        self, mock_ruff: MagicMock
    ) -> None:
        """Test RuffFormatter calls the ruff function with correct arguments."""
        mock_ruff.return_value = {"cell1": "formatted_code"}
        formatter = RuffFormatter(line_length=100)
        codes: CellCodes = {"cell1": "x=1"}
        result = await formatter.format(codes)
        # line_length is forwarded as a stringified CLI flag value.
        mock_ruff.assert_called_once_with(
            codes, "format", "--line-length", "100"
        )
        assert result == {"cell1": "formatted_code"}
    @patch("marimo._utils.formatter.ruff")
    async def test_ruff_formatter_propagates_exceptions(
        self, mock_ruff: MagicMock
    ) -> None:
        """Test RuffFormatter propagates exceptions from ruff function."""
        mock_ruff.side_effect = ModuleNotFoundError("ruff not found")
        formatter = RuffFormatter(line_length=88)
        codes: CellCodes = {"cell1": "x=1"}
        with pytest.raises(ModuleNotFoundError):
            await formatter.format(codes)
class TestBlackFormatter:
    """Test the BlackFormatter class."""
    @patch("marimo._dependencies.dependencies.DependencyManager.black")
    async def test_black_formatter_requires_dependency(
        self, mock_black: MagicMock
    ) -> None:
        """Test BlackFormatter calls require on black dependency."""
        mock_black.require.side_effect = ModuleNotFoundError("black required")
        formatter = BlackFormatter(line_length=88)
        codes: CellCodes = {"cell1": "x=1"}
        with pytest.raises(ModuleNotFoundError):
            await formatter.format(codes)
        mock_black.require.assert_called_once_with("to enable code formatting")
    @patch("asyncio.to_thread")
    @patch("marimo._dependencies.dependencies.DependencyManager.black")
    async def test_black_formatter_formats_code_successfully(
        self, mock_black_dep: MagicMock, mock_to_thread: MagicMock
    ) -> None:
        """Test BlackFormatter successfully formats code using black."""
        # Mock dependency requirement to pass
        mock_black_dep.require.return_value = None
        # Mock black module with __spec__ attribute
        mock_black = MagicMock()
        mock_black.__spec__ = MagicMock()
        mock_mode = MagicMock()
        mock_black.Mode.return_value = mock_mode
        mock_black.format_str.return_value = "formatted_code\n"
        # Inject the fake black module so `import black` inside the
        # formatter resolves to our mock.
        with patch.dict(sys.modules, {"black": mock_black}):
            mock_to_thread.return_value = "formatted_code\n"
            formatter = BlackFormatter(line_length=100)
            codes: CellCodes = {"cell1": "x=1", "cell2": "y=2"}
            result = await formatter.format(codes)
            # Output should be stripped of the trailing newline.
            assert result == {
                "cell1": "formatted_code",
                "cell2": "formatted_code",
            }
            mock_black.Mode.assert_called_with(line_length=100)
            assert mock_to_thread.call_count == 2
    @patch("asyncio.to_thread")
    @patch("marimo._dependencies.dependencies.DependencyManager.black")
    async def test_black_formatter_handles_formatting_errors_gracefully(
        self, mock_black_dep: MagicMock, mock_to_thread: MagicMock
    ) -> None:
        """Test BlackFormatter handles black formatting errors gracefully."""
        # Mock dependency requirement to pass
        mock_black_dep.require.return_value = None
        # Mock black module with __spec__ attribute
        mock_black = MagicMock()
        mock_black.__spec__ = MagicMock()
        mock_mode = MagicMock()
        mock_black.Mode.return_value = mock_mode
        with patch.dict(sys.modules, {"black": mock_black}):
            # First call succeeds, second call fails
            mock_to_thread.side_effect = [
                "formatted_code\n",
                Exception("Black error"),
            ]
            formatter = BlackFormatter(line_length=88)
            codes: CellCodes = {"cell1": "x=1", "cell2": "y=2"}
            result = await formatter.format(codes)
            # Should return formatted code for successful cells, original for failed
            assert result == {
                "cell1": "formatted_code",
                "cell2": "y=2",
            }
class TestRuffFunction:
    """Test the ruff async function.

    The mocked ``_run_subprocess_safe`` returns ``(stdout, stderr, returncode)``
    tuples; ``side_effect`` lists encode the exact call sequence the ruff()
    helper is expected to make (availability probe first, then one format
    call per cell).
    """
    @patch("marimo._utils.formatter._run_subprocess_safe")
    async def test_ruff_function_with_module_ruff_available(
        self, mock_subprocess_safe: MagicMock
    ) -> None:
        """Test ruff function when ruff is available as a module."""
        # Mock help command success, then format command success
        mock_subprocess_safe.side_effect = [
            (b"", b"", 0),  # help command success
            (b"formatted_code\n", b"", 0),  # format command success
        ]
        codes: CellCodes = {"cell1": "x=1"}
        result = await ruff(codes, "format", "--line-length", "88")
        assert result == {"cell1": "formatted_code"}
        # Check that help command was called first
        help_call = mock_subprocess_safe.call_args_list[0]
        assert help_call[0] == (sys.executable, "-m", "ruff", "--help")
        # Check that format command was called
        format_call = mock_subprocess_safe.call_args_list[1]
        assert format_call[0] == (
            sys.executable,
            "-m",
            "ruff",
            "format",
            "--line-length",
            "88",
            "-",
        )
        # Cell source is piped to ruff over stdin as bytes.
        assert format_call[1]["input_data"] == b"x=1"
    @patch("marimo._utils.formatter._run_subprocess_safe")
    async def test_ruff_function_falls_back_to_global_ruff(
        self, mock_subprocess_safe: MagicMock
    ) -> None:
        """Test ruff function falls back to global ruff when module unavailable."""
        # Mock module ruff failing, global ruff succeeding for help, then format success
        mock_subprocess_safe.side_effect = [
            (b"", b"", 1),  # module help fails
            (b"", b"", 0),  # global help succeeds
            (b"formatted_code\n", b"", 0),  # format succeeds
        ]
        codes: CellCodes = {"cell1": "x=1"}
        result = await ruff(codes, "format")
        assert result == {"cell1": "formatted_code"}
        # Check that global ruff was used for format command
        format_call = mock_subprocess_safe.call_args_list[2]
        assert format_call[0] == ("ruff", "format", "-")
        assert format_call[1]["input_data"] == b"x=1"
    @patch("marimo._utils.formatter._run_subprocess_safe")
    async def test_ruff_function_raises_module_not_found_when_unavailable(
        self, mock_subprocess_safe: MagicMock
    ) -> None:
        """Test ruff function raises ModuleNotFoundError when ruff is unavailable."""
        # Mock both module and global ruff failing
        mock_subprocess_safe.return_value = (b"", b"", 1)
        codes: CellCodes = {"cell1": "x=1"}
        with pytest.raises(ModuleNotFoundError) as exc_info:
            await ruff(codes, "format")
        assert "ruff" in str(exc_info.value)
        assert exc_info.value.name == "ruff"
    @patch("marimo._utils.formatter._run_subprocess_safe")
    async def test_ruff_function_handles_format_failures_gracefully(
        self, mock_subprocess_safe: MagicMock
    ) -> None:
        """Test ruff function handles individual cell formatting failures gracefully."""
        # Mock help command success, then format commands - one success, one failure
        mock_subprocess_safe.side_effect = [
            (b"", b"", 0),  # help succeeds
            (b"formatted_code\n", b"", 0),  # first format succeeds
            (b"", b"syntax error", 1),  # second format fails
        ]
        codes: CellCodes = {"cell1": "x=1", "cell2": "invalid syntax"}
        result = await ruff(codes, "format")
        # Should only include successfully formatted code
        assert result == {"cell1": "formatted_code"}
    @patch("marimo._utils.formatter.LOGGER")
    @patch("marimo._utils.formatter._run_subprocess_safe")
    async def test_ruff_function_handles_communication_exceptions(
        self, mock_subprocess_safe: MagicMock, mock_logger: MagicMock
    ) -> None:
        """Test ruff function handles communication exceptions gracefully."""
        # LOGGER is patched only to silence output; the mock itself is unused.
        del mock_logger
        # Mock help command success, then format command that raises exception
        mock_subprocess_safe.side_effect = [
            (b"", b"", 0),  # help succeeds
            Exception("Communication failed"),  # format fails with exception
        ]
        codes: CellCodes = {"cell1": "x=1"}
        result = await ruff(codes, "format")
        # Should return empty dict when all cells fail
        assert result == {}
    @patch("marimo._utils.formatter._run_subprocess_safe")
    async def test_ruff_function_strips_whitespace_from_output(
        self, mock_subprocess_safe: MagicMock
    ) -> None:
        """Test ruff function strips whitespace from formatted output."""
        # Mock help command success, then format with whitespace in output
        mock_subprocess_safe.side_effect = [
            (b"", b"", 0),  # help succeeds
            (b"  formatted_code  \n\n", b"", 0),  # format with whitespace
        ]
        codes: CellCodes = {"cell1": "x=1"}
        result = await ruff(codes, "format")
        assert result == {"cell1": "formatted_code"}
    @patch("marimo._utils.formatter.LOGGER")
    @patch("marimo._utils.formatter._run_subprocess_safe")
    async def test_ruff_function_raises_format_error_on_non_zero_exit(
        self, mock_subprocess_safe: MagicMock, mock_logger: MagicMock
    ) -> None:
        """A non-zero exit from the format command results in the failed
        cell being skipped rather than an exception being raised.

        NOTE(review): the test name still mentions FormatError, but the
        observed behavior asserted here is graceful skipping.
        """
        del mock_logger
        # Mock help command success, then format command that fails
        mock_subprocess_safe.side_effect = [
            (b"", b"", 0),  # help succeeds
            (b"", b"format error", 1),  # format fails
        ]
        codes: CellCodes = {"cell1": "x=1"}
        result = await ruff(codes, "format")
        # Should skip failed cells
        assert result == {}
    # NOTE(review): this test exercises _run_subprocess_safe directly rather
    # than the ruff() helper; it may belong in its own test class.
    @patch("asyncio.to_thread")
    @patch("asyncio.create_subprocess_exec")
    async def test_run_subprocess_safe_falls_back_on_windows(
        self, mock_subprocess_exec: MagicMock, mock_to_thread: MagicMock
    ) -> None:
        """Test _run_subprocess_safe falls back to subprocess.run on Windows."""
        from marimo._utils.formatter import _run_subprocess_safe
        # Mock asyncio.create_subprocess_exec to raise NotImplementedError (Windows behavior)
        mock_subprocess_exec.side_effect = NotImplementedError()
        # Mock asyncio.to_thread to return subprocess result
        mock_to_thread.return_value = (b"output", b"error", 0)
        result = await _run_subprocess_safe("python", "--version")
        assert result == (b"output", b"error", 0)
        mock_to_thread.assert_called_once()
class TestFormatError:
    """Test the FormatError exception class."""
    def test_format_error_is_exception(self) -> None:
        """Test FormatError is a proper Exception subclass."""
        error = FormatError("test message")
        assert isinstance(error, Exception)
        assert str(error) == "test message"
    def test_format_error_can_be_raised_and_caught(self) -> None:
        """Test FormatError can be raised and caught properly."""
        with pytest.raises(FormatError) as exc_info:
            raise FormatError("test formatting error")
        assert str(exc_info.value) == "test formatting error"
class TestFormatterIntegration:
    """Integration tests for formatter classes working together."""
    async def test_formatter_inheritance_structure(self) -> None:
        """Test that all formatter classes inherit from base Formatter properly."""
        assert issubclass(DefaultFormatter, Formatter)
        assert issubclass(RuffFormatter, Formatter)
        assert issubclass(BlackFormatter, Formatter)
    async def test_all_formatters_accept_line_length_parameter(self) -> None:
        """Test all formatters accept and store line_length parameter."""
        formatters = [
            Formatter(100),
            DefaultFormatter(100),
            RuffFormatter(100),
            BlackFormatter(100),
        ]
        for formatter in formatters:
            assert formatter.line_length == 100
    async def test_cell_codes_type_alias_works_correctly(self) -> None:
        """Test CellCodes type alias works as expected."""
        # CellCodes is expected to behave as a plain dict[str, str].
        codes: CellCodes = {"cell1": "code1", "cell2": "code2"}
        assert isinstance(codes, dict)
        assert all(isinstance(k, str) for k in codes.keys())
        assert all(isinstance(v, str) for v in codes.values())
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_formatter.py",
"license": "Apache License 2.0",
"lines": 353,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_sql/test_engine_utils.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Literal
from unittest.mock import Mock, patch
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._sql.utils import convert_to_output
# Sentinel payloads returned by the mocked conversion callbacks; tests assert
# on equality with these dicts to verify which converter actually ran.
native_result = {"data": "native_result"}
polars_result = {"data": "polars_result"}
pandas_result = {"data": "pandas_result"}
lazy_polars_result = {"data": "lazy_polars_result"}

# Names of the conversion callbacks accepted by convert_to_output.
MockFunctions = Literal[
    "to_native", "to_polars", "to_pandas", "to_lazy_polars"
]
# Maps a conversion-callback name to its Mock stand-in.
MockFnDict = dict[MockFunctions, Mock]
@pytest.fixture
def mock_functions() -> MockFnDict:
    """Returns a dictionary of mock conversion functions keyed by their names."""
    sentinel_by_name: dict[MockFunctions, dict] = {
        "to_native": native_result,
        "to_polars": polars_result,
        "to_pandas": pandas_result,
        "to_lazy_polars": lazy_polars_result,
    }
    # Each mock simply returns its sentinel payload when invoked.
    return {
        name: Mock(return_value=payload)
        for name, payload in sentinel_by_name.items()
    }
def assert_only_one_called(
    mock_functions: MockFnDict, function_name: MockFunctions
) -> None:
    """Assert the named mock was invoked exactly once and no other mock ran."""
    for candidate_name, candidate_fn in mock_functions.items():
        if candidate_name != function_name:
            candidate_fn.assert_not_called()
        else:
            candidate_fn.assert_called_once()
def assert_multiple_called_once(
    mock_functions: MockFnDict, expected_called: list[MockFunctions]
) -> None:
    """Assert each listed mock was called exactly once and the rest never."""
    expected = set(expected_called)
    for name, fn in mock_functions.items():
        if name in expected:
            fn.assert_called_once()
        else:
            fn.assert_not_called()
class TestNativeOutputFormat:
    """Test different native output format scenarios."""

    def test_with_to_native(self, mock_functions: MockFnDict) -> None:
        """Native format dispatches to to_native when it is supplied."""
        converted = convert_to_output(
            sql_output_format="native",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
            to_native=mock_functions["to_native"],
        )
        assert converted == native_result
        assert_only_one_called(mock_functions, "to_native")

    def test_without_to_native(self, mock_functions: MockFnDict) -> None:
        """Requesting native output without a to_native callback is an error."""
        with pytest.raises(
            ValueError, match="to_native is required for native output format"
        ):
            convert_to_output(
                sql_output_format="native",
                to_polars=mock_functions["to_polars"],
                to_pandas=mock_functions["to_pandas"],
            )
class TestPolarsOutputFormat:
    """Test polars output format scenarios."""

    @pytest.mark.skipif(
        not DependencyManager.polars.has(),
        reason="Polars is not installed",
    )
    def test_polars_format(self, mock_functions: MockFnDict) -> None:
        """The 'polars' format routes conversion through to_polars only."""
        converted = convert_to_output(
            sql_output_format="polars",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        assert converted == polars_result
        assert_only_one_called(mock_functions, "to_polars")
@pytest.mark.skipif(
    not DependencyManager.polars.has(),
    reason="Polars is not installed",
)
class TestLazyPolarsOutputFormat:
    """Test lazy-polars output format scenarios."""

    def test_with_to_lazy_polars(self, mock_functions: MockFnDict) -> None:
        """Test lazy-polars output format when to_lazy_polars is provided."""
        result = convert_to_output(
            sql_output_format="lazy-polars",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
            to_lazy_polars=mock_functions["to_lazy_polars"],
        )
        assert result == lazy_polars_result
        assert_only_one_called(mock_functions, "to_lazy_polars")

    def test_without_to_lazy_polars_dataframe(
        self, mock_functions: MockFnDict
    ) -> None:
        """Test lazy-polars output format when to_lazy_polars is not provided and to_polars returns DataFrame."""
        # DataFrame-like mock whose .lazy() yields the LazyFrame stand-in.
        mock_dataframe = Mock()
        mock_lazy_frame = Mock()
        mock_dataframe.lazy.return_value = mock_lazy_frame
        mock_functions["to_polars"].return_value = mock_dataframe
        result = convert_to_output(
            sql_output_format="lazy-polars",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        # Without to_lazy_polars, the converter falls back to
        # to_polars().lazy().
        assert result == mock_lazy_frame
        mock_functions["to_polars"].assert_called_once()
        mock_dataframe.lazy.assert_called_once()
        mock_functions["to_pandas"].assert_not_called()

    def test_without_to_lazy_polars_series(
        self, mock_functions: MockFnDict
    ) -> None:
        """Test lazy-polars output format when to_lazy_polars is not provided and to_polars returns Series."""
        import polars as pl

        # spec=pl.Series presumably makes convert_to_output's Series check
        # match, exercising the to_frame().lazy() path — verify against
        # marimo._sql.utils.
        mock_series = Mock(spec=pl.Series)
        mock_frame = Mock()
        mock_lazy_frame = Mock()
        mock_series.to_frame.return_value = mock_frame
        mock_frame.lazy.return_value = mock_lazy_frame
        mock_functions["to_polars"].return_value = mock_series
        result = convert_to_output(
            sql_output_format="lazy-polars",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        assert result == mock_lazy_frame
        mock_functions["to_polars"].assert_called_once()
        mock_series.to_frame.assert_called_once()
        mock_frame.lazy.assert_called_once()
        mock_functions["to_pandas"].assert_not_called()
@pytest.mark.skipif(
    not DependencyManager.pandas.has(),
    reason="Pandas is not installed",
)
class TestPandasOutputFormat:
    """Test pandas output format scenarios."""

    def test_pandas_format(self, mock_functions: MockFnDict) -> None:
        """The 'pandas' format routes conversion through to_pandas only."""
        converted = convert_to_output(
            sql_output_format="pandas",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        assert converted == pandas_result
        assert_only_one_called(mock_functions, "to_pandas")
class TestAutoOutputFormat:
    """Test auto output format scenarios.

    DependencyManager is patched inside marimo._sql.utils, so each test
    simulates a specific polars/pandas availability combination regardless of
    what is actually installed. patch decorators apply bottom-up: the
    innermost decorator becomes the first mock argument after self.
    """

    @pytest.mark.skipif(
        not DependencyManager.polars.has(),
        reason="Polars is not installed",
    )
    @patch("marimo._sql.utils.DependencyManager")
    def test_with_polars_success(
        self, mock_dependency_manager: Mock, mock_functions: MockFnDict
    ) -> None:
        """Test auto output format when polars is available and succeeds."""
        mock_dependency_manager.polars.has.return_value = True
        mock_dependency_manager.pandas.has.return_value = False
        result = convert_to_output(
            sql_output_format="auto",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        assert result == polars_result
        assert_only_one_called(mock_functions, "to_polars")

    @pytest.mark.skipif(
        not DependencyManager.polars.has(),
        reason="Polars is not installed",
    )
    @patch("marimo._sql.utils.DependencyManager")
    @patch("marimo._sql.utils.LOGGER")
    def test_with_polars_failure_fallback_to_pandas(
        self,
        mock_logger: Mock,
        mock_dependency_manager: Mock,
        mock_functions: MockFnDict,
    ) -> None:
        """Test auto output format when polars fails and falls back to pandas."""
        mock_dependency_manager.polars.has.return_value = True
        mock_dependency_manager.pandas.has.return_value = True
        import polars as pl

        # A PanicException from the polars converter should be caught and
        # logged, and the pandas converter used instead.
        mock_functions["to_polars"].side_effect = pl.exceptions.PanicException(
            "test error"
        )
        result = convert_to_output(
            sql_output_format="auto",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        assert result == pandas_result
        assert_multiple_called_once(mock_functions, ["to_polars", "to_pandas"])
        mock_logger.info.assert_called_once_with(
            "Failed to convert to polars, falling back to pandas"
        )

    @pytest.mark.skipif(
        not DependencyManager.polars.has(),
        reason="Polars is not installed",
    )
    @patch("marimo._sql.utils.DependencyManager")
    @patch("marimo._sql.utils.LOGGER")
    def test_with_polars_compute_error_fallback_to_pandas(
        self,
        mock_logger: Mock,
        mock_dependency_manager: Mock,
        mock_functions: MockFnDict,
    ) -> None:
        """Test auto output format when polars ComputeError occurs and falls back to pandas."""
        mock_dependency_manager.polars.has.return_value = True
        mock_dependency_manager.pandas.has.return_value = True
        import polars as pl

        # ComputeError takes the same fallback path as PanicException.
        mock_functions["to_polars"].side_effect = pl.exceptions.ComputeError(
            "test error"
        )
        result = convert_to_output(
            sql_output_format="auto",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        assert result == pandas_result
        assert_multiple_called_once(mock_functions, ["to_polars", "to_pandas"])
        mock_logger.info.assert_called_once_with(
            "Failed to convert to polars, falling back to pandas"
        )

    @patch("marimo._sql.utils.DependencyManager")
    def test_without_polars_with_pandas_success(
        self, mock_dependency_manager: Mock, mock_functions: MockFnDict
    ) -> None:
        """Test auto output format when polars is not available but pandas succeeds."""
        mock_dependency_manager.polars.has.return_value = False
        mock_dependency_manager.pandas.has.return_value = True
        result = convert_to_output(
            sql_output_format="auto",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        assert result == pandas_result
        assert_only_one_called(mock_functions, "to_pandas")

    @patch("marimo._sql.utils.DependencyManager")
    @patch("marimo._sql.utils.LOGGER")
    def test_without_polars_with_pandas_failure(
        self,
        mock_logger: Mock,
        mock_dependency_manager: Mock,
        mock_functions: MockFnDict,
    ) -> None:
        """Test auto output format when polars is not available and pandas fails."""
        mock_dependency_manager.polars.has.return_value = False
        mock_dependency_manager.pandas.has.return_value = True
        pandas_error = Exception("pandas error")
        mock_functions["to_pandas"].side_effect = pandas_error
        result = convert_to_output(
            sql_output_format="auto",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        # A pandas failure in auto mode yields None and logs a warning with
        # the original exception attached.
        assert result is None
        assert_only_one_called(mock_functions, "to_pandas")
        mock_logger.warning.assert_called_once_with(
            "Failed to convert dataframe", exc_info=pandas_error
        )

    @patch("marimo._sql.utils.DependencyManager")
    def test_without_polars_without_pandas(
        self, mock_dependency_manager: Mock, mock_functions: MockFnDict
    ) -> None:
        """Test auto output format when neither polars nor pandas is available."""
        mock_dependency_manager.polars.has.return_value = False
        mock_dependency_manager.pandas.has.return_value = False
        with pytest.raises(
            ModuleNotFoundError, match="pandas or polars is required"
        ):
            convert_to_output(
                sql_output_format="auto",
                to_polars=mock_functions["to_polars"],
                to_pandas=mock_functions["to_pandas"],
            )
        # Neither converter should have been attempted.
        mock_functions["to_polars"].assert_not_called()
        mock_functions["to_pandas"].assert_not_called()

    @pytest.mark.skipif(
        not DependencyManager.polars.has(),
        reason="Polars is not installed",
    )
    @patch("marimo._sql.utils.DependencyManager")
    def test_with_polars_failure_no_pandas(
        self, mock_dependency_manager: Mock, mock_functions: MockFnDict
    ) -> None:
        """Test auto output format when polars fails and pandas is not available."""
        mock_dependency_manager.polars.has.return_value = True
        mock_dependency_manager.pandas.has.return_value = False
        import polars as pl

        mock_functions["to_polars"].side_effect = pl.exceptions.PanicException(
            "test error"
        )
        with pytest.raises(
            ModuleNotFoundError, match="pandas or polars is required"
        ):
            convert_to_output(
                sql_output_format="auto",
                to_polars=mock_functions["to_polars"],
                to_pandas=mock_functions["to_pandas"],
            )
        # polars was tried (and failed); pandas was unavailable.
        mock_functions["to_polars"].assert_called_once()
        mock_functions["to_pandas"].assert_not_called()
class TestAllOutputFormats:
    """Test all output format types."""

    @pytest.mark.skipif(
        not DependencyManager.polars.has()
        and not DependencyManager.pandas.has(),
        reason="Polars and pandas are not installed",
    )
    @pytest.mark.parametrize(
        ("format_type", "expected_result", "expected_called_once"),
        [
            ("polars", polars_result, ["to_polars"]),
            ("pandas", pandas_result, ["to_pandas"]),
            ("native", native_result, ["to_native"]),
            ("lazy-polars", lazy_polars_result, ["to_lazy_polars"]),
        ],
    )
    def test_basic_formats(
        self,
        format_type: str,
        expected_result: dict,
        expected_called_once: list[str],
        mock_functions: MockFnDict,
    ) -> None:
        """Test basic output formats without complex dependencies."""
        # All four converters are supplied; exactly one should fire per
        # requested format.
        result = convert_to_output(
            sql_output_format=format_type,
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
            to_native=mock_functions["to_native"],
            to_lazy_polars=mock_functions["to_lazy_polars"],
        )
        assert result == expected_result
        assert_multiple_called_once(mock_functions, expected_called_once)  # type: ignore

    @pytest.mark.skipif(
        not DependencyManager.polars.has(),
        reason="Polars is not installed",
    )
    @patch("marimo._sql.utils.DependencyManager")
    def test_auto_format_with_polars(
        self, mock_dependency_manager: Mock, mock_functions: MockFnDict
    ) -> None:
        """Test auto format when polars is available."""
        mock_dependency_manager.polars.has.return_value = True
        mock_dependency_manager.pandas.has.return_value = False
        result = convert_to_output(
            sql_output_format="auto",
            to_polars=mock_functions["to_polars"],
            to_pandas=mock_functions["to_pandas"],
        )
        assert result == polars_result
        assert_only_one_called(mock_functions, "to_polars")
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_sql/test_engine_utils.py",
"license": "Apache License 2.0",
"lines": 356,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_server/ai/config.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import (
Any,
Optional,
Union,
cast,
)
from starlette.exceptions import HTTPException
from marimo._config.config import (
AiConfig,
CopilotMode,
MarimoConfig,
PartialMarimoConfig,
)
from marimo._server.ai.constants import DEFAULT_MAX_TOKENS, DEFAULT_MODEL
from marimo._server.ai.ids import AiModelId
from marimo._server.ai.tools.tool_manager import get_tool_manager
from marimo._server.ai.tools.types import ToolDefinition
from marimo._utils.http import HTTPStatus
# Default inference endpoint for GitHub models; kept in sync with the value
# used by pydantic-ai's GitHubProvider:
# https://github.com/pydantic/pydantic-ai/blob/8b9ac2bde2355b0d431abea6cf210a36fffe0c43/pydantic_ai_slim/pydantic_ai/providers/github.py#L41C17-L41C51
GITHUB_COPILOT_BASE_URL = "https://models.github.ai/inference"
@dataclass
class AnyProviderConfig:
    """Normalized config for any AI provider.

    Each ``for_*`` classmethod flattens one provider section of the user's AI
    config (plus environment-variable fallbacks) into this common shape.
    """

    # Custom endpoint; None means the provider's own default.
    base_url: Optional[str]
    # API key; may be a placeholder for providers that need none (Ollama).
    api_key: str
    # Optional project/workspace identifier (e.g. OpenAI, W&B).
    project: Optional[str] = None
    # TLS verification toggle; None leaves the HTTP client's default.
    ssl_verify: Optional[bool] = None
    # Path to a custom CA bundle, if any.
    ca_bundle_path: Optional[str] = None
    # Path to a client certificate (PEM), if any.
    client_pem: Optional[str] = None
    # Extra HTTP headers sent with every request.
    extra_headers: Optional[dict[str, str]] = None
    # Tool definitions exposed to the model; normalized to None when empty.
    tools: Optional[list[ToolDefinition]] = None

    def __post_init__(self) -> None:
        # Only include tools if they are available
        # Empty tools list causes an error with deepseek
        # https://discord.com/channels/1059888774789730424/1387766267792068821
        if not self.tools:
            self.tools = None

    @classmethod
    def for_openai(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for OpenAI; falls back to the OPENAI_API_KEY env var."""
        fallback_key = cls.os_key("OPENAI_API_KEY")
        return cls._for_openai_like(
            config,
            "open_ai",
            "OpenAI",
            fallback_key=fallback_key,
            require_key=True,
        )

    @classmethod
    def for_azure(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for Azure OpenAI; falls back to the AZURE_API_KEY env var."""
        fallback_key = cls.os_key("AZURE_API_KEY")
        return cls._for_openai_like(
            config,
            "azure",
            "Azure OpenAI",
            fallback_key=fallback_key,
            require_key=True,
        )

    @classmethod
    def for_openai_compatible(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for a generic OpenAI-compatible endpoint (key optional)."""
        return cls._for_openai_like(
            config, "open_ai_compatible", "OpenAI Compatible"
        )

    @classmethod
    def for_custom_provider(
        cls, config: AiConfig, provider_name: str
    ) -> AnyProviderConfig:
        """Get config for a custom provider by name."""
        custom_providers = cast(
            dict[str, Any], config.get("custom_providers", {})
        )
        if provider_name not in custom_providers:
            raise HTTPException(
                status_code=HTTPStatus.BAD_REQUEST,
                detail=f"Custom provider '{provider_name}' not configured. "
                "Go to Settings > AI to configure.",
            )
        provider_config = cast(dict[str, Any], custom_providers[provider_name])
        return cls._for_openai_like(
            config,
            key=provider_name,
            name=provider_name.replace("_", " ").title(),
            require_key=False,
            ai_config=provider_config,
        )

    @classmethod
    def for_ollama(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for a local Ollama server; no real API key required."""
        default_base_url = "http://127.0.0.1:11434/v1"
        return cls._for_openai_like(
            config,
            "ollama",
            "Ollama",
            fallback_key="ollama-placeholder",
            fallback_base_url=default_base_url,
        )

    @classmethod
    def for_github(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for GitHub models; adds default Copilot editor headers."""
        fallback_key = cls.os_key("GITHUB_TOKEN")
        result = cls._for_openai_like(
            config,
            "github",
            "GitHub",
            fallback_key=fallback_key,
            # Default base URL for GitHub Copilot, taken from
            fallback_base_url=GITHUB_COPILOT_BASE_URL,
            require_key=True,
        )
        # Add default extra headers for GitHub, but allow user to override
        default_headers = {
            "editor-version": "vscode/1.95.0",
            "Copilot-Integration-Id": "vscode-chat",
        }
        # Merge: user headers override defaults
        if result.extra_headers:
            merged_headers = {**default_headers, **result.extra_headers}
        else:
            merged_headers = default_headers
        result.extra_headers = merged_headers
        return result

    @classmethod
    def for_openrouter(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for OpenRouter; falls back to OPENROUTER_API_KEY."""
        fallback_key = cls.os_key("OPENROUTER_API_KEY")
        return cls._for_openai_like(
            config,
            "openrouter",
            "OpenRouter",
            fallback_key=fallback_key,
            # Default base URL for OpenRouter
            fallback_base_url="https://openrouter.ai/api/v1/",
            require_key=True,
        )

    @classmethod
    def for_wandb(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for Weights & Biases inference; falls back to WANDB_API_KEY."""
        fallback_key = cls.os_key("WANDB_API_KEY")
        return cls._for_openai_like(
            config,
            "wandb",
            "Weights & Biases",
            fallback_key=fallback_key,
            # Default base URL for Weights & Biases
            fallback_base_url="https://api.inference.wandb.ai/v1/",
            require_key=True,
        )

    @classmethod
    def _for_openai_like(
        cls,
        config: AiConfig,
        key: str,
        name: str,
        *,
        fallback_key: Optional[str] = None,
        fallback_base_url: Optional[str] = None,
        require_key: bool = False,
        ai_config: dict[str, Any] | None = None,
    ) -> AnyProviderConfig:
        """Shared builder for providers speaking the OpenAI wire protocol.

        ``key`` is the section name in the AI config; ``name`` is the
        human-readable provider name used in error messages. ``ai_config``
        may be supplied directly (custom providers) to bypass the lookup.
        """
        ai_config = ai_config or _get_ai_config(config, key)
        # NOTE: ``key`` is rebound here from the config-section name to the
        # resolved API key.
        key = _get_key(
            ai_config, name, fallback_key=fallback_key, require_key=require_key
        )
        # Use SSL_CERT_FILE environment variable as fallback for ca_bundle_path
        ca_bundle_path = ai_config.get("ca_bundle_path") or cls.os_key(
            "SSL_CERT_FILE"
        )
        kwargs: dict[str, Any] = {
            "base_url": _get_base_url(ai_config) or fallback_base_url,
            "api_key": key,
            "project": ai_config.get("project", None),
            "ssl_verify": ai_config.get("ssl_verify", True),
            "ca_bundle_path": ca_bundle_path,
            "client_pem": ai_config.get("client_pem", None),
            "extra_headers": ai_config.get("extra_headers", None),
            "tools": _get_tools(config.get("mode", "manual")),
        }
        return AnyProviderConfig(**kwargs)

    @classmethod
    def for_anthropic(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for Anthropic; falls back to ANTHROPIC_API_KEY."""
        ai_config = _get_ai_config(config, "anthropic")
        fallback_key = cls.os_key("ANTHROPIC_API_KEY")
        key = _get_key(
            ai_config,
            "Anthropic",
            fallback_key=fallback_key,
            require_key=True,
        )
        return cls(
            base_url=_get_base_url(ai_config),
            api_key=key,
            tools=_get_tools(config.get("mode", "manual")),
        )

    @classmethod
    def for_google(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for Google AI; falls back to GEMINI/GOOGLE API key env vars."""
        fallback_key = cls.os_key("GEMINI_API_KEY") or cls.os_key(
            "GOOGLE_API_KEY"
        )
        ai_config = _get_ai_config(config, "google")
        key = _get_key(
            ai_config,
            "Google AI",
            fallback_key=fallback_key,
            require_key=False,
        )
        return cls(
            base_url=_get_base_url(ai_config),
            api_key=key,
            ssl_verify=True,
            tools=_get_tools(config.get("mode", "manual")),
        )

    @classmethod
    def for_bedrock(cls, config: AiConfig) -> AnyProviderConfig:
        """Config for AWS Bedrock; base_url carries the AWS region."""
        ai_config = _get_ai_config(config, "bedrock")
        key = _get_key(ai_config, "Bedrock")
        return cls(
            base_url=_get_base_url(ai_config, "Bedrock"),
            api_key=key,
            tools=_get_tools(config.get("mode", "manual")),
        )

    @classmethod
    def for_model(cls, model: str, config: AiConfig) -> AnyProviderConfig:
        """Resolve provider config from a fully-qualified model id."""
        model_id = AiModelId.from_model(model)
        if model_id.provider == "anthropic":
            return cls.for_anthropic(config)
        elif model_id.provider == "google":
            return cls.for_google(config)
        elif model_id.provider == "bedrock":
            return cls.for_bedrock(config)
        elif model_id.provider == "ollama":
            return cls.for_ollama(config)
        elif model_id.provider == "openai":
            return cls.for_openai(config)
        elif model_id.provider == "azure":
            return cls.for_azure(config)
        elif model_id.provider == "github":
            return cls.for_github(config)
        elif model_id.provider == "openrouter":
            return cls.for_openrouter(config)
        elif model_id.provider == "wandb":
            return cls.for_wandb(config)
        elif model_id.provider == "openai_compatible":
            return cls.for_openai_compatible(config)
        else:
            # User-defined providers under custom_providers take precedence
            # over the generic fallbacks below.
            custom_providers = cast(
                dict[str, Any], config.get("custom_providers", {})
            )
            if model_id.provider in custom_providers:
                return cls.for_custom_provider(config, model_id.provider)
            # Catch-all: try OpenAI compatible first, then OpenAI.
            try:
                if "open_ai_compatible" in config:
                    return cls.for_openai_compatible(config)
                return cls.for_openai(config)
            except HTTPException:
                return cls.for_openai(config)

    @classmethod
    def os_key(cls, key: str) -> Optional[str]:
        """Read an environment variable, returning None when unset."""
        import os

        return os.environ.get(key)
def _get_tools(mode: CopilotMode) -> list[ToolDefinition]:
    """Return the tool definitions enabled for the given copilot mode."""
    try:
        manager = get_tool_manager()
    except ValueError:
        # ToolManager may not be initialized in some tests or non-server contexts
        return []
    else:
        return manager.get_tools_for_mode(mode)
def _get_ai_config(config: AiConfig, key: str) -> dict[str, Any]:
if key not in config:
return {}
return cast(dict[str, Any], config.get(key, {}))
def get_chat_model(config: AiConfig) -> str:
    """Get the chat model from the config."""
    # Current config location takes precedence.
    chosen = config.get("models", {}).get("chat_model")
    if chosen:
        return chosen
    # Fall back to the legacy open_ai location, then the global default.
    legacy = config.get("open_ai", {}).get("model")
    return legacy or DEFAULT_MODEL
def get_edit_model(config: AiConfig) -> str:
    """Get the edit model, defaulting to the chat model when unset."""
    edit_model = config.get("models", {}).get("edit_model")
    if edit_model:
        return edit_model
    return get_chat_model(config)
def get_autocomplete_model(
    config: Union[MarimoConfig, PartialMarimoConfig],
) -> str:
    """Get the autocomplete model from the config."""
    # Current config location takes precedence.
    current = config.get("ai", {}).get("models", {}).get("autocomplete_model")
    if current:
        return current
    # Legacy location, then the global default.
    return config.get("completion", {}).get("model") or DEFAULT_MODEL
def get_max_tokens(config: MarimoConfig) -> int:
    """Return the configured AI max-token budget, or the default."""
    if "ai" in config and "max_tokens" in config["ai"]:
        return config["ai"]["max_tokens"]
    return DEFAULT_MAX_TOKENS
def _get_key(
config: Any,
name: str,
*,
fallback_key: Optional[str] = None,
require_key: bool = False,
) -> str:
"""Get the API key for a given provider."""
if not isinstance(config, dict):
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail=f"Invalid config for {name}. Go to Settings > AI to configure.",
)
config = cast(dict[str, Any], config)
if name == "Bedrock":
if "profile_name" in config:
profile_name = config.get("profile_name", "")
return f"profile:{profile_name}"
elif (
"aws_access_key_id" in config and "aws_secret_access_key" in config
):
return f"{config['aws_access_key_id']}:{config['aws_secret_access_key']}"
else:
return ""
if "api_key" in config:
key = config["api_key"]
if key:
return cast(str, key)
if "http://127.0.0.1:11434/" in config.get("base_url", ""):
# Ollama can be configured and in that case the api key is not needed.
# We send a placeholder value to prevent the user from being confused.
return "ollama-placeholder"
if fallback_key:
return fallback_key
if require_key:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail=f"{name} API key not configured. Go to Settings > AI to configure.",
)
return ""
def _get_base_url(config: Any, name: str = "") -> Optional[str]:
"""Get the base URL for a given provider."""
if not isinstance(config, dict):
if name:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail=f"{name} is not configured. Go to Settings > AI to configure.",
)
else:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail="Invalid config. Go to Settings > AI to configure.",
)
if name == "Bedrock":
if "region_name" in config:
return cast(str, config["region_name"])
else:
return None
elif "base_url" in config:
return cast(str, config["base_url"])
return None
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/ai/config.py",
"license": "Apache License 2.0",
"lines": 359,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_server/ai/test_ai_config.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
from typing import Any, cast
from unittest.mock import patch
import pytest
from starlette.exceptions import HTTPException
from marimo._config.config import (
AiConfig,
MarimoConfig,
)
from marimo._dependencies.dependencies import DependencyManager
from marimo._server.ai.config import (
GITHUB_COPILOT_BASE_URL,
AnyProviderConfig,
_get_ai_config,
_get_base_url,
_get_key,
get_autocomplete_model,
get_chat_model,
get_edit_model,
get_max_tokens,
)
from marimo._server.ai.constants import DEFAULT_MAX_TOKENS, DEFAULT_MODEL
from marimo._server.ai.tools.types import ToolDefinition
from marimo._utils.http import HTTPStatus
class TestAnyProviderConfig:
"""Tests for AnyProviderConfig class."""
def test_for_openai_basic(self):
"""Test basic OpenAI configuration."""
config: AiConfig = {
"open_ai": {
"api_key": "test-openai-key",
"model": "gpt-4",
}
}
provider_config = AnyProviderConfig.for_openai(config)
assert provider_config.api_key == "test-openai-key"
assert provider_config.base_url is None
assert provider_config.ssl_verify is True
assert provider_config.ca_bundle_path is None
assert provider_config.client_pem is None
assert provider_config.extra_headers is None
def test_for_openai_with_base_url(self):
"""Test OpenAI configuration with custom base URL."""
config: AiConfig = {
"open_ai": {
"api_key": "test-key",
"base_url": "https://custom.openai.com",
"ssl_verify": False,
"ca_bundle_path": "/path/to/ca.pem",
"client_pem": "/path/to/client.pem",
"extra_headers": {"test-header": "test-value"},
}
}
provider_config = AnyProviderConfig.for_openai(config)
assert provider_config.api_key == "test-key"
assert provider_config.base_url == "https://custom.openai.com"
assert provider_config.ssl_verify is False
assert provider_config.ca_bundle_path == "/path/to/ca.pem"
assert provider_config.client_pem == "/path/to/client.pem"
assert provider_config.extra_headers == {"test-header": "test-value"}
def test_for_azure(self):
"""Test Azure OpenAI configuration."""
config: AiConfig = {
"azure": {
"api_key": "test-azure-key",
"base_url": "https://test.openai.azure.com",
}
}
provider_config = AnyProviderConfig.for_azure(config)
assert provider_config.api_key == "test-azure-key"
assert provider_config.base_url == "https://test.openai.azure.com"
def test_for_openai_compatible(self):
"""Test OpenAI compatible service configuration."""
config: AiConfig = {
"open_ai_compatible": {
"api_key": "test-compatible-key",
"base_url": "https://compatible.service.com",
}
}
provider_config = AnyProviderConfig.for_openai_compatible(config)
assert provider_config.api_key == "test-compatible-key"
assert provider_config.base_url == "https://compatible.service.com"
def test_for_ollama(self):
"""Test Ollama configuration."""
config: AiConfig = {
"ollama": {
"api_key": "test-ollama-key",
"base_url": "http://localhost:11434",
}
}
provider_config = AnyProviderConfig.for_ollama(config)
assert provider_config.api_key == "test-ollama-key"
assert provider_config.base_url == "http://localhost:11434"
def test_for_ollama_empty(self):
config: AiConfig = {}
provider_config = AnyProviderConfig.for_ollama(config)
assert isinstance(provider_config, AnyProviderConfig)
assert provider_config.api_key == "ollama-placeholder"
assert provider_config.base_url == "http://127.0.0.1:11434/v1"
def test_for_ollama_placeholder_key(self):
"""Test Ollama configuration with default URL gets placeholder key."""
config: AiConfig = {
"ollama": {
"base_url": "http://127.0.0.1:11434/",
}
}
provider_config = AnyProviderConfig.for_ollama(config)
assert provider_config.api_key == "ollama-placeholder"
def test_for_ollama_fallback_url(self):
"""Test Ollama configuration with fallback base URL."""
config: AiConfig = {"ollama": {}}
provider_config = AnyProviderConfig.for_ollama(config)
assert provider_config.api_key == "ollama-placeholder"
assert provider_config.base_url == "http://127.0.0.1:11434/v1"
def test_for_github(self):
"""Test GitHub configuration."""
config: AiConfig = {
"github": {
"api_key": "test-github-key",
"base_url": "https://some-base-url",
}
}
provider_config = AnyProviderConfig.for_github(config)
assert provider_config.api_key == "test-github-key"
assert provider_config.base_url == "https://some-base-url"
def test_for_github_with_fallback_base_url(self):
"""Test GitHub configuration uses fallback base URL when not specified."""
config: AiConfig = {
"github": {
"api_key": "test-github-key",
}
}
provider_config = AnyProviderConfig.for_github(config)
assert provider_config.api_key == "test-github-key"
assert provider_config.base_url == "https://models.github.ai/inference"
@pytest.mark.skipif(
not DependencyManager.pydantic_ai.has(),
reason="pydantic-ai is not installed",
)
def test_github_default_base_url_matches_pydantic_ai(self):
"""Test GitHub configuration base URL matches pydantic-ai."""
from pydantic_ai.providers.github import GitHubProvider
assert (
GitHubProvider(api_key="dummy").base_url == GITHUB_COPILOT_BASE_URL
)
def test_for_github_default_extra_headers(self):
"""Test GitHub configuration includes default extra headers."""
config: AiConfig = {
"github": {
"api_key": "test-github-key",
}
}
provider_config = AnyProviderConfig.for_github(config)
assert provider_config.extra_headers is not None
assert (
provider_config.extra_headers["editor-version"] == "vscode/1.95.0"
)
assert (
provider_config.extra_headers["Copilot-Integration-Id"]
== "vscode-chat"
)
def test_for_github_user_headers_override_defaults(self):
"""Test GitHub configuration allows user headers to override defaults."""
config: AiConfig = {
"github": {
"api_key": "test-github-key",
"extra_headers": {
"editor-version": "custom-editor/2.0.0",
"X-Custom-Header": "custom-value",
},
}
}
provider_config = AnyProviderConfig.for_github(config)
assert provider_config.extra_headers is not None
# User header should override default
assert (
provider_config.extra_headers["editor-version"]
== "custom-editor/2.0.0"
)
# Default header not overridden should remain
assert (
provider_config.extra_headers["Copilot-Integration-Id"]
== "vscode-chat"
)
# Custom user header should be preserved
assert (
provider_config.extra_headers["X-Custom-Header"] == "custom-value"
)
def test_for_github_with_copilot_settings(self):
"""Test GitHub configuration with copilot_settings is accepted."""
config: AiConfig = {
"github": {
"api_key": "test-github-key",
"copilot_settings": {
"http": {
"proxy": "http://proxy.example.com:8888",
"proxyStrictSSL": True,
},
"telemetry": {"telemetryLevel": "off"},
},
}
}
# Should not raise an error - copilot_settings is a valid field
provider_config = AnyProviderConfig.for_github(config)
# Note: copilot_settings is stored in config but not used by AnyProviderConfig
# It's used by the frontend LSP client
assert provider_config.api_key == "test-github-key"
assert provider_config.base_url == "https://models.github.ai/inference"
def test_for_openrouter(self):
"""Test OpenRouter configuration."""
config: AiConfig = {
"openrouter": {
"api_key": "test-openrouter-key",
"base_url": "https://openrouter.ai/api/v1/",
}
}
provider_config = AnyProviderConfig.for_openrouter(config)
assert provider_config.api_key == "test-openrouter-key"
assert provider_config.base_url == "https://openrouter.ai/api/v1/"
def test_for_openrouter_with_fallback_base_url(self):
"""Test OpenRouter configuration uses fallback base URL when not specified."""
config: AiConfig = {
"openrouter": {
"api_key": "test-openrouter-key",
}
}
provider_config = AnyProviderConfig.for_openrouter(config)
assert provider_config.api_key == "test-openrouter-key"
assert provider_config.base_url == "https://openrouter.ai/api/v1/"
def test_for_wandb(self):
"""Test Weights & Biases configuration."""
config: AiConfig = {
"wandb": {
"api_key": "test-wandb-key",
"base_url": "https://api.inference.wandb.ai/v1/",
}
}
provider_config = AnyProviderConfig.for_wandb(config)
assert provider_config.api_key == "test-wandb-key"
assert provider_config.base_url == "https://api.inference.wandb.ai/v1/"
def test_for_wandb_with_fallback_base_url(self):
"""Test Weights & Biases configuration uses fallback base URL when not specified."""
config: AiConfig = {
"wandb": {
"api_key": "test-wandb-key",
}
}
provider_config = AnyProviderConfig.for_wandb(config)
assert provider_config.api_key == "test-wandb-key"
assert provider_config.base_url == "https://api.inference.wandb.ai/v1/"
def test_for_wandb_with_project(self):
"""Test Weights & Biases configuration with project field."""
config: AiConfig = {
"wandb": {
"api_key": "test-wandb-key",
"project": "my-project",
}
}
provider_config = AnyProviderConfig.for_wandb(config)
assert provider_config.api_key == "test-wandb-key"
assert provider_config.project == "my-project"
def test_for_openai_with_project(self):
"""Test OpenAI configuration with project field."""
config: AiConfig = {
"open_ai": {
"api_key": "test-openai-key",
"project": "my-openai-project",
}
}
provider_config = AnyProviderConfig.for_openai(config)
assert provider_config.api_key == "test-openai-key"
assert provider_config.project == "my-openai-project"
def test_for_anthropic(self):
"""Test Anthropic configuration."""
config: AiConfig = {
"anthropic": {
"api_key": "test-anthropic-key",
}
}
provider_config = AnyProviderConfig.for_anthropic(config)
assert provider_config.api_key == "test-anthropic-key"
assert provider_config.base_url is None
def test_for_google(self):
"""Test Google AI configuration."""
config: AiConfig = {
"google": {
"api_key": "test-google-key",
}
}
provider_config = AnyProviderConfig.for_google(config)
assert provider_config.api_key == "test-google-key"
assert provider_config.base_url is None
def test_for_bedrock_with_profile(self):
"""Test Bedrock configuration with profile name."""
config: AiConfig = {
"bedrock": {
"profile_name": "test-profile",
"region_name": "us-east-1",
}
}
provider_config = AnyProviderConfig.for_bedrock(config)
assert provider_config.api_key == "profile:test-profile"
assert provider_config.base_url == "us-east-1"
def test_for_bedrock_with_credentials(self):
"""Test Bedrock configuration with AWS credentials."""
config: AiConfig = {
"bedrock": {
"aws_access_key_id": "test-access-key",
"aws_secret_access_key": "test-secret-key",
"region_name": "us-west-2",
}
}
provider_config = AnyProviderConfig.for_bedrock(config)
assert provider_config.api_key == "test-access-key:test-secret-key"
assert provider_config.base_url == "us-west-2"
def test_for_model_openai(self) -> None:
"""Test for_model with OpenAI model."""
config: AiConfig = {"open_ai": {"api_key": "test-key"}}
provider_config = AnyProviderConfig.for_model("gpt-4", config)
assert provider_config.api_key == "test-key"
def test_for_model_anthropic(self) -> None:
"""Test for_model with Anthropic model."""
config: AiConfig = {"anthropic": {"api_key": "test-anthropic-key"}}
provider_config = AnyProviderConfig.for_model("claude-3-opus", config)
assert provider_config.api_key == "test-anthropic-key"
def test_for_model_github(self) -> None:
"""Test for_model with GitHub model."""
config: AiConfig = {"github": {"api_key": "test-github-key"}}
provider_config = AnyProviderConfig.for_model("github/gpt-4o", config)
assert provider_config.api_key == "test-github-key"
def test_for_model_openrouter(self) -> None:
"""Test for_model with OpenRouter model."""
config: AiConfig = {"openrouter": {"api_key": "test-openrouter-key"}}
provider_config = AnyProviderConfig.for_model(
"openrouter/gpt-4", config
)
assert provider_config.api_key == "test-openrouter-key"
assert provider_config.base_url == "https://openrouter.ai/api/v1/"
def test_for_model_wandb(self) -> None:
"""Test for_model with Weights & Biases model."""
config: AiConfig = {"wandb": {"api_key": "test-wandb-key"}}
provider_config = AnyProviderConfig.for_model("wandb/llama-3", config)
assert provider_config.api_key == "test-wandb-key"
assert provider_config.base_url == "https://api.inference.wandb.ai/v1/"
def test_for_model_unknown_defaults_to_ollama(self) -> None:
"""Test for_model with unknown provider defaults to Ollama."""
config: AiConfig = {"ollama": {"api_key": "test-key"}}
provider_config = AnyProviderConfig.for_model("unknown-model", config)
assert provider_config.api_key == "test-key"
def test_for_model_unknown_provider_defaults_to_openai_compatible(
self,
) -> None:
"""Test for_model with unknown provider defaults to OpenAI compatible."""
config: AiConfig = {
"open_ai_compatible": {"api_key": "test-key"},
"open_ai": {"api_key": "other-key"},
}
provider_config = AnyProviderConfig.for_model(
"provider/unknown-model", config
)
assert provider_config.api_key == "test-key"
# Fallback to OpenAI if OpenAI compatible is not configured
config: AiConfig = {
"open_ai": {"api_key": "other-key"},
}
provider_config = AnyProviderConfig.for_model(
"provider/unknown-model", config
)
assert provider_config.api_key == "other-key"
@patch("marimo._server.ai.config._get_tools")
def test_tools_included_when_available(self, mock_get_tools: Any) -> None:
"""Test that tools are included when available."""
mock_tool = ToolDefinition(
name="test_tool",
description="Test tool",
parameters={},
source="backend",
mode=["manual"],
)
mock_get_tools.return_value = [mock_tool]
config: AiConfig = {
"open_ai": {"api_key": "test-key"},
"mode": "manual",
}
provider_config = AnyProviderConfig.for_openai(config)
assert len(provider_config.tools) == 1
assert provider_config.tools[0] == mock_tool
@patch("marimo._server.ai.config._get_tools")
def test_tools_excluded_when_empty(self, mock_get_tools: Any) -> None:
"""Test that tools are excluded when empty to prevent errors with deepseek."""
mock_get_tools.return_value = []
config: AiConfig = {
"open_ai": {"api_key": "test-key"},
"mode": "manual",
}
provider_config = AnyProviderConfig.for_openai(config)
assert provider_config.tools is None
class TestOsKey:
    """Tests for AnyProviderConfig.os_key environment-variable lookups."""
    @patch.dict(os.environ, {"OPENAI_API_KEY": "test-api-key"})
    def test_os_key_exists(self) -> None:
        """A set environment variable is returned verbatim."""
        assert AnyProviderConfig.os_key("OPENAI_API_KEY") == "test-api-key"
    @patch.dict(os.environ, {}, clear=True)
    def test_os_key_not_exists(self) -> None:
        """An unset environment variable yields None."""
        assert AnyProviderConfig.os_key("NONEXISTENT_KEY") is None
    @patch.dict(os.environ, {"EMPTY_KEY": ""})
    def test_os_key_empty_string(self) -> None:
        """A variable set to the empty string is returned as '' (not None)."""
        assert AnyProviderConfig.os_key("EMPTY_KEY") == ""
class TestProviderConfigWithFallback:
    """Tests for provider config methods with OS environment fallback.

    Each provider resolves its API key in priority order: an explicit
    ``api_key`` in the config wins, otherwise the provider's well-known
    environment variable is used (OPENAI_API_KEY, AZURE_API_KEY,
    ANTHROPIC_API_KEY, GEMINI_API_KEY/GOOGLE_API_KEY, GITHUB_TOKEN,
    OPENROUTER_API_KEY, WANDB_API_KEY). ``patch.dict(..., clear=True)``
    is used where the test must guarantee no other env var leaks in.
    """
    @patch.dict(os.environ, {"OPENAI_API_KEY": "env-openai-key"})
    def test_for_openai_with_fallback_key(self) -> None:
        """Test OpenAI config uses fallback key when config is missing api_key."""
        config: AiConfig = {"open_ai": {}}
        provider_config = AnyProviderConfig.for_openai(config)
        assert provider_config.api_key == "env-openai-key"
    @patch.dict(os.environ, {"OPENAI_API_KEY": "env-openai-key"})
    def test_for_openai_empty(self) -> None:
        """Test OpenAI config uses fallback key when config is missing api_key and config is empty."""
        config: AiConfig = {}
        provider_config = AnyProviderConfig.for_openai(config)
        assert provider_config.api_key == "env-openai-key"
    @patch.dict(os.environ, {"OPENAI_API_KEY": "env-openai-key"})
    def test_for_openai_config_key_takes_precedence(self) -> None:
        """Test OpenAI config key takes precedence over environment variable."""
        config: AiConfig = {"open_ai": {"api_key": "config-openai-key"}}
        provider_config = AnyProviderConfig.for_openai(config)
        assert provider_config.api_key == "config-openai-key"
    @patch.dict(os.environ, {}, clear=True)
    def test_for_openai_no_fallback_available(self) -> None:
        """Test OpenAI config fails when no config key and no env var."""
        config: AiConfig = {"open_ai": {}}
        with pytest.raises(HTTPException) as exc_info:
            AnyProviderConfig.for_openai(config)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "OpenAI API key not configured" in str(exc_info.value.detail)
    @patch.dict(os.environ, {"AZURE_API_KEY": "env-azure-key"})
    def test_for_azure_with_fallback_key(self) -> None:
        """Test Azure config uses fallback key when config is missing api_key."""
        config: AiConfig = {
            "azure": {"base_url": "https://test.openai.azure.com"}
        }
        provider_config = AnyProviderConfig.for_azure(config)
        assert provider_config.api_key == "env-azure-key"
    @patch.dict(os.environ, {"AZURE_API_KEY": "env-azure-key"})
    def test_for_azure_config_key_takes_precedence(self) -> None:
        """Test Azure config key takes precedence over environment variable."""
        config: AiConfig = {
            "azure": {
                "api_key": "config-azure-key",
                "base_url": "https://test.openai.azure.com",
            }
        }
        provider_config = AnyProviderConfig.for_azure(config)
        assert provider_config.api_key == "config-azure-key"
    @patch.dict(os.environ, {"ANTHROPIC_API_KEY": "env-anthropic-key"})
    def test_for_anthropic_with_fallback_key(self) -> None:
        """Test Anthropic config uses fallback key when config is missing api_key."""
        config: AiConfig = {"anthropic": {}}
        provider_config = AnyProviderConfig.for_anthropic(config)
        assert provider_config.api_key == "env-anthropic-key"
    @patch.dict(os.environ, {"ANTHROPIC_API_KEY": "env-anthropic-key"})
    def test_for_anthropic_config_key_takes_precedence(self) -> None:
        """Test Anthropic config key takes precedence over environment variable."""
        config: AiConfig = {"anthropic": {"api_key": "config-anthropic-key"}}
        provider_config = AnyProviderConfig.for_anthropic(config)
        assert provider_config.api_key == "config-anthropic-key"
    @patch.dict(os.environ, {"GEMINI_API_KEY": "env-gemini-key"})
    def test_for_google_with_gemini_fallback_key(self) -> None:
        """Test Google config uses GEMINI_API_KEY fallback when config is missing api_key."""
        config: AiConfig = {"google": {}}
        provider_config = AnyProviderConfig.for_google(config)
        assert provider_config.api_key == "env-gemini-key"
    # clear=True ensures GEMINI_API_KEY is absent so GOOGLE_API_KEY is reached.
    @patch.dict(os.environ, {"GOOGLE_API_KEY": "env-google-key"}, clear=True)
    def test_for_google_with_google_fallback_key(self) -> None:
        """Test Google config uses GOOGLE_API_KEY fallback when GEMINI_API_KEY is not available."""
        config: AiConfig = {"google": {}}
        provider_config = AnyProviderConfig.for_google(config)
        assert provider_config.api_key == "env-google-key"
    @patch.dict(
        os.environ,
        {
            "GEMINI_API_KEY": "env-gemini-key",
            "GOOGLE_API_KEY": "env-google-key",
        },
    )
    def test_for_google_gemini_takes_precedence_over_google(self) -> None:
        """Test Google config prefers GEMINI_API_KEY over GOOGLE_API_KEY."""
        config: AiConfig = {"google": {}}
        provider_config = AnyProviderConfig.for_google(config)
        assert provider_config.api_key == "env-gemini-key"
    @patch.dict(os.environ, {"GEMINI_API_KEY": "env-gemini-key"})
    def test_for_google_config_key_takes_precedence(self) -> None:
        """Test Google config key takes precedence over environment variables."""
        config: AiConfig = {"google": {"api_key": "config-google-key"}}
        provider_config = AnyProviderConfig.for_google(config)
        assert provider_config.api_key == "config-google-key"
    @patch.dict(os.environ, {}, clear=True)
    def test_for_google_no_fallback_available(self) -> None:
        """Test Google config succeeds with empty key when no env vars."""
        # Unlike OpenAI/GitHub, Google does not raise on a missing key.
        config: AiConfig = {"google": {}}
        provider_config = AnyProviderConfig.for_google(config)
        assert provider_config == AnyProviderConfig(
            base_url=None,
            api_key="",
            ssl_verify=True,
        )
    @patch.dict(os.environ, {"GITHUB_TOKEN": "env-github-token"})
    def test_for_github_with_fallback_key(self) -> None:
        """Test GitHub config uses fallback key when config is missing api_key."""
        config: AiConfig = {"github": {}}
        provider_config = AnyProviderConfig.for_github(config)
        assert provider_config.api_key == "env-github-token"
    @patch.dict(os.environ, {"GITHUB_TOKEN": "env-github-token"})
    def test_for_github_config_key_takes_precedence(self) -> None:
        """Test GitHub config key takes precedence over environment variable."""
        config: AiConfig = {"github": {"api_key": "config-github-token"}}
        provider_config = AnyProviderConfig.for_github(config)
        assert provider_config.api_key == "config-github-token"
    @patch.dict(os.environ, {}, clear=True)
    def test_for_github_no_fallback_available(self) -> None:
        """Test GitHub config fails when no config key and no env var."""
        config: AiConfig = {"github": {}}
        with pytest.raises(HTTPException) as exc_info:
            AnyProviderConfig.for_github(config)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "GitHub API key not configured" in str(exc_info.value.detail)
    @patch.dict(os.environ, {"OPENROUTER_API_KEY": "env-openrouter-token"})
    def test_for_openrouter_with_fallback_key(self) -> None:
        """Test OpenRouter config uses fallback key when config is missing api_key."""
        config: AiConfig = {"openrouter": {}}
        provider_config = AnyProviderConfig.for_openrouter(config)
        assert provider_config.api_key == "env-openrouter-token"
    @patch.dict(os.environ, {"OPENROUTER_API_KEY": "env-openrouter-token"})
    def test_for_openrouter_config_key_takes_precedence(self) -> None:
        """Test OpenRouter config key takes precedence over environment variable."""
        config: AiConfig = {
            "openrouter": {"api_key": "config-openrouter-token"}
        }
        provider_config = AnyProviderConfig.for_openrouter(config)
        assert provider_config.api_key == "config-openrouter-token"
    @patch.dict(os.environ, {}, clear=True)
    def test_for_openrouter_no_fallback_available(self) -> None:
        """Test OpenRouter config fails when no config key and no env var."""
        config: AiConfig = {"openrouter": {}}
        with pytest.raises(HTTPException) as exc_info:
            AnyProviderConfig.for_openrouter(config)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "OpenRouter API key not configured" in str(
            exc_info.value.detail
        )
    @patch.dict(os.environ, {"WANDB_API_KEY": "env-wandb-token"})
    def test_for_wandb_with_fallback_key(self) -> None:
        """Test Weights & Biases config uses fallback key when config is missing api_key."""
        config: AiConfig = {"wandb": {}}
        provider_config = AnyProviderConfig.for_wandb(config)
        assert provider_config.api_key == "env-wandb-token"
    @patch.dict(os.environ, {"WANDB_API_KEY": "env-wandb-token"})
    def test_for_wandb_config_key_takes_precedence(self) -> None:
        """Test Weights & Biases config key takes precedence over environment variable."""
        config: AiConfig = {"wandb": {"api_key": "config-wandb-token"}}
        provider_config = AnyProviderConfig.for_wandb(config)
        assert provider_config.api_key == "config-wandb-token"
    @patch.dict(os.environ, {}, clear=True)
    def test_for_wandb_no_fallback_available(self) -> None:
        """Test Weights & Biases config fails when no config key and no env var."""
        config: AiConfig = {"wandb": {}}
        with pytest.raises(HTTPException) as exc_info:
            AnyProviderConfig.for_wandb(config)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "Weights & Biases API key not configured" in str(
            exc_info.value.detail
        )
class TestGetKey:
    """Tests for _get_key function.

    ``_get_key`` resolves a provider API key from a config dict with
    special handling for Bedrock (profile / AWS credential pairs) and
    Ollama (placeholder key for local URLs). Missing, empty, or None
    keys resolve to "" rather than raising; only a non-dict config is
    an error.
    """
    def test_get_key_with_api_key(self):
        """Test getting API key from config."""
        config = {"api_key": "test-key"}
        result = _get_key(config, "Test Service")
        assert result == "test-key"
    def test_get_key_bedrock_profile(self):
        """Test getting Bedrock key with profile name."""
        config = {"profile_name": "aws-profile"}
        result = _get_key(config, "Bedrock")
        assert result == "profile:aws-profile"
    def test_get_key_bedrock_credentials(self):
        """Test getting Bedrock key with AWS credentials."""
        config = {
            "aws_access_key_id": "access-key",
            "aws_secret_access_key": "secret-key",
        }
        result = _get_key(config, "Bedrock")
        assert result == "access-key:secret-key"
    def test_get_key_bedrock_fallback(self):
        """Test Bedrock key fallback when no credentials."""
        config = {}
        result = _get_key(config, "Bedrock")
        assert result == ""
    def test_get_key_ollama_placeholder(self):
        """Test Ollama gets placeholder key for local URL."""
        config = {"base_url": "http://127.0.0.1:11434/"}
        result = _get_key(config, "Ollama")
        assert result == "ollama-placeholder"
    def test_get_key_invalid_config(self):
        """Test error when config is not a dict."""
        with pytest.raises(HTTPException) as exc_info:
            _get_key("invalid", "Test Service")
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "Invalid config" in str(exc_info.value.detail)
    def test_get_key_missing_api_key(self):
        """A missing api_key resolves to an empty string (no error raised)."""
        config = {}
        assert _get_key(config, "Test Service") == ""
    def test_get_key_empty_api_key(self):
        """An empty api_key resolves to an empty string (no error raised)."""
        config = {"api_key": ""}
        assert _get_key(config, "Test Service") == ""
    def test_get_key_none_api_key(self):
        """A None api_key resolves to an empty string (no error raised)."""
        config = {"api_key": None}
        assert _get_key(config, "Test Service") == ""
    def test_get_key_with_fallback_key(self):
        """Test using fallback key when api_key is missing."""
        config = {}
        result = _get_key(config, "Test Service", fallback_key="fallback-key")
        assert result == "fallback-key"
    def test_get_key_with_fallback_key_empty_api_key(self):
        """Test using fallback key when api_key is empty."""
        config = {"api_key": ""}
        result = _get_key(config, "Test Service", fallback_key="fallback-key")
        assert result == "fallback-key"
    def test_get_key_with_fallback_key_none_api_key(self):
        """Test using fallback key when api_key is None."""
        config = {"api_key": None}
        result = _get_key(config, "Test Service", fallback_key="fallback-key")
        assert result == "fallback-key"
    def test_get_key_config_takes_precedence_over_fallback(self):
        """Test that config api_key takes precedence over fallback_key."""
        config = {"api_key": "config-key"}
        result = _get_key(config, "Test Service", fallback_key="fallback-key")
        assert result == "config-key"
    def test_get_key_no_fallback_key_provided(self):
        """No api_key and fallback_key=None resolves to "" (no error raised)."""
        config = {}
        assert _get_key(config, "Test Service", fallback_key=None) == ""
    def test_get_key_empty_fallback_key(self):
        """An empty fallback key resolves to "" (no error raised)."""
        config = {}
        assert _get_key(config, "Test Service", fallback_key="") == ""
    def test_get_key_bedrock_profile_ignores_fallback(self):
        """Test that Bedrock profile handling ignores fallback key."""
        config = {"profile_name": "aws-profile"}
        result = _get_key(config, "Bedrock", fallback_key="fallback-key")
        assert result == "profile:aws-profile"
    def test_get_key_bedrock_credentials_ignores_fallback(self):
        """Test that Bedrock credentials handling ignores fallback key."""
        config = {
            "aws_access_key_id": "access-key",
            "aws_secret_access_key": "secret-key",
        }
        result = _get_key(config, "Bedrock", fallback_key="fallback-key")
        assert result == "access-key:secret-key"
    def test_get_key_ollama_placeholder_ignores_fallback(self):
        """Test that Ollama placeholder handling ignores fallback key."""
        config = {"base_url": "http://127.0.0.1:11434/"}
        result = _get_key(config, "Ollama", fallback_key="fallback-key")
        assert result == "ollama-placeholder"
class TestGetBaseUrl:
    """Tests for the _get_base_url helper."""
    def test_get_base_url_with_url(self):
        """An explicit base_url in the config is returned unchanged."""
        url = _get_base_url({"base_url": "https://api.example.com"})
        assert url == "https://api.example.com"
    def test_get_base_url_bedrock_region(self):
        """For Bedrock, the region name doubles as the base URL."""
        url = _get_base_url({"region_name": "us-east-1"}, "Bedrock")
        assert url == "us-east-1"
    def test_get_base_url_bedrock_without_name_param(self):
        """region_name is ignored unless the provider name is passed."""
        assert _get_base_url({"region_name": "us-east-1"}) is None
    def test_get_base_url_bedrock_no_region(self):
        """Bedrock with no configured region yields None."""
        assert _get_base_url({}, "Bedrock") is None
    def test_get_base_url_missing(self):
        """A config without base_url yields None."""
        assert _get_base_url({}) is None
    def test_get_base_url_invalid_config(self):
        """A non-dict config raises a 400 HTTPException."""
        with pytest.raises(HTTPException) as exc_info:
            _get_base_url("invalid")
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "Invalid config" in str(exc_info.value.detail)
class TestGetAiConfig:
    """Tests for the _get_ai_config helper."""
    def test_get_ai_config_success(self):
        """The provider sub-config is returned when present."""
        cfg: AiConfig = {"open_ai": {"api_key": "test-key"}}
        assert _get_ai_config(cfg, "open_ai") == {"api_key": "test-key"}
    def test_get_ai_config_missing_key(self):
        """A missing provider key yields an empty dict."""
        cfg: AiConfig = {}
        assert _get_ai_config(cfg, "open_ai") == {}
    def test_get_ai_config_empty_tools(self):
        """Top-level keys such as 'mode' are not mixed into the provider config."""
        cfg: AiConfig = {
            "open_ai": {"api_key": "test-key"},
            "mode": "manual",
        }
        assert _get_ai_config(cfg, "open_ai") == {"api_key": "test-key"}
class TestUtilityFunctions:
    """Tests for utility functions.

    Covers model selection (chat / edit / autocomplete, including the
    legacy ``open_ai.model`` fallback and edit→chat fallback) and the
    ``get_max_tokens`` defaulting behavior.
    """
    def test_get_model_with_openai_config(self):
        """Test getting model from OpenAI config."""
        config: AiConfig = {
            "models": {
                "chat_model": "gpt-4",
                "edit_model": "gpt-5",
                "displayed_models": [],
                "custom_models": [],
            },
            "open_ai": {"api_key": "test-key"},
        }
        result = get_chat_model(config)
        assert result == "gpt-4"
        result = get_edit_model(config)
        assert result == "gpt-5"
    def test_get_model_default(self):
        """Test getting default model when not specified."""
        config: AiConfig = {
            "models": {
                "displayed_models": [],
                "custom_models": [],
            },
            "open_ai": {"api_key": "test-key"},
        }
        result = get_chat_model(config)
        assert result == DEFAULT_MODEL
        result = get_edit_model(config)
        assert result == DEFAULT_MODEL
        # get_autocomplete_model takes the full MarimoConfig-shaped dict.
        result = get_autocomplete_model({"ai": config})
        assert result == DEFAULT_MODEL
    def test_get_max_tokens_from_config(self):
        """Test getting max tokens from config."""
        config = cast(
            MarimoConfig,
            {
                "ai": {"max_tokens": 2048},
            },
        )
        result = get_max_tokens(config)
        assert result == 2048
    def test_get_max_tokens_default_no_ai_config(self):
        """Test getting default max tokens when no AI config."""
        config = cast(
            MarimoConfig,
            {
                "completion": {"activate_on_typing": True, "copilot": False},
            },
        )
        result = get_max_tokens(config)
        assert result == DEFAULT_MAX_TOKENS
    def test_get_max_tokens_default_no_max_tokens(self):
        """Test getting default max tokens when max_tokens not specified."""
        config = cast(
            MarimoConfig,
            {
                "ai": {},
            },
        )
        result = get_max_tokens(config)
        assert result == DEFAULT_MAX_TOKENS
    def test_get_autocomplete_model(self) -> None:
        """Test get_autocomplete_model with new ai.models.autocomplete_model config."""
        config: AiConfig = {
            "models": {
                "chat_model": "openai/gpt-4o",
                "edit_model": "openai/gpt-4o-mini",
                "autocomplete_model": "openai/gpt-3.5-turbo-instruct",
                "displayed_models": [],
                "custom_models": [],
            }
        }
        assert (
            get_autocomplete_model({"ai": config})
            == "openai/gpt-3.5-turbo-instruct"
        )
    def test_get_chat_model(self) -> None:
        """Test get_chat_model with new ai.models.chat_model config."""
        config: AiConfig = {
            "models": {
                "chat_model": "anthropic/claude-3-5-sonnet-20241022",
                "edit_model": "openai/gpt-4o-mini",
                "displayed_models": [],
                "custom_models": [],
            }
        }
        assert get_chat_model(config) == "anthropic/claude-3-5-sonnet-20241022"
    def test_get_edit_model(self) -> None:
        """Test get_edit_model with new ai.models.edit_model config."""
        config: AiConfig = {
            "models": {
                "chat_model": "openai/gpt-4o",
                "edit_model": "anthropic/claude-3-5-haiku-20241022",
                "displayed_models": [],
                "custom_models": [],
            }
        }
        assert get_edit_model(config) == "anthropic/claude-3-5-haiku-20241022"
    def test_get_edit_model_fallback_to_chat_model(self) -> None:
        """Test get_edit_model falls back to chat_model when edit_model is not set."""
        config: AiConfig = {
            "models": {
                "chat_model": "openai/gpt-4o",
                "displayed_models": [],
                "custom_models": [],
                # Note: no edit_model
            }
        }
        assert get_edit_model(config) == "openai/gpt-4o"
    def test_get_models_with_legacy_openai_config(self) -> None:
        """Test that the new get_*_model functions work with legacy open_ai.model config."""
        config: AiConfig = {
            "open_ai": {
                "api_key": "test-key",
                "model": "gpt-4-legacy",
            }
        }
        # Should fall back to open_ai.model for both chat and edit
        assert get_chat_model(config) == "gpt-4-legacy"
        assert get_edit_model(config) == "gpt-4-legacy"
        # The legacy field does not affect autocomplete selection.
        assert get_autocomplete_model({"ai": config}) == DEFAULT_MODEL
    def test_for_model_with_autocomplete_model(self) -> None:
        """Test AnyProviderConfig.for_model works with autocomplete models from new config."""
        config: AiConfig = {
            "open_ai": {"api_key": "test-key"},
            "models": {
                "autocomplete_model": "openai/gpt-3.5-turbo-instruct",
                "displayed_models": [],
                "custom_models": [],
            },
        }
        provider_config = AnyProviderConfig.for_model(
            "openai/gpt-3.5-turbo-instruct", config
        )
        assert provider_config.api_key == "test-key"
        assert provider_config.tools is None
class TestSSLConfiguration:
    """Tests for SSL configuration across all OpenAI-like providers.

    Parametrized over (provider display name, AnyProviderConfig factory
    method name, minimal config dict) so the same SSL assertions run
    against OpenAI, GitHub, and Ollama.
    """
    @pytest.mark.parametrize(
        ("provider_name", "provider_method", "api_key_config"),
        [
            ("openai", "for_openai", {"open_ai": {"api_key": "test-key"}}),
            ("github", "for_github", {"github": {"api_key": "test-key"}}),
            ("ollama", "for_ollama", {"ollama": {"api_key": "test-key"}}),
        ],
    )
    def test_ssl_config_from_provider_config(
        self,
        provider_name: str,
        provider_method: str,
        api_key_config: AiConfig,
    ) -> None:
        """Test SSL configuration is read from provider config."""
        # Get the provider key from api_key_config
        # (e.g. "open_ai" / "github" / "ollama" — the single top-level key)
        provider_key = next(iter(api_key_config.keys()))
        config: AiConfig = {
            **api_key_config,
        }
        config[provider_key]["ssl_verify"] = False
        config[provider_key]["ca_bundle_path"] = "/custom/path/to/ca.pem"
        config[provider_key]["client_pem"] = "/custom/path/to/client.pem"
        config[provider_key]["extra_headers"] = {"X-Custom": "header"}
        method = getattr(AnyProviderConfig, provider_method)
        provider_config = method(config)
        assert provider_config.ssl_verify is False, (
            f"{provider_name}: ssl_verify should be False"
        )
        assert provider_config.ca_bundle_path == "/custom/path/to/ca.pem", (
            f"{provider_name}: ca_bundle_path should match"
        )
        assert provider_config.client_pem == "/custom/path/to/client.pem", (
            f"{provider_name}: client_pem should match"
        )
        # GitHub includes default headers that are merged with user headers
        if provider_name == "github":
            assert provider_config.extra_headers is not None
            assert "X-Custom" in provider_config.extra_headers
            assert provider_config.extra_headers["X-Custom"] == "header"
            # GitHub should also include default headers
            assert "editor-version" in provider_config.extra_headers
            assert "Copilot-Integration-Id" in provider_config.extra_headers
        else:
            assert provider_config.extra_headers == {"X-Custom": "header"}, (
                f"{provider_name}: extra_headers should match"
            )
    @pytest.mark.parametrize(
        ("provider_name", "provider_method", "api_key_config"),
        [
            ("openai", "for_openai", {"open_ai": {"api_key": "test-key"}}),
            ("github", "for_github", {"github": {"api_key": "test-key"}}),
            ("ollama", "for_ollama", {"ollama": {"api_key": "test-key"}}),
        ],
    )
    @patch.dict(os.environ, {"SSL_CERT_FILE": "/env/path/to/ca.pem"})
    def test_ssl_cert_file_fallback(
        self,
        provider_name: str,
        provider_method: str,
        api_key_config: AiConfig,
    ) -> None:
        """Test SSL_CERT_FILE environment variable is used as fallback."""
        config: AiConfig = {**api_key_config}
        method = getattr(AnyProviderConfig, provider_method)
        provider_config = method(config)
        assert provider_config.ca_bundle_path == "/env/path/to/ca.pem", (
            f"{provider_name}: should use SSL_CERT_FILE env var as fallback"
        )
class TestEdgeCases:
    """Tests for edge cases and error conditions.

    Providers split into two behaviors when their config section is
    entirely absent: OpenAI / Anthropic / Azure / GitHub raise a 400
    HTTPException, while Google / Bedrock / Ollama / OpenAI-compatible
    fall back to a usable default config.
    """
    def test_openai_config_missing(self):
        """Test error when OpenAI config is missing."""
        config: AiConfig = {}
        with pytest.raises(HTTPException) as exc_info:
            AnyProviderConfig.for_openai(config)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "OpenAI API key not configured" in str(exc_info.value.detail)
    def test_anthropic_config_missing(self):
        """Test error when Anthropic config is missing."""
        config: AiConfig = {}
        with pytest.raises(HTTPException) as exc_info:
            AnyProviderConfig.for_anthropic(config)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "Anthropic API key not configured" in str(exc_info.value.detail)
    def test_google_config_missing(self):
        """Test Google config defaults to empty key when config is missing."""
        config: AiConfig = {}
        provider_config = AnyProviderConfig.for_google(config)
        assert provider_config == AnyProviderConfig(
            base_url=None,
            api_key="",
            ssl_verify=True,
        )
    def test_bedrock_config_missing(self):
        """Test when Bedrock config is missing, should not error since could use environment variables."""
        config: AiConfig = {}
        provider_config = AnyProviderConfig.for_bedrock(config)
        assert provider_config == AnyProviderConfig(
            base_url=None,
            api_key="",
        )
    def test_azure_config_missing(self):
        """Test error when Azure config is missing."""
        config: AiConfig = {}
        with pytest.raises(HTTPException) as exc_info:
            AnyProviderConfig.for_azure(config)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "Azure OpenAI API key not configured" in str(
            exc_info.value.detail
        )
    def test_ollama_config_missing(self):
        """Test should not error when Ollama config is missing."""
        config: AiConfig = {}
        provider_config = AnyProviderConfig.for_ollama(config)
        assert provider_config == AnyProviderConfig(
            base_url="http://127.0.0.1:11434/v1",
            api_key="ollama-placeholder",
            ssl_verify=True,
        )
    def test_openai_compatible_config_missing(self):
        """Test OpenAI Compatible config falls back to an empty default (no error)."""
        config: AiConfig = {}
        assert AnyProviderConfig.for_openai_compatible(
            config
        ) == AnyProviderConfig(base_url=None, api_key="", ssl_verify=True)
    def test_github_config_missing(self):
        """Test error when GitHub config is missing."""
        config: AiConfig = {}
        with pytest.raises(HTTPException) as exc_info:
            AnyProviderConfig.for_github(config)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "GitHub API key not configured" in str(exc_info.value.detail)
    def test_tools_empty_list(self):
        """Test that tools are not included when empty list."""
        # The constructor itself normalizes [] to None.
        provider_config = AnyProviderConfig(
            tools=[],
            api_key="test-key",
            base_url="test-base-url",
        )
        assert provider_config.tools is None
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/ai/test_ai_config.py",
"license": "Apache License 2.0",
"lines": 1008,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.