sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
stanfordnlp/dspy:tests/utils/test_syncify.py | import asyncio
import dspy
def test_syncify_in_place():
class MyProgram(dspy.Module):
async def aforward(self, x: int) -> int:
await asyncio.sleep(0.01)
return x + 1
sync_program = dspy.syncify(MyProgram())
assert sync_program(1) == 2
assert sync_program(2) == 3
def test_syncify_with_wrapper():
class MyProgram(dspy.Module):
async def aforward(self, x: int) -> int:
await asyncio.sleep(0.01)
return x + 1
sync_program = dspy.syncify(MyProgram(), in_place=False)
assert sync_program(1) == 2
assert sync_program(2) == 3
def test_syncify_works_with_optimizers():
class MyProgram(dspy.Module):
def __init__(self):
self.predict = dspy.Predict("question->answer")
async def aforward(self, question: str):
return await self.predict.acall(question=question)
async_program = MyProgram()
def dummy_metric(gold, pred, traces=None):
return True
# We only test the optimizer completes without errors, so the LM response doesn't matter.
lm = dspy.utils.DummyLM([{"answer": "dummy"} for _ in range(100)])
dspy.configure(lm=lm)
dataset = [dspy.Example(question="question", answer="answer").with_inputs("question") for _ in range(10)]
optimizer = dspy.BootstrapFewShot(metric=dummy_metric, max_bootstrapped_demos=2, max_labeled_demos=0)
# Test syncify in place
sync_program = dspy.syncify(async_program, in_place=True)
optimized_program = optimizer.compile(sync_program, trainset=dataset)
assert len(optimized_program.predictors()[0].demos) == 2
# Test syncify with wrapper
sync_program = dspy.syncify(async_program, in_place=False)
optimized_program = optimizer.compile(sync_program, trainset=dataset)
assert len(optimized_program.predictors()[0].demos) == 2
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/utils/test_syncify.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/utils/test_settings.py | import asyncio
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from unittest import mock
import pytest
from litellm import Choices, Message, ModelResponse
import dspy
def test_basic_dspy_settings():
dspy.configure(lm=dspy.LM("openai/gpt-4o"), adapter=dspy.JSONAdapter(), callbacks=[lambda x: x])
assert dspy.settings.lm.model == "openai/gpt-4o"
assert isinstance(dspy.settings.adapter, dspy.JSONAdapter)
assert len(dspy.settings.callbacks) == 1
def test_forbid_configure_call_in_child_thread():
dspy.configure(lm=dspy.LM("openai/gpt-4o"), adapter=dspy.JSONAdapter(), callbacks=[lambda x: x])
def worker():
with pytest.raises(RuntimeError, match="Cannot call dspy.configure"):
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"), callbacks=[])
with ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(worker)
def test_dspy_context():
dspy.configure(lm=dspy.LM("openai/gpt-4o"), adapter=dspy.JSONAdapter(), callbacks=[lambda x: x])
with dspy.context(lm=dspy.LM("openai/gpt-4o-mini"), callbacks=[]):
assert dspy.settings.lm.model == "openai/gpt-4o-mini"
assert len(dspy.settings.callbacks) == 0
assert dspy.settings.lm.model == "openai/gpt-4o"
assert len(dspy.settings.callbacks) == 1
def test_dspy_context_parallel():
dspy.configure(lm=dspy.LM("openai/gpt-4o"), adapter=dspy.JSONAdapter(), callbacks=[lambda x: x])
def worker(i):
with dspy.context(lm=dspy.LM("openai/gpt-4o-mini"), trace=[i], callbacks=[]):
assert dspy.settings.lm.model == "openai/gpt-4o-mini"
assert dspy.settings.trace == [i]
assert len(dspy.settings.callbacks) == 0
with ThreadPoolExecutor(max_workers=5) as executor:
executor.map(worker, range(3))
assert dspy.settings.lm.model == "openai/gpt-4o"
assert len(dspy.settings.callbacks) == 1
def test_dspy_context_with_dspy_parallel():
dspy.configure(lm=dspy.LM("openai/gpt-4o", cache=False), adapter=dspy.ChatAdapter())
class MyModule(dspy.Module):
def __init__(self):
self.predict = dspy.Predict("question -> answer")
def forward(self, question: str) -> str:
lm = dspy.LM("openai/gpt-4o-mini", cache=False) if "France" in question else dspy.settings.lm
with dspy.context(lm=lm):
time.sleep(1)
assert dspy.settings.lm.model == lm.model
return self.predict(question=question)
with mock.patch("litellm.completion") as mock_completion:
mock_completion.return_value = ModelResponse(
choices=[Choices(message=Message(content="[[ ## answer ## ]]\nParis"))],
model="openai/gpt-4o-mini",
)
module = MyModule()
parallelizer = dspy.Parallel()
input_pairs = [
(module, {"question": "What is the capital of France?"}),
(module, {"question": "What is the capital of Germany?"}),
]
parallelizer(input_pairs)
# Verify mock was called correctly
assert mock_completion.call_count == 2
for call_args in mock_completion.call_args_list:
if "France" in call_args.kwargs["messages"][-1]["content"]:
# France question uses gpt-4o-mini
assert call_args.kwargs["model"] == "openai/gpt-4o-mini"
else:
# Germany question uses gpt-4o
assert call_args.kwargs["model"] == "openai/gpt-4o"
# The main thread is not affected by the context
assert dspy.settings.lm.model == "openai/gpt-4o"
@pytest.mark.asyncio
async def test_dspy_context_with_async_task_group():
class MyModule(dspy.Module):
def __init__(self):
self.predict = dspy.Predict("question -> answer")
async def aforward(self, question: str) -> str:
lm = (
dspy.LM("openai/gpt-4o-mini", cache=False)
if "France" in question
else dspy.LM("openai/gpt-4o", cache=False)
)
with dspy.context(lm=lm, trace=[]):
await asyncio.sleep(1)
assert dspy.settings.lm.model == lm.model
result = await self.predict.acall(question=question)
assert len(dspy.settings.trace) == 1
return result
module = MyModule()
with dspy.context(lm=dspy.LM("openai/gpt-4.1", cache=False), adapter=dspy.ChatAdapter()):
with mock.patch("litellm.acompletion") as mock_completion:
mock_completion.return_value = ModelResponse(
choices=[Choices(message=Message(content="[[ ## answer ## ]]\nParis"))],
model="openai/gpt-4o-mini",
)
# Define the coroutines to be run
coroutines = [
module.acall(question="What is the capital of France?"),
module.acall(question="What is the capital of France?"),
module.acall(question="What is the capital of Germany?"),
module.acall(question="What is the capital of Germany?"),
]
# Run them concurrently and gather results
results = await asyncio.gather(*coroutines)
assert results[0].answer == "Paris"
assert results[1].answer == "Paris"
assert results[2].answer == "Paris"
assert results[3].answer == "Paris"
# Verify mock was called correctly
assert mock_completion.call_count == 4
# France question uses gpt-4o-mini
assert mock_completion.call_args_list[0].kwargs["model"] == "openai/gpt-4o-mini"
assert mock_completion.call_args_list[1].kwargs["model"] == "openai/gpt-4o-mini"
# Germany question uses gpt-4o
assert mock_completion.call_args_list[2].kwargs["model"] == "openai/gpt-4o"
assert mock_completion.call_args_list[3].kwargs["model"] == "openai/gpt-4o"
# The main thread is not affected by the context
assert dspy.settings.lm.model == "openai/gpt-4.1"
assert dspy.settings.trace == []
@pytest.mark.asyncio
async def test_dspy_configure_allowance_async():
def bar1():
# `dspy.configure` is disallowed in different async tasks from the initial one.
# In this case, foo1 (async) calls bar1 (sync), and bar1 uses the async task from foo1.
with pytest.raises(RuntimeError) as e:
dspy.configure(lm=dspy.LM("openai/gpt-4o"))
assert "dspy.configure(...) can only be called from the same async" in str(e.value)
async def foo1():
bar1()
await asyncio.sleep(0.1)
async def foo2():
# `dspy.configure` is disallowed in different async tasks from the initial one.
with pytest.raises(RuntimeError) as e:
dspy.configure(lm=dspy.LM("openai/gpt-4o"))
assert "dspy.configure(...) can only be called from the same async" in str(e.value)
await asyncio.sleep(0.1)
async def foo3():
# `dspy.context` is allowed in different async tasks from the initial one.
with dspy.context(lm=dspy.LM("openai/gpt-4o")):
await asyncio.sleep(0.1)
async def foo4():
# foo4 is directly invoked by the entry task, so it has the same async task as the entry task.
dspy.configure(lm=dspy.LM("openai/gpt-4o"))
await asyncio.sleep(0.1)
# `dspy.configure` is allowed to be called multiple times in the same async task.
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o"))
dspy.configure(adapter=dspy.JSONAdapter())
await asyncio.gather(foo1(), foo2(), foo3())
foo4()
def test_dspy_settings_save_load(tmp_path):
dspy.configure(lm=dspy.LM("openai/gpt-4o"), adapter=dspy.JSONAdapter(), callbacks=[lambda x: x])
dspy.settings.save(tmp_path / "settings.pkl")
dspy.configure(lm=None, adapter=None, callbacks=None)
loaded_settings = dspy.load_settings(tmp_path / "settings.pkl")
dspy.configure(**loaded_settings)
assert dspy.settings.lm.model == "openai/gpt-4o"
assert isinstance(dspy.settings.adapter, dspy.JSONAdapter)
assert len(dspy.settings.callbacks) == 1
def test_dspy_settings_save_exclude_keys(tmp_path):
dspy.configure(lm=dspy.LM("openai/gpt-4o"), adapter=dspy.JSONAdapter(), track_usage=True)
dspy.settings.save(tmp_path / "settings.pkl", exclude_keys=["adapter", "track_usage"])
dspy.configure(lm=None, adapter=None, track_usage=False)
loaded_settings = dspy.load_settings(tmp_path / "settings.pkl")
dspy.configure(**loaded_settings)
assert dspy.settings.lm.model == "openai/gpt-4o"
assert dspy.settings.adapter is None
assert not dspy.settings.track_usage
def test_settings_save_with_extra_modules(tmp_path):
# Create a temporary Python file with our custom module
custom_module_path = tmp_path / "custom_module.py"
with open(custom_module_path, "w") as f:
f.write(
"""
def callback(x):
return x + 1
"""
)
# Add the tmp_path to Python path so we can import the module
sys.path.insert(0, str(tmp_path))
try:
import custom_module
dspy.configure(callbacks=[custom_module.callback])
settings_path = tmp_path / "settings.pkl"
sys.path.insert(0, str(tmp_path))
dspy.configure(callbacks=[custom_module.callback])
dspy.settings.save(settings_path, modules_to_serialize=[custom_module])
# Remove the custom module again to simulate it not being available at load time
sys.modules.pop("custom_module", None)
sys.path.remove(str(tmp_path))
del custom_module
dspy.configure(callbacks=None)
# Loading should now succeed and preserve the adapter instance
loaded_settings = dspy.load_settings(settings_path)
dspy.settings.configure(**loaded_settings)
assert dspy.settings.callbacks[0](3) == 4
finally:
# Only need to clean up sys.path
if str(tmp_path) in sys.path:
sys.path.remove(str(tmp_path))
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/utils/test_settings.py",
"license": "MIT License",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/adapters/xml_adapter.py | import re
from typing import Any
from pydantic.fields import FieldInfo
from dspy.adapters.chat_adapter import ChatAdapter, FieldInfoWithName
from dspy.adapters.utils import format_field_value, translate_field_type
from dspy.signatures.signature import Signature
from dspy.utils.callback import BaseCallback
class XMLAdapter(ChatAdapter):
def __init__(self, callbacks: list[BaseCallback] | None = None):
super().__init__(callbacks)
self.field_pattern = re.compile(r"<(?P<name>\w+)>((?P<content>.*?))</\1>", re.DOTALL)
def format_field_with_value(self, fields_with_values: dict[FieldInfoWithName, Any]) -> str:
output = []
for field, field_value in fields_with_values.items():
formatted = format_field_value(field_info=field.info, value=field_value)
output.append(f"<{field.name}>\n{formatted}\n</{field.name}>")
return "\n\n".join(output).strip()
def format_field_structure(self, signature: type[Signature]) -> str:
"""
XMLAdapter requires input and output fields to be wrapped in XML tags like `<field_name>`.
"""
parts = []
parts.append("All interactions will be structured in the following way, with the appropriate values filled in.")
def format_signature_fields_for_instructions(fields: dict[str, FieldInfo]):
return self.format_field_with_value(
fields_with_values={
FieldInfoWithName(name=field_name, info=field_info): translate_field_type(field_name, field_info)
for field_name, field_info in fields.items()
},
)
parts.append(format_signature_fields_for_instructions(signature.input_fields))
parts.append(format_signature_fields_for_instructions(signature.output_fields))
return "\n\n".join(parts).strip()
def format_user_message_content(
self,
signature: type[Signature],
inputs: dict[str, Any],
prefix: str = "",
suffix: str = "",
main_request: bool = False,
) -> str:
messages = [prefix]
messages.append(self.format_field_with_value(
{
FieldInfoWithName(name=k, info=v): inputs.get(k)
for k, v in signature.input_fields.items() if k in inputs
},
))
if main_request:
output_requirements = self.user_message_output_requirements(signature)
if output_requirements is not None:
messages.append(output_requirements)
messages.append(suffix)
return "\n\n".join(messages).strip()
def format_assistant_message_content(
self,
signature: type[Signature],
outputs: dict[str, Any],
missing_field_message=None,
) -> str:
return self.format_field_with_value(
{
FieldInfoWithName(name=k, info=v): outputs.get(k, missing_field_message)
for k, v in signature.output_fields.items()
},
)
def user_message_output_requirements(self, signature: type[Signature]) -> str:
message = "Respond with the corresponding output fields wrapped in XML tags "
message += ", then ".join(f"`<{f}>`" for f in signature.output_fields)
message += "."
return message
def parse(self, signature: type[Signature], completion: str) -> dict[str, Any]:
fields = {}
for match in self.field_pattern.finditer(completion):
name = match.group("name")
content = match.group("content").strip()
if name in signature.output_fields and name not in fields:
fields[name] = content
# Cast values using base class parse_value helper
for k, v in fields.items():
fields[k] = self._parse_field_value(signature.output_fields[k], v, completion, signature)
if fields.keys() != signature.output_fields.keys():
from dspy.utils.exceptions import AdapterParseError
raise AdapterParseError(
adapter_name="XMLAdapter",
signature=signature,
lm_response=completion,
parsed_result=fields,
)
return fields
def _parse_field_value(self, field_info, raw, completion, signature):
from dspy.adapters.utils import parse_value
try:
return parse_value(raw, field_info.annotation)
except Exception as e:
from dspy.utils.exceptions import AdapterParseError
raise AdapterParseError(
adapter_name="XMLAdapter",
signature=signature,
lm_response=completion,
message=f"Failed to parse field {field_info} with value {raw}: {e}",
)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/adapters/xml_adapter.py",
"license": "MIT License",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/adapters/test_xml_adapter.py | import sys
from unittest import mock
import pydantic
import pytest
from litellm import Choices, Message, ModelResponse
import dspy
from dspy.adapters.chat_adapter import FieldInfoWithName
from dspy.adapters.xml_adapter import XMLAdapter
def test_xml_adapter_format_and_parse_basic():
class TestSignature(dspy.Signature):
question: str = dspy.InputField()
answer: str = dspy.OutputField()
adapter = XMLAdapter()
# Format output fields as XML
fields_with_values = {FieldInfoWithName(name="answer", info=TestSignature.output_fields["answer"]): "Paris"}
xml = adapter.format_field_with_value(fields_with_values)
assert xml.strip() == "<answer>\nParis\n</answer>"
# Parse XML output
completion = "<answer>Paris</answer>"
parsed = adapter.parse(TestSignature, completion)
assert parsed == {"answer": "Paris"}
def test_xml_adapter_parse_multiple_fields():
class TestSignature(dspy.Signature):
question: str = dspy.InputField()
answer: str = dspy.OutputField()
explanation: str = dspy.OutputField()
adapter = XMLAdapter()
completion = """
<answer>Paris</answer>
<explanation>The capital of France is Paris.</explanation>
"""
parsed = adapter.parse(TestSignature, completion)
assert parsed == {"answer": "Paris", "explanation": "The capital of France is Paris."}
def test_xml_adapter_parse_raises_on_missing_field():
class TestSignature(dspy.Signature):
question: str = dspy.InputField()
answer: str = dspy.OutputField()
explanation: str = dspy.OutputField()
adapter = XMLAdapter()
completion = "<answer>Paris</answer>"
with pytest.raises(dspy.utils.exceptions.AdapterParseError) as e:
adapter.parse(TestSignature, completion)
assert e.value.adapter_name == "XMLAdapter"
assert e.value.signature == TestSignature
assert e.value.lm_response == "<answer>Paris</answer>"
assert "explanation" in str(e.value)
def test_xml_adapter_parse_casts_types():
class TestSignature(dspy.Signature):
number: int = dspy.OutputField()
flag: bool = dspy.OutputField()
adapter = XMLAdapter()
completion = """
<number>42</number>
<flag>true</flag>
"""
parsed = adapter.parse(TestSignature, completion)
assert parsed == {"number": 42, "flag": True}
def test_xml_adapter_parse_raises_on_type_error():
class TestSignature(dspy.Signature):
number: int = dspy.OutputField()
adapter = XMLAdapter()
completion = "<number>not_a_number</number>"
with pytest.raises(dspy.utils.exceptions.AdapterParseError) as e:
adapter.parse(TestSignature, completion)
assert "Failed to parse field" in str(e.value)
def test_xml_adapter_format_and_parse_nested_model():
class InnerModel(pydantic.BaseModel):
value: int
label: str
class TestSignature(dspy.Signature):
question: str = dspy.InputField()
result: InnerModel = dspy.OutputField()
adapter = XMLAdapter()
# Format output fields as XML
fields_with_values = {
FieldInfoWithName(name="result", info=TestSignature.output_fields["result"]): InnerModel(value=5, label="foo")
}
xml = adapter.format_field_with_value(fields_with_values)
# The output will be a JSON string inside the XML tag
assert xml.strip().startswith("<result>")
assert '"value": 5' in xml
assert '"label": "foo"' in xml
assert xml.strip().endswith("</result>")
# Parse XML output (should parse as string, not as model)
completion = '<result>{"value": 5, "label": "foo"}</result>'
parsed = adapter.parse(TestSignature, completion)
# The parse_value helper will try to cast to InnerModel
assert isinstance(parsed["result"], InnerModel)
assert parsed["result"].value == 5
assert parsed["result"].label == "foo"
def test_xml_adapter_format_and_parse_list_of_models():
class Item(pydantic.BaseModel):
name: str
score: float
class TestSignature(dspy.Signature):
items: list[Item] = dspy.OutputField()
adapter = XMLAdapter()
items = [Item(name="a", score=1.1), Item(name="b", score=2.2)]
fields_with_values = {FieldInfoWithName(name="items", info=TestSignature.output_fields["items"]): items}
xml = adapter.format_field_with_value(fields_with_values)
assert xml.strip().startswith("<items>")
assert '"name": "a"' in xml
assert '"score": 2.2' in xml
assert xml.strip().endswith("</items>")
# Parse XML output
import json
completion = f"<items>{json.dumps([i.model_dump() for i in items])}</items>"
parsed = adapter.parse(TestSignature, completion)
assert isinstance(parsed["items"], list)
assert all(isinstance(i, Item) for i in parsed["items"])
assert parsed["items"][0].name == "a"
assert parsed["items"][1].score == 2.2
def test_xml_adapter_with_tool_like_output():
# XMLAdapter does not natively support tool calls, but we can test structured output
class ToolCall(pydantic.BaseModel):
name: str
args: dict
result: str
class TestSignature(dspy.Signature):
question: str = dspy.InputField()
tool_calls: list[ToolCall] = dspy.OutputField()
answer: str = dspy.OutputField()
adapter = XMLAdapter()
tool_calls = [
ToolCall(name="get_weather", args={"city": "Tokyo"}, result="Sunny"),
ToolCall(name="get_population", args={"country": "Japan", "year": 2023}, result="125M"),
]
fields_with_values = {
FieldInfoWithName(name="tool_calls", info=TestSignature.output_fields["tool_calls"]): tool_calls,
FieldInfoWithName(
name="answer", info=TestSignature.output_fields["answer"]
): "The weather is Sunny. Population is 125M.",
}
xml = adapter.format_field_with_value(fields_with_values)
assert xml.strip().startswith("<tool_calls>")
assert '"name": "get_weather"' in xml
assert '"result": "125M"' in xml
assert xml.strip().endswith("</answer>")
import json
completion = (
f"<tool_calls>{json.dumps([tc.model_dump() for tc in tool_calls])}</tool_calls>"
f"\n<answer>The weather is Sunny. Population is 125M.</answer>"
)
parsed = adapter.parse(TestSignature, completion)
assert isinstance(parsed["tool_calls"], list)
assert parsed["tool_calls"][0].name == "get_weather"
assert parsed["tool_calls"][1].result == "125M"
assert parsed["answer"] == "The weather is Sunny. Population is 125M."
def test_xml_adapter_formats_nested_images():
class ImageWrapper(pydantic.BaseModel):
images: list[dspy.Image]
tag: list[str]
class MySignature(dspy.Signature):
image: ImageWrapper = dspy.InputField()
text: str = dspy.OutputField()
image1 = dspy.Image(url="https://example.com/image1.jpg")
image2 = dspy.Image(url="https://example.com/image2.jpg")
image3 = dspy.Image(url="https://example.com/image3.jpg")
image_wrapper = ImageWrapper(images=[image1, image2, image3], tag=["test", "example"])
demos = [
dspy.Example(
image=image_wrapper,
text="This is a test image",
),
]
image_wrapper_2 = ImageWrapper(images=[dspy.Image(url="https://example.com/image4.jpg")], tag=["test", "example"])
adapter = dspy.XMLAdapter()
messages = adapter.format(MySignature, demos, {"image": image_wrapper_2})
assert len(messages) == 4
# Image information in the few-shot example's user message
expected_image1_content = {"type": "image_url", "image_url": {"url": "https://example.com/image1.jpg"}}
expected_image2_content = {"type": "image_url", "image_url": {"url": "https://example.com/image2.jpg"}}
expected_image3_content = {"type": "image_url", "image_url": {"url": "https://example.com/image3.jpg"}}
assert expected_image1_content in messages[1]["content"]
assert expected_image2_content in messages[1]["content"]
assert expected_image3_content in messages[1]["content"]
# The query image is formatted in the last user message
assert {"type": "image_url", "image_url": {"url": "https://example.com/image4.jpg"}} in messages[-1]["content"]
def test_xml_adapter_with_code():
# Test with code as input field
class CodeAnalysis(dspy.Signature):
"""Analyze the time complexity of the code"""
code: dspy.Code = dspy.InputField()
result: str = dspy.OutputField()
adapter = dspy.XMLAdapter()
messages = adapter.format(CodeAnalysis, [], {"code": "print('Hello, world!')"})
assert len(messages) == 2
# The output field type description should be included in the system message even if the output field is nested
assert dspy.Code.description() in messages[0]["content"]
# The user message should include the question and the tools
assert "print('Hello, world!')" in messages[1]["content"]
# Test with code as output field
class CodeGeneration(dspy.Signature):
"""Generate code to answer the question"""
question: str = dspy.InputField()
code: dspy.Code = dspy.OutputField()
adapter = dspy.XMLAdapter()
with mock.patch("litellm.completion") as mock_completion:
mock_completion.return_value = ModelResponse(
choices=[Choices(message=Message(content='<code>print("Hello, world!")</code>'))],
model="openai/gpt-4o-mini",
)
result = adapter(
dspy.LM(model="openai/gpt-4o-mini", cache=False),
{},
CodeGeneration,
[],
{"question": "Write a python program to print 'Hello, world!'"},
)
assert result[0]["code"].code == 'print("Hello, world!")'
def test_xml_adapter_full_prompt():
class QA(dspy.Signature):
query: str = dspy.InputField()
context: str | None = dspy.InputField()
answer: str = dspy.OutputField()
adapter = dspy.XMLAdapter()
messages = adapter.format(QA, [], {"query": "when was Marie Curie born"})
assert len(messages) == 2
assert messages[0]["role"] == "system"
assert messages[1]["role"] == "user"
union_type_repr = "Union[str, NoneType]" if sys.version_info >= (3, 14) else "UnionType[str, NoneType]"
expected_system = (
"Your input fields are:\n"
"1. `query` (str): \n"
f"2. `context` ({union_type_repr}):\n"
"Your output fields are:\n"
"1. `answer` (str):\n"
"All interactions will be structured in the following way, with the appropriate values filled in.\n\n"
"<query>\n{query}\n</query>\n\n"
"<context>\n{context}\n</context>\n\n"
"<answer>\n{answer}\n</answer>\n"
"In adhering to this structure, your objective is: \n"
" Given the fields `query`, `context`, produce the fields `answer`."
)
expected_user = (
"<query>\nwhen was Marie Curie born\n</query>\n\n"
"Respond with the corresponding output fields wrapped in XML tags `<answer>`."
)
assert messages[0]["content"] == expected_system
assert messages[1]["content"] == expected_user
def test_format_system_message():
class MySignature(dspy.Signature):
"""Answer the question with multiple answers and scores"""
question: str = dspy.InputField()
answers: list[str] = dspy.OutputField()
scores: list[float] = dspy.OutputField()
adapter = dspy.XMLAdapter()
system_message = adapter.format_system_message(MySignature)
expected_system_message = """Your input fields are:
1. `question` (str):
Your output fields are:
1. `answers` (list[str]):
2. `scores` (list[float]):
All interactions will be structured in the following way, with the appropriate values filled in.
<question>
{question}
</question>
<answers>
{answers} # note: the value you produce must adhere to the JSON schema: {"type": "array", "items": {"type": "string"}}
</answers>
<scores>
{scores} # note: the value you produce must adhere to the JSON schema: {"type": "array", "items": {"type": "number"}}
</scores>
In adhering to this structure, your objective is:
Answer the question with multiple answers and scores"""
assert system_message == expected_system_message
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/adapters/test_xml_adapter.py",
"license": "MIT License",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/adapters/test_base_type.py | import pydantic
import dspy
def test_basic_extract_custom_type_from_annotation():
class Event(dspy.Type):
event_name: str
start_date_time: str
end_date_time: str | None
location: str | None
class ExtractEvent(dspy.Signature):
"""Extract all events from the email content."""
email: str = dspy.InputField()
event: Event = dspy.OutputField()
assert dspy.Type.extract_custom_type_from_annotation(ExtractEvent.output_fields["event"].annotation) == [Event]
class ExtractEvents(dspy.Signature):
"""Extract all events from the email content."""
email: str = dspy.InputField()
events: list[Event] = dspy.OutputField()
assert dspy.Type.extract_custom_type_from_annotation(ExtractEvents.output_fields["events"].annotation) == [Event]
def test_extract_custom_type_from_annotation_with_nested_type():
class Event(dspy.Type):
event_name: str
start_date_time: str
end_date_time: str | None
location: str | None
class EventIdentifier(dspy.Type):
model_config = pydantic.ConfigDict(frozen=True) # Make it hashable
event_id: str
event_name: str
class ExtractEvents(dspy.Signature):
"""Extract all events from the email content."""
email: str = dspy.InputField()
events: list[dict[EventIdentifier, Event]] = dspy.OutputField()
assert dspy.Type.extract_custom_type_from_annotation(ExtractEvents.output_fields["events"].annotation) == [
EventIdentifier,
Event,
]
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/adapters/test_base_type.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/utils/langchain_tool.py | from typing import TYPE_CHECKING, Any
from dspy.adapters.types.tool import Tool, convert_input_schema_to_tool_args
if TYPE_CHECKING:
from langchain.tools import BaseTool
def convert_langchain_tool(tool: "BaseTool") -> Tool:
"""Build a DSPy tool from a LangChain tool.
This function converts a LangChain tool (either created with @tool decorator
or by subclassing BaseTool) into a DSPy Tool.
Args:
tool: The LangChain tool to convert.
Returns:
A DSPy Tool object.
"""
async def func(**kwargs):
try:
result = await tool.ainvoke(kwargs)
return result
except Exception as e:
raise RuntimeError(f"Failed to call LangChain tool {tool.name}: {e!s}")
# Get args_schema from the tool
# https://python.langchain.com/api_reference/core/tools/langchain_core.tools.base.BaseTool.html#langchain_core.tools.base.BaseTool.args_schema
args_schema = tool.args_schema
args, _, arg_desc = convert_input_schema_to_tool_args(args_schema.model_json_schema())
# The args_schema of Langchain tool is a pydantic model, so we can get the type hints from the model fields
arg_types = {
key: field.annotation if field.annotation is not None else Any
for key, field in args_schema.model_fields.items()
}
return Tool(
func=func,
name=tool.name,
desc=tool.description,
args=args,
arg_types=arg_types,
arg_desc=arg_desc
)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/utils/langchain_tool.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
stanfordnlp/dspy:tests/utils/test_langchain_tool.py | import importlib
import pytest
if importlib.util.find_spec("langchain_core") is None:
pytest.skip(reason="langchain_core is not installed", allow_module_level=True)
from pydantic import BaseModel
from dspy.utils.langchain_tool import convert_langchain_tool
@pytest.mark.asyncio
@pytest.mark.extra
async def test_convert_custom_simple_tool():
from langchain_core.tools import tool
@tool
def add(a: int, b: int) -> int:
"""Add two numbers."""
return a + b
tool = convert_langchain_tool(add)
assert tool.name == "add"
assert tool.desc == "Add two numbers."
assert tool.args == {"a": {"title": "A", "type": "integer"}, "b": {"title": "B", "type": "integer"}}
assert tool.arg_types == {"a": int, "b": int}
assert tool.arg_desc == {"a": "No description provided. (Required)", "b": "No description provided. (Required)"}
assert await tool.acall(a=1, b=2) == 3
@pytest.mark.asyncio
@pytest.mark.extra
async def test_convert_custom_tool_with_custom_class():
from langchain_core.tools import tool
class Profile(BaseModel):
name: str
age: int
@tool
def get_age(profile: Profile) -> int:
"""Get the age of the profile."""
return profile.age
tool = convert_langchain_tool(get_age)
assert tool.name == "get_age"
assert tool.desc == "Get the age of the profile."
assert tool.args == {"profile": {"title": "Profile", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"]}}
assert tool.arg_types == {"profile": Profile}
assert tool.arg_desc == {"profile": "No description provided. (Required)"}
assert await tool.acall(profile=Profile(name="John", age=20)) == 20
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/utils/test_langchain_tool.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/predict/code_act.py | import inspect
import logging
from typing import Callable
import dspy
from dspy.adapters.types.tool import Tool
from dspy.predict.program_of_thought import ProgramOfThought
from dspy.predict.react import ReAct
from dspy.primitives.python_interpreter import PythonInterpreter
from dspy.signatures.signature import Signature, ensure_signature
logger = logging.getLogger(__name__)
class CodeAct(ReAct, ProgramOfThought):
    """
    CodeAct is a module that utilizes the Code Interpreter and predefined tools to solve the problem.

    On each iteration the LM emits a Python snippet (which may call the
    registered tools); the snippet runs in a sandboxed interpreter and its
    output is fed back into the trajectory until the LM marks itself finished,
    after which a final extractor produces the signature's output fields.
    """

    def __init__(self, signature: str | type[Signature], tools: list[Callable], max_iters: int = 5, interpreter: PythonInterpreter | None = None):
        """
        Initializes the CodeAct class with the specified model, temperature, and max tokens.

        Args:
            signature (Union[str, Type[Signature]]): The signature of the module.
            tools (list[Callable]): The tool callables to be used. CodeAct only accepts functions and not callable objects.
            max_iters (int): The maximum number of iterations to generate the answer.
            interpreter: PythonInterpreter instance to use. If None, a new one is instantiated.

        Raises:
            ValueError: If any tool is a callable object rather than a plain function.

        Example:
            ```python
            from dspy.predict import CodeAct

            def factorial(n):
                if n == 1:
                    return 1
                return n * factorial(n-1)

            act = CodeAct("n->factorial", tools=[factorial])
            act(n=5) # 120
            ```
        """
        self.signature = ensure_signature(signature)
        self.max_iters = max_iters
        self.history = []

        tools = [t if isinstance(t, Tool) else Tool(t) for t in tools]
        # forward() replays each tool's source into the sandbox with
        # inspect.getsource, which only works for plain functions.
        if any(
            not inspect.isfunction(tool.func) for tool in tools
        ):
            raise ValueError("CodeAct only accepts functions and not callable objects.")
        tools = {tool.name: tool for tool in tools}

        instructions = self._build_instructions(self.signature, tools)

        # Signature for the iterative code-generation step: inputs + running
        # trajectory in, generated code + finished flag out.
        codeact_signature = (
            dspy.Signature({**self.signature.input_fields}, "\n".join(instructions))
            .append("trajectory", dspy.InputField(), type_=str)
            .append("generated_code", dspy.OutputField(desc="Python code that when executed, produces output relevant to answering the question"), type_=str)
            .append("finished", dspy.OutputField(desc="a boolean flag to determine if the process is done"), type_=bool)
        )

        # Signature for the final extraction step: original outputs, conditioned
        # on the full trajectory.
        extract_signature = dspy.Signature(
            {**self.signature.input_fields, **self.signature.output_fields},
            self.signature.instructions,
        ).append("trajectory", dspy.InputField(), type_=str)

        self.tools: dict[str, Tool] = tools
        self.codeact = dspy.Predict(codeact_signature)
        self.extractor = dspy.ChainOfThought(extract_signature)
        # Raises an exception if dspy cannot find an available deno instance.
        self.interpreter = interpreter or PythonInterpreter()

    def _build_instructions(self, signature, tools):
        """Compose the instruction text for the code-generation signature."""
        instructions = [f"{signature.instructions}\n"] if signature.instructions else []
        inputs = ", ".join([f"`{k}`" for k in signature.input_fields.keys()])
        outputs = ", ".join([f"`{k}`" for k in signature.output_fields.keys()])

        instructions.append(
            f"You are an intelligent agent. For each episode, you will receive the fields {inputs} as input.\n"
            f"Your goal is to generate executable Python code that collects any necessary information for producing {outputs}.\n"
            "For each iteration, you will generate a code snippet that either solves the task or progresses towards the solution.\n"
            "Ensure any output you wish to extract from the code is printed to the console. The code should be enclosed in a fenced code block.\n"
            f"When all information for producing the outputs ({outputs}) are available to be extracted, mark `finished=True` besides the final Python code.\n"
            "You have access to the Python Standard Library and the following functions:"
        )

        for idx, tool in enumerate(tools.values()):
            instructions.append(f"({idx + 1}) {tool}")

        return instructions

    def forward(self, **kwargs):
        """Run the generate/execute loop, then extract the final outputs.

        Returns a dspy.Prediction carrying the trajectory plus the signature's
        output fields.
        """
        # Make the registered tools callable inside the sandbox by replaying
        # their source code into the interpreter.
        for tool in self.tools.values():
            self.interpreter(inspect.getsource(tool.func))

        trajectory = {}
        max_iters = kwargs.pop("max_iters", self.max_iters)
        try:
            for idx in range(max_iters):
                code_data = self.codeact(trajectory=trajectory, **kwargs)
                output = None
                code, error = self._parse_code(code_data)

                if error:
                    trajectory[f"observation_{idx}"] = f"Failed to parse the generated code: {error}"
                    continue

                trajectory[f"generated_code_{idx}"] = code
                output, error = self._execute_code(code)

                if not error:
                    trajectory[f"code_output_{idx}"] = output
                else:
                    trajectory[f"observation_{idx}"] = f"Failed to execute the generated code: {error}"

                if code_data.finished:
                    break

            extract = self._call_with_potential_trajectory_truncation(self.extractor, trajectory, **kwargs)
        finally:
            # Always release the deno subprocess — previously an exception in
            # parsing/execution/extraction leaked the interpreter.
            self.interpreter.shutdown()
        return dspy.Prediction(trajectory=trajectory, **extract)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/predict/code_act.py",
"license": "MIT License",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/predict/test_code_act.py | import pytest
import dspy
from dspy import Signature
from dspy.predict import CodeAct
from dspy.utils import DummyLM
# Every test in this module drives PythonInterpreter, which needs a deno runtime.
pytestmark = pytest.mark.deno
# Simple question->answer signature used as the CodeAct task in the tests below.
# NOTE: intentionally no class docstring — dspy treats a Signature docstring as
# the signature's instructions, which would change the prompts under test.
class BasicQA(Signature):
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
def add(a: float, b: float) -> float:
    "add two numbers"
    # Kept trivially simple: the docstring above doubles as the tool
    # description CodeAct shows to the LM.
    total = a + b
    return total
def test_codeact_code_generation():
    """CodeAct runs one generated snippet, then extracts the final answer."""
    canned_responses = [
        {
            "reasoning": "Reason_A",
            "generated_code": "```python\nresult = add(1,1)\nprint(result)\n```",
            "finished": True,
        },
        {"reasoning": "Reason_B", "answer": "2"},
    ]
    dspy.configure(lm=DummyLM(canned_responses))

    program = CodeAct(BasicQA, tools=[add])
    prediction = program(question="What is 1+1?")

    assert prediction.answer == "2"
    expected_trajectory = {
        "generated_code_0": "result = add(1,1)\nprint(result)",
        "code_output_0": '"2\\n"',
    }
    assert prediction.trajectory == expected_trajectory
    # forward() shuts the interpreter down, so no deno subprocess lingers.
    assert program.interpreter.deno_process is None
# Signature with two output fields, used to check that CodeAct can populate
# multiple outputs from a single code execution.
# (No class docstring: dspy would treat it as the signature's instructions.)
class ExtremumFinder(Signature):
    input_list = dspy.InputField()
    maximum = dspy.OutputField(desc="The maximum of the given numbers")
    minimum = dspy.OutputField(desc="The minimum of the given numbers")
def extract_maximum_minimum(input_list: str) -> dict[str, float]:
    # Parse the comma-separated numbers, then report both extremes.
    numbers = [float(token) for token in input_list.split(",")]
    return {"maximum": max(numbers), "minimum": min(numbers)}
def test_codeact_support_multiple_fields():
    """CodeAct can fill several output fields from one code run."""
    dummy_lm = DummyLM(
        [
            {
                "reasoning": "Reason_A",
                "generated_code": "```python\nresult = extract_maximum_minimum('2, 3, 5, 6')\nprint(result)\n```",
                "finished": True,
            },
            {"reasoning": "Reason_B", "maximum": "6", "minimum": "2"},
        ]
    )
    dspy.configure(lm=dummy_lm)

    program = CodeAct(ExtremumFinder, tools=[extract_maximum_minimum])
    prediction = program(input_list="2, 3, 5, 6")

    assert (prediction.maximum, prediction.minimum) == ("6", "2")
    assert prediction.trajectory == {
        "generated_code_0": "result = extract_maximum_minimum('2, 3, 5, 6')\nprint(result)",
        "code_output_0": '"{\'maximum\': 6.0, \'minimum\': 2.0}\\n"',
    }
    # Interpreter is shut down by forward().
    assert program.interpreter.deno_process is None
def test_codeact_code_parse_failure():
    """A syntactically invalid snippet is recorded as an observation, then retried."""
    bad_then_good = [
        {
            "reasoning": "Reason_A",
            "generated_code": "```python\nparse(error\n```",
            "finished": False,
        },
        {
            "reasoning": "Reason_A",
            "generated_code": "```python\nresult = add(1,1)\nprint(result)\n```",
            "finished": True,
        },
        {"reasoning": "Reason_B", "answer": "2"},
    ]
    dspy.configure(lm=DummyLM(bad_then_good))

    program = CodeAct(BasicQA, tools=[add])
    prediction = program(question="What is 1+1?")

    assert prediction.answer == "2"
    assert prediction.trajectory == {
        "generated_code_0": "parse(error",
        "observation_0": "Failed to execute the generated code: Invalid Python syntax. message: ",
        "generated_code_1": "result = add(1,1)\nprint(result)",
        "code_output_1": '"2\\n"',
    }
    assert program.interpreter.deno_process is None
def test_codeact_code_execution_failure():
    """A runtime error in generated code is surfaced as an observation, then retried."""
    failing_then_passing = [
        {
            "reasoning": "Reason_A",
            "generated_code": "```python\nunknown+1\n```",
            "finished": False,
        },
        {
            "reasoning": "Reason_A",
            "generated_code": "```python\nresult = add(1,1)\nprint(result)\n```",
            "finished": True,
        },
        {"reasoning": "Reason_B", "answer": "2"},
    ]
    dspy.configure(lm=DummyLM(failing_then_passing))

    program = CodeAct(BasicQA, tools=[add])
    prediction = program(question="What is 1+1?")

    assert prediction.answer == "2"
    assert prediction.trajectory == {
        "generated_code_0": "unknown+1",
        "observation_0": 'Failed to execute the generated code: NameError: ["name \'unknown\' is not defined"]',
        "generated_code_1": "result = add(1,1)\nprint(result)",
        "code_output_1": '"2\\n"',
    }
    assert program.interpreter.deno_process is None
class CustomTool:
    """Callable object (not a plain function) used to exercise CodeAct's tool validation."""

    def __call__(self, a: float, b: float) -> float:
        total = a + b
        return total
def test_codeact_tool_validation():
    """CodeAct must reject callables that are not plain functions."""
    expected_message = "CodeAct only accepts functions and not callable objects."
    with pytest.raises(ValueError, match=expected_message):
        CodeAct(BasicQA, tools=[CustomTool()])
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/predict/test_code_act.py",
"license": "MIT License",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/utils/inspect_history.py | def _green(text: str, end: str = "\n"):
return "\x1b[32m" + str(text).lstrip() + "\x1b[0m" + end
def _red(text: str, end: str = "\n"):
return "\x1b[31m" + str(text) + "\x1b[0m" + end
def _blue(text: str, end: str = "\n"):
return "\x1b[34m" + str(text) + "\x1b[0m" + end
def pretty_print_history(history, n: int = 1):
    """Prints the last n prompts and their completions.

    Each history item is a dict with "messages" (or a raw "prompt"), "outputs",
    and optionally "timestamp". Message content may be a plain string or a list
    of OpenAI-style content parts (text, image_url, input_audio, file).
    """
    for item in history[-n:]:
        messages = item["messages"] or [{"role": "user", "content": item["prompt"]}]
        outputs = item["outputs"]
        timestamp = item.get("timestamp", "Unknown time")

        print("\n\n\n")
        print("\x1b[34m" + f"[{timestamp}]" + "\x1b[0m" + "\n")

        for msg in messages:
            print(_red(f"{msg['role'].capitalize()} message:"))
            if isinstance(msg["content"], str):
                print(msg["content"].strip())
            else:
                if isinstance(msg["content"], list):
                    for c in msg["content"]:
                        if c["type"] == "text":
                            print(c["text"].strip())
                        elif c["type"] == "image_url":
                            image_str = ""
                            # Elide base64 payloads; show URLs verbatim.
                            if "base64" in c["image_url"].get("url", ""):
                                len_base64 = len(c["image_url"]["url"].split("base64,")[1])
                                image_str = (
                                    f"<{c['image_url']['url'].split('base64,')[0]}base64,"
                                    f"<IMAGE BASE 64 ENCODED({len_base64!s})>"
                                )
                            else:
                                image_str = f"<image_url: {c['image_url']['url']}>"
                            print(_blue(image_str.strip()))
                        elif c["type"] == "input_audio":
                            audio_format = c["input_audio"]["format"]
                            len_audio = len(c["input_audio"]["data"])
                            audio_str = f"<audio format='{audio_format}' base64-encoded, length={len_audio}>"
                            print(_blue(audio_str.strip()))
                        elif c["type"] == "file" or c["type"] == "input_file":
                            file = c.get("file", c.get("input_file", {}))
                            filename = file.get("filename", "")
                            file_id = file.get("file_id", "")
                            file_data = file.get("file_data", "")
                            # Bug fix: `filename` was read but never shown — the
                            # f-string printed the literal "name:(unknown)".
                            file_str = f"<file: name:{filename}, id:{file_id}, data_length:{len(file_data)}>"
                            print(_blue(file_str.strip()))
        print("\n")

        if isinstance(outputs[0], dict):
            if outputs[0]["text"]:
                print(_red("Response:"))
                print(_green(outputs[0]["text"].strip()))

            if outputs[0].get("tool_calls"):
                print(_red("Tool calls:"))
                for tool_call in outputs[0]["tool_calls"]:
                    print(_green(f"{tool_call['function']['name']}: {tool_call['function']['arguments']}"))
        else:
            print(_red("Response:"))
            print(_green(outputs[0].strip()))

        if len(outputs) > 1:
            choices_text = f" \t (and {len(outputs) - 1} other completions)"
            print(_red(choices_text, end=""))

    print("\n\n\n")
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/utils/inspect_history.py",
"license": "MIT License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:dspy/adapters/types/audio.py | import base64
import io
import mimetypes
import os
from typing import Any, Union
import pydantic
import requests
from dspy.adapters.types.base_type import Type
try:
import soundfile as sf
SF_AVAILABLE = True
except ImportError:
SF_AVAILABLE = False
def _normalize_audio_format(audio_format: str) -> str:
"""Removes 'x-' prefixes from audio format strings."""
return audio_format.removeprefix("x-")
class Audio(Type):
    """A dspy custom type carrying base64-encoded audio.

    `data` is the base64-encoded payload and `audio_format` is the short
    format name (e.g. "wav", "mp3"). Instances are frozen (immutable) and
    reject unknown fields.
    """

    # base64-encoded audio payload
    data: str
    # short format name, e.g. "wav" or "mp3"
    audio_format: str
    model_config = pydantic.ConfigDict(
        frozen=True,
        extra="forbid",
    )

    def format(self) -> list[dict[str, Any]]:
        """Format as an OpenAI-style `input_audio` content part."""
        try:
            data = self.data
        except Exception as e:
            raise ValueError(f"Failed to format audio for DSPy: {e}")
        return [{
            "type": "input_audio",
            "input_audio": {
                "data": data,
                "format": self.audio_format
            }
        }]

    @pydantic.model_validator(mode="before")
    @classmethod
    def validate_input(cls, values: Any) -> Any:
        """
        Validate input for Audio, expecting 'data' and 'audio_format' keys in dictionary.
        """
        if isinstance(values, cls):
            return {"data": values.data, "audio_format": values.audio_format}
        # Delegate all other inputs (paths, URLs, data URIs, arrays, bytes, ...)
        # to encode_audio, which returns the expected dict shape.
        return encode_audio(values)

    @classmethod
    def from_url(cls, url: str) -> "Audio":
        """
        Download an audio file from URL and encode it as base64.

        Raises ValueError if the response's Content-Type is not audio/*.
        """
        response = requests.get(url)
        response.raise_for_status()
        mime_type = response.headers.get("Content-Type", "audio/wav")
        if not mime_type.startswith("audio/"):
            raise ValueError(f"Unsupported MIME type for audio: {mime_type}")
        # Derive the short format name from the MIME subtype (e.g. "audio/x-wav" -> "wav").
        audio_format = mime_type.split("/")[1]
        audio_format = _normalize_audio_format(audio_format)
        encoded_data = base64.b64encode(response.content).decode("utf-8")
        return cls(data=encoded_data, audio_format=audio_format)

    @classmethod
    def from_file(cls, file_path: str) -> "Audio":
        """
        Read local audio file and encode it as base64.

        Raises ValueError if the path does not exist or its guessed MIME type
        is not audio/*.
        """
        if not os.path.isfile(file_path):
            raise ValueError(f"File not found: {file_path}")

        mime_type, _ = mimetypes.guess_type(file_path)
        if not mime_type or not mime_type.startswith("audio/"):
            raise ValueError(f"Unsupported MIME type for audio: {mime_type}")

        with open(file_path, "rb") as file:
            file_data = file.read()

        # Derive the short format name from the MIME subtype.
        audio_format = mime_type.split("/")[1]
        audio_format = _normalize_audio_format(audio_format)
        encoded_data = base64.b64encode(file_data).decode("utf-8")
        return cls(data=encoded_data, audio_format=audio_format)

    @classmethod
    def from_array(
        cls, array: Any, sampling_rate: int, format: str = "wav"
    ) -> "Audio":
        """
        Process numpy-like array and encode it as base64. Uses sampling rate and audio format for encoding.

        Requires the optional `soundfile` dependency; raises ImportError otherwise.
        """
        if not SF_AVAILABLE:
            raise ImportError("soundfile is required to process audio arrays.")

        byte_buffer = io.BytesIO()
        # Write 16-bit PCM into an in-memory buffer, then base64 the bytes.
        sf.write(
            byte_buffer,
            array,
            sampling_rate,
            format=format.upper(),
            subtype="PCM_16",
        )
        encoded_data = base64.b64encode(byte_buffer.getvalue()).decode("utf-8")
        return cls(data=encoded_data, audio_format=format)

    def __str__(self) -> str:
        # serialize_model (from Type) wraps the formatted content in the
        # custom-type sentinel markers for later splitting by the adapter.
        return self.serialize_model()

    def __repr__(self) -> str:
        """Repr that elides the base64 payload, showing only its length."""
        length = len(self.data)
        return f"Audio(data=<AUDIO_BASE_64_ENCODED({length})>, audio_format='{self.audio_format}')"
def encode_audio(audio: Union[str, bytes, dict, "Audio", Any], sampling_rate: int = 16000, format: str = "wav") -> dict:
    """Normalize any supported audio input into a {'data', 'audio_format'} dict.

    Accepts an already-encoded dict, an Audio instance, a data URI, a local
    file path, an http(s) URL, a numpy-like array (when soundfile is
    installed), or raw bytes combined with *format*.
    """
    # Already in the target shape: pass through untouched.
    if isinstance(audio, dict) and "data" in audio and "audio_format" in audio:
        return audio
    if isinstance(audio, Audio):
        return {"data": audio.data, "audio_format": audio.audio_format}
    if isinstance(audio, str) and audio.startswith("data:audio/"):
        try:
            header, b64data = audio.split(",", 1)
            mime = header.split(";")[0].split(":")[1]
            audio_format = _normalize_audio_format(mime.split("/")[1])
            return {"data": b64data, "audio_format": audio_format}
        except Exception as e:
            raise ValueError(f"Malformed audio data URI: {e}")
    if isinstance(audio, str) and os.path.isfile(audio):
        loaded = Audio.from_file(audio)
        return {"data": loaded.data, "audio_format": loaded.audio_format}
    if isinstance(audio, str) and audio.startswith("http"):
        downloaded = Audio.from_url(audio)
        return {"data": downloaded.data, "audio_format": downloaded.audio_format}
    if SF_AVAILABLE and hasattr(audio, "shape"):
        converted = Audio.from_array(audio, sampling_rate=sampling_rate, format=format)
        return {"data": converted.data, "audio_format": converted.audio_format}
    if isinstance(audio, bytes):
        return {"data": base64.b64encode(audio).decode("utf-8"), "audio_format": format}
    raise ValueError(f"Unsupported type for encode_audio: {type(audio)}")
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/adapters/types/audio.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/signatures/test_custom_types.py |
import pydantic
import pytest
import dspy
from dspy import Signature
def test_basic_custom_type_resolution():
    """Custom types resolve via explicit mapping or from the caller's scope."""
    class CustomType(pydantic.BaseModel):
        value: str

    # Explicit mapping through the custom_types argument.
    explicit_sig = Signature(
        "input: CustomType -> output: str",
        custom_types={"CustomType": CustomType},
    )
    assert explicit_sig.input_fields["input"].annotation == CustomType

    # Automatic resolution from the calling frame's locals.
    auto_sig = Signature("input: CustomType -> output: str")
    assert auto_sig.input_fields["input"].annotation == CustomType
def test_type_alias_for_nested_types():
    """Nested types resolve via a local alias or dotted-name syntax."""
    class Container:
        class NestedType(pydantic.BaseModel):
            value: str

    # A local alias makes the nested class visible by a bare name.
    NestedType = Container.NestedType
    sig_via_alias = Signature("input: str -> output: NestedType")
    assert sig_via_alias.output_fields["output"].annotation == Container.NestedType

    class Container2:
        class Query(pydantic.BaseModel):
            text: str

        class Score(pydantic.BaseModel):
            score: float

    # Dotted names are resolved directly, no alias needed.
    dotted_sig = dspy.Signature("query: Container2.Query -> score: Container2.Score")
    assert dotted_sig.output_fields["score"].annotation == Container2.Score
class GlobalCustomType(pydantic.BaseModel):
    """A type defined at module level for testing module-level resolution."""
    # required payload
    value: str
    # optional free-form annotation
    notes: str = ""
def test_module_level_type_resolution():
    """Types defined at module scope are auto-resolved by Signature parsing."""
    sig = Signature("name: str -> result: GlobalCustomType")
    resolved_annotation = sig.output_fields["result"].annotation
    assert resolved_annotation == GlobalCustomType
# Create module-level nested class for testing
# OuterContainer.InnerType is referenced by dotted name in several tests below.
class OuterContainer:
    class InnerType(pydantic.BaseModel):
        name: str
        value: int
def test_recommended_patterns():
    """Test recommended patterns for working with custom types in signatures."""

    # PATTERN 1: Local type with auto-resolution
    class LocalType(pydantic.BaseModel):
        value: str

    sig1 = Signature("input: str -> output: LocalType")
    assert sig1.output_fields["output"].annotation == LocalType

    # PATTERN 2: Module-level type with auto-resolution
    sig2 = Signature("input: str -> output: GlobalCustomType")
    assert sig2.output_fields["output"].annotation == GlobalCustomType

    # PATTERN 3: Nested type with dot notation
    sig3 = Signature("input: str -> output: OuterContainer.InnerType")
    assert sig3.output_fields["output"].annotation == OuterContainer.InnerType

    # PATTERN 4: Nested type using alias
    InnerTypeAlias = OuterContainer.InnerType
    sig4 = Signature("input: str -> output: InnerTypeAlias")
    assert sig4.output_fields["output"].annotation == InnerTypeAlias

    # PATTERN 5: Nested type with dot notation
    # NOTE(review): this is an exact duplicate of PATTERN 3 — presumably a
    # distinct pattern was intended here; confirm and replace or drop.
    sig5 = Signature("input: str -> output: OuterContainer.InnerType")
    assert sig5.output_fields["output"].annotation == OuterContainer.InnerType
def test_expected_failure():
    """A bare `InnerType` is not in scope (only OuterContainer.InnerType is),
    so signature parsing must fail with ValueError."""
    unresolvable = "input: str -> output: InnerType"
    with pytest.raises(ValueError):
        Signature(unresolvable)
def test_module_type_resolution():
    """Dotted custom-type names resolve inside a dspy.Module's Predict signature."""
    class TestModule(dspy.Module):
        def __init__(self):
            super().__init__()
            self.predict = dspy.Predict("input: str -> output: OuterContainer.InnerType")

        # Renamed from `predict`: the instance attribute assigned in __init__
        # shadowed the method of the same name, leaving it dead code. `forward`
        # is the conventional dspy.Module entry point.
        def forward(self, input: str) -> str:
            return input

    module = TestModule()
    sig = module.predict.signature
    assert sig.output_fields["output"].annotation == OuterContainer.InnerType
def test_basic_custom_type_resolution_compact():
    """Compact variant of the explicit/automatic custom-type resolution checks.

    Renamed: this function previously reused the name
    `test_basic_custom_type_resolution`, shadowing the earlier test of the same
    name so pytest only ever collected one of them.
    """
    class CustomType(pydantic.BaseModel):
        value: str

    # Explicit mapping.
    sig = Signature("input: CustomType -> output: str", custom_types={"CustomType": CustomType})
    assert sig.input_fields["input"].annotation == CustomType

    # Auto-resolution from the caller's scope.
    sig = Signature("input: CustomType -> output: str")
    assert sig.input_fields["input"].annotation == CustomType
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/signatures/test_custom_types.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/adapters/types/base_type.py | import json
import re
from typing import TYPE_CHECKING, Any, Optional, get_args, get_origin
import json_repair
import pydantic
from litellm import ModelResponseStream
if TYPE_CHECKING:
from dspy.clients.lm import LM
from dspy.signatures.signature import Signature
# Sentinel markers wrapped around serialized custom-type content so that
# split_message_content_for_custom_types can find and split it back out later.
CUSTOM_TYPE_START_IDENTIFIER = "<<CUSTOM-TYPE-START-IDENTIFIER>>"
CUSTOM_TYPE_END_IDENTIFIER = "<<CUSTOM-TYPE-END-IDENTIFIER>>"
class Type(pydantic.BaseModel):
    """Base class for custom types usable in DSPy signatures.

    Subclasses (e.g. ``dspy.Image``) override ``format`` to return either a raw
    string or a list of content-part dictionaries in the same shape as the
    OpenAI API user message's ``content`` array.

    Example:
    ```python
    class Image(Type):
        url: str

        def format(self) -> list[dict[str, Any]]:
            return [{"type": "image_url", "image_url": {"url": self.url}}]
    ```
    """

    def format(self) -> list[dict[str, Any]] | str:
        raise NotImplementedError

    @classmethod
    def description(cls) -> str:
        """Human-readable description of the custom type (empty by default)."""
        return ""

    @classmethod
    def extract_custom_type_from_annotation(cls, annotation):
        """Collect every subclass of ``cls`` referenced anywhere in ``annotation``.

        Handles arbitrary nesting, e.g. finds ``Tool`` inside
        ``list[dict[str, Tool]]``.
        """
        # Direct hit: the annotation itself is a subclass of this type. On
        # Python 3.11 a parameterized generic like `list[dict[str, Event]]`
        # makes this raise TypeError (it passes isinstance(annotation, type)
        # on 3.10 but fails on 3.11), so treat that as "no direct match" and
        # fall through to the recursive scan below.
        try:
            if isinstance(annotation, type) and issubclass(annotation, cls):
                return [annotation]
        except TypeError:
            pass

        if get_origin(annotation) is None:
            return []

        # Recurse through every type argument of the generic annotation.
        return [
            match
            for type_arg in get_args(annotation)
            for match in cls.extract_custom_type_from_annotation(type_arg)
        ]

    @pydantic.model_serializer()
    def serialize_model(self):
        formatted = self.format()
        if not isinstance(formatted, list):
            return formatted
        # Wrap list-shaped content in sentinel markers so the adapter can later
        # split it back out of the flattened user-message string.
        return (
            f"{CUSTOM_TYPE_START_IDENTIFIER}{json.dumps(formatted, ensure_ascii=False)}{CUSTOM_TYPE_END_IDENTIFIER}"
        )

    @classmethod
    def adapt_to_native_lm_feature(
        cls,
        signature: type["Signature"],
        field_name: str,
        lm: "LM",
        lm_kwargs: dict[str, Any],
    ) -> type["Signature"]:
        """Adapt the custom type to a native LM feature when one is available.

        When the LM and configuration support a matching native feature (native
        tool calling, native reasoning, ...), implementations may rewrite the
        signature and update ``lm_kwargs`` in place to enable it.

        Args:
            signature: The DSPy signature for the LM call.
            field_name: The signature field to adapt.
            lm: The LM instance.
            lm_kwargs: Keyword arguments for the LM call; may be mutated.

        Returns:
            The adapted signature, or the original one when no native support
            exists (the default).
        """
        return signature

    @classmethod
    def is_streamable(cls) -> bool:
        """Whether this custom type can be parsed out of a response stream."""
        return False

    @classmethod
    def parse_stream_chunk(cls, chunk: ModelResponseStream) -> Optional["Type"]:
        """Parse ``chunk`` into this type; ``None`` when the chunk is unrelated."""
        return None

    @classmethod
    def parse_lm_response(cls, response: str | dict[str, Any]) -> Optional["Type"]:
        """Parse a complete LM response into this type (``None`` by default)."""
        return None
def split_message_content_for_custom_types(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Split user message content into a list of content blocks.

    This method splits each user message's content in the `messages` list to be a list of content block, so that
    the custom types like `dspy.Image` can be properly formatted for better quality. For example, the split content
    may look like below if the user message has a `dspy.Image` object:

    ```
    [
        {"type": "text", "text": "{text_before_image}"},
        {"type": "image_url", "image_url": {"url": "{image_url}"}},
        {"type": "text", "text": "{text_after_image}"},
    ]
    ```

    This is implemented by finding the `<<CUSTOM-TYPE-START-IDENTIFIER>>` and `<<CUSTOM-TYPE-END-IDENTIFIER>>`
    in the user message content and splitting the content around them. The `<<CUSTOM-TYPE-START-IDENTIFIER>>`
    and `<<CUSTOM-TYPE-END-IDENTIFIER>>` are the reserved identifiers for the custom types as in `dspy.Type`.

    Args:
        messages: a list of messages sent to the LM. The format is the same as [OpenAI API's messages
            format](https://platform.openai.com/docs/guides/chat-completions/response-format).

    Returns:
        A list of messages with the content split into a list of content blocks around custom types content.
    """
    for message in messages:
        if message["role"] != "user":
            # Custom type messages are only in user messages
            continue

        pattern = rf"{CUSTOM_TYPE_START_IDENTIFIER}(.*?){CUSTOM_TYPE_END_IDENTIFIER}"
        result = []
        last_end = 0
        # DSPy adapter always formats user input into a string content before custom type splitting
        content: str = message["content"]

        for match in re.finditer(pattern, content, re.DOTALL):
            start, end = match.span()

            # Add text before the current block
            if start > last_end:
                result.append({"type": "text", "text": content[last_end:start]})

            # Parse the JSON inside the block
            custom_type_content = match.group(1).strip()
            parsed = None
            # Try strict json first, then the doubly-quoted variant (lists/dicts
            # of dspy.Type get encoded twice), then lenient repair as a last
            # resort. json_repair.loads does not raise on bad input.
            for parse_fn in [json.loads, _parse_doubly_quoted_json, json_repair.loads]:
                try:
                    parsed = parse_fn(custom_type_content)
                    break
                except json.JSONDecodeError:
                    continue

            if parsed:
                for custom_type_content in parsed:
                    result.append(custom_type_content)
            else:
                # fallback to raw string if it's not valid JSON
                result.append({"type": "text", "text": custom_type_content})

            last_end = end

        if last_end == 0:
            # No custom type found, return the original message
            continue

        # Add any remaining text after the last match
        if last_end < len(content):
            result.append({"type": "text", "text": content[last_end:]})

        message["content"] = result

    return messages
def _parse_doubly_quoted_json(json_str: str) -> Any:
"""
Parse a doubly quoted JSON string into a Python dict.
`dspy.Type` can be json-encoded twice if included in either list or dict, e.g., `list[dspy.experimental.Document]`
"""
return json.loads(json.loads(f'"{json_str}"'))
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/adapters/types/base_type.py",
"license": "MIT License",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:dspy/teleprompt/grpo.py | import logging
import random
import time
from collections import Counter, deque
from typing import Any, Callable, Literal
from dspy.adapters.base import Adapter
from dspy.adapters.chat_adapter import ChatAdapter
from dspy.adapters.xml_adapter import XMLAdapter
from dspy.clients.lm import LM
from dspy.clients.utils_finetune import GRPOGroup, GRPOStatus, TrainDataFormat
from dspy.dsp.utils.settings import settings
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.example import Example
from dspy.primitives.module import Module
from dspy.teleprompt.bootstrap_finetune import (
FinetuneTeleprompter,
all_predictors_have_lms,
assert_structural_equivalency,
)
from dspy.teleprompt.bootstrap_trace import FailedPrediction, bootstrap_trace_data
logger = logging.getLogger(__name__)
class GRPO(FinetuneTeleprompter):
    def __init__(
        self,
        metric: Callable | None = None,
        multitask: bool = True,
        train_kwargs: dict[str, Any] | dict[LM, dict[str, Any]] | None = None,
        adapter: Adapter | dict[LM, Adapter] | None = None,
        exclude_demos: bool = False,
        num_threads: int = 6,
        num_train_steps: int = 100,
        seed: int = 0,
        num_dspy_examples_per_grpo_step: int = 1,
        num_rollouts_per_grpo_step: int = 1,
        use_train_as_val: bool = False,
        num_steps_for_val: int = 5,
        report_train_scores: bool = False,
        failure_score: float = 0,
        format_failure_score: float = -1,
        variably_invoked_predictor_grouping_mode: Literal["truncate"] | Literal["fill"] | Literal["ragged"] = "truncate",
        variably_invoked_predictor_fill_strategy: Literal["randint"] | Literal["max"] | None = None,
    ):
        """Configure a GRPO training run.

        Args:
            metric: Scoring function used to reward rollouts (also passed to the
                validation Evaluate call).
            multitask: Train one shared job across all predictors. Must currently
                be True (independent per-predictor jobs are unsupported).
            train_kwargs: Keyword arguments for the finetune backend, either
                shared or keyed per LM.
            adapter: Prompt-formatting adapter, shared or keyed per LM.
            exclude_demos: Must currently be True; including demos is unsupported.
            num_threads: Thread count used for evaluation.
            num_train_steps: Number of GRPO update steps.
            seed: Seed for the internal RNG.
            num_dspy_examples_per_grpo_step: Training examples sampled per step.
            num_rollouts_per_grpo_step: Rollouts generated per sampled example.
            use_train_as_val: Use train scores as validation (requires
                report_train_scores=True).
            num_steps_for_val: Run validation every this-many training steps.
            report_train_scores: Also report scores on the train set at each
                validation point.
            failure_score: Score assigned to failed rollouts; must be strictly
                greater than format_failure_score.
            format_failure_score: Score for rollouts that break the dspy output
                format; [format_failure_score, failure_score] is the formatting
                reward range.
            variably_invoked_predictor_grouping_mode: How to group rollouts when
                a predictor is invoked a variable number of times.
            variably_invoked_predictor_fill_strategy: Required when grouping mode
                is "fill"; must be "randint" or "max".
        """
        super().__init__(train_kwargs=train_kwargs)
        self.metric = metric
        self.multitask = multitask
        self.adapter: dict[LM, Adapter] = self.convert_to_lm_dict(adapter)
        self.exclude_demos = exclude_demos
        self.num_threads = num_threads
        self.num_train_steps = num_train_steps
        self.rng = random.Random(seed)
        self.num_dspy_examples_per_grpo_step = num_dspy_examples_per_grpo_step
        self.num_rollouts_per_grpo_step = num_rollouts_per_grpo_step
        self.use_train_as_val = use_train_as_val
        self.num_steps_for_val = num_steps_for_val
        self.report_train_scores = report_train_scores
        self.failure_score = failure_score
        self.format_failure_score = format_failure_score

        assert failure_score > format_failure_score, "failure_score must be greater than format_failure_score since the range [format_failure_score, failure_score] is used to provide dspy formatting rewards"

        if self.use_train_as_val:
            assert report_train_scores, "If use_train_as_val is True, report_train_scores must be True."

        assert exclude_demos, "exclude_demos==False is not supported yet. Please set it to True."
        assert multitask, "independent GRPO training jobs for each predictor in the student program is not supported yet. Please set multitask=True."

        # The backend will be called with a batch of (num_dspy_examples_per_grpo_step * num_rollouts_per_grpo_step * num_predictors) per training set if multitask is True
        # If multitask is False, the backend will be called with a batch of (num_dspy_examples_per_grpo_step * num_rollouts_per_grpo_step) per training job
        self.variably_invoked_predictor_grouping_mode = variably_invoked_predictor_grouping_mode
        if variably_invoked_predictor_grouping_mode == "fill":
            assert variably_invoked_predictor_fill_strategy is not None, "variably_invoked_predictor_fill_strategy must be set when variably_invoked_predictor_grouping_mode is 'fill'"
            assert variably_invoked_predictor_fill_strategy in ["randint", "max"], "variably_invoked_predictor_fill_strategy must be either 'randint' or 'max'"
        self.variably_invoked_predictor_fill_strategy = variably_invoked_predictor_fill_strategy

        # Bookkeeping for epoch-wise shuffling and batch selection during training.
        self.shuffled_trainset_ids = []
        self.epoch = -1
        self.id_freqs = Counter()
        self.fulfilled_batch_ids = []
        self.pending_batch_ids = []
    def validate_trace_data_and_log_issues(
        self,
        trace_data: list[list[list[dict[str, Any]]]],
        subsample_training_dataset: list[Example],
        num_teachers: int,
        num_samples_per_input: int,
        pred_signature_hash_to_ind: dict[int, int],
    ):
        """Sanity-check the shape and contents of collected rollout trace data.

        Raises AssertionError on structural mismatches; logs warnings for the
        known failure mode where model output breaks the dspy response format
        and yields an empty (or short) trace for an example/teacher pair.
        """
        # At this point, trace_data: list[example_idx -> list[teacher_idx -> [num_samples_per_input * Dict(example, prediction, trace, example_ind, score)]]]
        # Shape of trace is: [dspy_module_invocation_idx -> Tuple[Predictor, PredictorInputs, Prediction]]
        assert len(trace_data) == len(subsample_training_dataset), f"Trace data length {len(trace_data)} does not match the number of examples {len(subsample_training_dataset)}"
        assert len(trace_data[0]) == num_teachers, f"Trace data length {len(trace_data[0])} does not match the number of teachers {num_teachers}"

        # TODO(GRPO Team): Ideally, once the dspy format issue is fixed, this change should be reverted back to being a normal assert.
        if len(trace_data[0][0]) == 0:
            logger.warning(f"Trace data for example {0} and teacher {0} is empty. This is likely due to all examples in the training set input, resulting in the model generating output not following the dspy response format.")
        elif len(trace_data[0][0]) != num_samples_per_input:
            logger.warning(f"Trace data length {len(trace_data[0][0])} does not match the expected number of samples per input {num_samples_per_input}")

        assert "trace" in trace_data[0][0][0], "Trace data does not contain the 'trace' key"
        assert len(trace_data[0][0][0]["trace"]) > 0, "Trace data is empty"
        assert len(trace_data[0][0][0]["trace"][0]) == 3, f"Trace tuple length {len(trace_data[0][0][0]['trace'][0])} does not match the expected length 3"

        # Every predictor seen in any trace must map back to a known signature.
        for example_data in trace_data:
            for teacher_data in example_data:
                for sample in teacher_data:
                    for t in sample["trace"]:
                        assert hash(t[0].signature) in pred_signature_hash_to_ind
def report_validation_metrics(self, student, trainset, valset, logger, step_idx=-1):
    """Evaluate `student` and log scores, honoring the validation cadence.

    Runs only at step_idx == -1 (before training), at the final step, and
    every `num_steps_for_val` steps; otherwise returns immediately. Which
    sets get evaluated depends on `valset`, `report_train_scores`, and
    `use_train_as_val`.
    """
    # Cadence gate: -1 (pre-training), last step, or every num_steps_for_val.
    if step_idx == -1 or step_idx == self.num_train_steps - 1 or (step_idx + 1) % self.num_steps_for_val == 0:
        pass
    else:
        return

    if valset is not None:
        # Validation set provided by user
        assert not self.use_train_as_val, "If valset is provided, use_train_as_val must be False."
        assert isinstance(self.num_steps_for_val, int) and self.num_steps_for_val > 0, "num_steps_for_val must be a positive integer."
        if self.report_train_scores:
            if step_idx == -1:
                logger.info("Using user provided validation set and reporting train scores for every validation step in addition.")
            # Evaluate valset and trainset in one pass by concatenating them.
            valset_evaluator = Evaluate(
                devset=valset + trainset,
                num_threads=self.num_threads,
                display_progress=True,
                provide_traceback=False,  # TODO(check with team)
                max_errors=len(valset)*10,  # TODO(check with team)
                failure_score=self.failure_score
            )
            if step_idx == -1:
                logger.info("Evaluating the student program on the train+validation set before training loop...")
            else:
                logger.info(f"Evaluating the student program on the validation set after training step {step_idx + 1}/{self.num_train_steps}")
            valset_evaluation = valset_evaluator(student, metric=self.metric)
            # devset order is [valset..., trainset...]; split the per-example
            # results back into the two sets before aggregating.
            trainset_scores = [r[-1] for r in valset_evaluation.results[len(valset):]]
            valset_scores = [r[-1] for r in valset_evaluation.results[:len(valset)]]
            trainset_agg = sum(trainset_scores) / len(trainset_scores)
            valset_agg = sum(valset_scores) / len(valset_scores)
            if step_idx == -1:
                logger.info(f"Student program training set score before training loop: {trainset_agg}")
                logger.info(f"Student program validation set score before training loop: {valset_agg}")
            else:
                logger.info(f"Student program training set score after training step {step_idx + 1}/{self.num_train_steps}: {trainset_agg}")
                logger.info(f"Student program validation set score after training step {step_idx + 1}/{self.num_train_steps}: {valset_agg}")
        else:
            if step_idx == -1:
                logger.info("Using user provided validation set and not reporting train scores.")
            valset_evaluator = Evaluate(
                devset=valset,
                num_threads=self.num_threads,
                display_progress=True,
                provide_traceback=False,  # TODO(check with team)
                max_errors=len(valset)*10,  # TODO(check with team)
                failure_score=self.failure_score
            )
            if step_idx == -1:
                logger.info("Evaluating the student program on the validation set before training loop...")
            else:
                logger.info(f"Evaluating the student program on the validation set after training step {step_idx + 1}/{self.num_train_steps}")
            valset_evaluation = valset_evaluator(student, metric=self.metric)
            if step_idx == -1:
                logger.info(f"Student program validation set score before training loop: {valset_evaluation.score}")
            else:
                logger.info(f"Student program validation set score after training step {step_idx + 1}/{self.num_train_steps}: {valset_evaluation.score}")
    else:
        # No validation set provided by user
        if self.report_train_scores:
            assert self.use_train_as_val, "If report_train_scores is True, use_train_as_val must be True when valset is not provided explicitly."
            assert isinstance(self.num_steps_for_val, int) and self.num_steps_for_val > 0, "num_steps_for_val must be a positive integer."
            if step_idx == -1:
                logger.info("Using trainset as validation set.")
            valset_evaluator = Evaluate(
                devset=trainset,
                num_threads=self.num_threads,
                display_progress=True,
                provide_traceback=False,  # TODO(check with team)
                max_errors=len(trainset)*10,  # TODO(check with team)
                failure_score=self.failure_score
            )
            if step_idx == -1:
                logger.info("Evaluating the student program on the validation set before training loop...")
            else:
                logger.info(f"Evaluating the student program on the validation set after training step {step_idx + 1}/{self.num_train_steps}")
            valset_evaluation = valset_evaluator(student, metric=self.metric)
            if step_idx == -1:
                logger.info(f"Student program training set score before training loop: {valset_evaluation.score}")
            else:
                logger.info(f"Student program training set score after training step {step_idx + 1}/{self.num_train_steps}: {valset_evaluation.score}")
        else:
            # No valset provided, and not using train as val
            assert not self.use_train_as_val, "If report_train_scores is False, use_train_as_val must be False."
            if step_idx == -1:
                logger.info("Not using any validation set and not reporting train scores.")
def update_shuffled_trainset(self, original_trainset):
self.shuffled_trainset_ids = list(range(len(original_trainset)))
self.rng.shuffle(self.shuffled_trainset_ids)
for id in self.shuffled_trainset_ids:
self.id_freqs[id] += 1
num_to_pad = self.num_dspy_examples_per_grpo_step - (len(original_trainset) % self.num_dspy_examples_per_grpo_step)
if num_to_pad > 0:
# Select ids based on least frequent ids
for _ in range(num_to_pad):
selected_id = self.id_freqs.most_common()[::-1][0][0]
self.shuffled_trainset_ids.append(selected_id)
self.id_freqs[selected_id] += 1
def select_training_sample_and_update_shuffled_trainset(
    self,
    original_trainset: list[Example],
    train_step_idx: int,
) -> list[Example]:
    """Return the slice of examples for one GRPO step, reshuffling once per epoch.

    The step index is mapped onto the (padded) shuffled id pool; whenever the
    step crosses into a new epoch, the pool is reshuffled first.
    """
    step = self.num_dspy_examples_per_grpo_step
    start = train_step_idx * step

    # Before the first epoch the pool may not exist yet, so epoch 0 is forced.
    curr_epoch = 0 if self.epoch == -1 else start // len(self.shuffled_trainset_ids)
    if curr_epoch > self.epoch:
        logger.info(f"Updating shuffled trainset for epoch {curr_epoch}...")
        self.epoch = curr_epoch
        self.update_shuffled_trainset(original_trainset)

    pool_size = len(self.shuffled_trainset_ids)
    assert pool_size >= step, f"Shuffled trainset length {len(self.shuffled_trainset_ids)} is less than num_dspy_examples_per_grpo_step {self.num_dspy_examples_per_grpo_step}"
    assert pool_size % step == 0, f"Shuffled trainset length {len(self.shuffled_trainset_ids)} is not divisible by num_dspy_examples_per_grpo_step {self.num_dspy_examples_per_grpo_step}"

    # Wrap the step offset into the pool and take one contiguous batch.
    start %= pool_size
    end_idx = start + step
    assert end_idx <= pool_size, f"End index {end_idx} is out of bounds for shuffled trainset length {len(self.shuffled_trainset_ids)}"
    return [original_trainset[i] for i in self.shuffled_trainset_ids[start:end_idx]]
def compile(
    self,
    student: Module,
    trainset: list[Example],
    teacher: Module | list[Module] | None = None,
    valset: list[Example] | None = None,
    **kwargs,
) -> Module:
    """Run the GRPO training loop and return the in-place-updated student.

    Args:
        student: Program whose predictor LM will be reinforced. All predictors
            must share a single LM; the LM is updated in place.
        trainset: Training examples; repeated to fill a GRPO step if too small.
        teacher: Optional teacher program(s) used for bootstrapping rollouts.
            Must be structurally equivalent to `student` and include it;
            defaults to the student itself.
        valset: Optional validation set. Must be None when `use_train_as_val`.
        **kwargs: Unused; accepted for interface compatibility.

    Returns:
        The student program, marked `_compiled = True`.
    """
    logger.info("Starting the GRPO compilation process... The LM(s) for the student program will be updated in place at the end of the training.")
    logger.info("Validating the inputs...")

    assert len(trainset) > 0, "Training set is empty. Please provide a non-empty training set."

    # If the trainset cannot fill even one GRPO step, repeat it.
    if len(trainset) < self.num_dspy_examples_per_grpo_step:
        logger.warning(
            f"Number of training examples {len(trainset)} is less than the number of examples per GRPO step {self.num_dspy_examples_per_grpo_step}. "
            "Repeating the training set to fill the GRPO step. This could lead to overfitting and training instability."
        )
        multiplier = (self.num_dspy_examples_per_grpo_step + len(trainset) - 1) // len(trainset)
        if multiplier > 1:
            logger.warning(
                f"Repeating the training set {multiplier} times to fill the GRPO step. This could lead to overfitting and training instability."
            )
            trainset = trainset * multiplier

    # TODO(GRPO Team): Following checks are for unimplemented features.
    # Consider if we want to eventually implement them or remove. We don't
    # yet support:
    # * multitask == False
    # * student program with multiple predictor LMs
    # The main reason for these is that we update the LMs in place. If these
    # LMs are shared between the different predictors of the student
    # program and we have multitask == False, we need to decide which steps
    # will use new LM copies and we need to ensure our decision is
    # consistent with any teacher LMs that share the same LMs.
    # TODO(GRPO Team): We want to make it possible to continue GRPO runs in
    # the future by saving the state of the GRPO run in the event of a
    # process failure.
    if not self.multitask:
        raise ValueError(
            "Independent GRPO training jobs for each predictor in the student program "
            "are not supported yet. Please set multitask=True."
        )

    student_lms = {id(pred.lm) for pred in student.predictors()}
    assert len(student_lms) == 1, (
        f"Student program has multiple LMs: {student_lms}. "
        "GRPO only supports student programs with a single LM."
        "You can set the LM for a program with `program.set_lm(...)`"
    )

    # Our regular input validation starts here
    if self.use_train_as_val:
        assert valset is None, "If use_train_as_val is True, valset must be None."

    logger.info("Preparing the student program...")
    all_predictors_have_lms(student)
    # Map each predictor's signature hash to its index, used later to route
    # trace entries back to the predictor that produced them.
    pred_signature_hash_to_ind = {hash(pred.signature): ind for ind, pred in enumerate(student.predictors())}
    num_student_predictors = len(student.predictors())

    # NOTE(review): this uses the root `logging` module rather than the
    # module-level `logger` used everywhere else — presumably unintentional;
    # left unchanged here.
    logging.info("Preparing the teacher program(s)... We will ensure that the provided programs have the same program structure as the student program.")
    if (isinstance(teacher, list) and len(teacher) == 0) or teacher is None:
        teacher = student
    teachers = teacher if isinstance(teacher, list) else [teacher]
    for t in teachers:
        assert_structural_equivalency(student, t)
        all_predictors_have_lms(t)

    # Ensure that the teachers list contain the student program
    assert student in teachers, f"Student program {student} is not in the list of teachers {teachers}. Please provide the student program as one of the teachers. Alternatively, you can leave the teacher argument as None, and the student program will be used as the teacher program."
    assert self.num_rollouts_per_grpo_step % len(teachers) == 0, (
        f"The GRPO group size (num_rollouts_per_grpo_step) {self.num_rollouts_per_grpo_step} is not divisible by the number of teachers {len(teachers)}. "
        "This is required to ensure that each teacher gets the same number of examples."
        "Please provide a number of examples that is divisible by the number of teachers."
    )
    num_samples_per_input = self.num_rollouts_per_grpo_step // len(teachers)

    # We will disable the LM cache for all programs (student and teachers)
    # These will be reverted to their original state at the end of the
    # training
    lm_cache_dict = {}
    disable_lm_cache(program=student, lm_cache_dict=lm_cache_dict)
    for t in teachers:
        disable_lm_cache(program=t, lm_cache_dict=lm_cache_dict)

    # Update train_kwargs
    for pred in student.predictors():
        train_kwargs = self.train_kwargs[pred.lm]
        train_kwargs = {} if train_kwargs is None else train_kwargs
        train_kwargs["num_generations"] = self.num_rollouts_per_grpo_step
        self.train_kwargs[pred.lm] = train_kwargs

    # We need to have a separate job for each unique LM x the data
    # collection strategy. This properly handles all combinations of
    # multitask and predictor LMs
    logger.info("Preparing the GRPO training job(s)...")
    grpo_training_jobs = {}
    for pred_ind, pred in enumerate(student.predictors()):
        data_key = None if self.multitask else pred_ind
        job_key = (pred.lm, data_key)
        if job_key not in grpo_training_jobs:
            train_kwargs = self.train_kwargs[pred.lm]
            job = pred.lm.reinforce(train_kwargs=train_kwargs)
            grpo_training_jobs[job_key] = job

    # Baseline evaluation before any training step.
    self.report_validation_metrics(
        student=student,
        trainset=trainset,
        valset=valset,
        logger=logger,
        step_idx=-1,
    )

    # Queue of GRPO groups per training job; key is (LM, data_key)
    group_queues = {}

    logger.info("Starting the GRPO training loop...")
    for train_step_idx in range(self.num_train_steps):
        logger.info(f"GRPO training step {train_step_idx + 1}/{self.num_train_steps}...")

        subsample_training_dataset = self.select_training_sample_and_update_shuffled_trainset(
            original_trainset=trainset,
            train_step_idx=train_step_idx,
        )

        # Block until at least one job reports a pending batch id we have
        # not already fulfilled.
        def _any_available_for_step():
            for _, job in grpo_training_jobs.items():
                grpo_status: GRPOStatus = job.get_status()
                pending_batch_ids = grpo_status["pending_batch_ids"]
                available = set(pending_batch_ids) - set(self.fulfilled_batch_ids)
                if available:
                    return True
            return False

        while not _any_available_for_step():
            time.sleep(1)

        logger.info("Bootstrapping data...")
        # trace_data[example_idx][teacher_idx] -> list of sample dicts.
        trace_data = [[[] for _ in range(len(teachers))] for _ in range(len(subsample_training_dataset))]
        # NOTE(review): this loop rebinds the `teacher` parameter name.
        for tind, teacher in enumerate(teachers):
            # Repeat the subsample so each teacher produces
            # num_samples_per_input rollouts per example.
            subsample_training_dataset_repeated = [example for _ in range(num_samples_per_input) for example in subsample_training_dataset]
            round_data = bootstrap_trace_data(
                program=teacher,
                dataset=subsample_training_dataset_repeated,
                metric=self.metric,
                num_threads=self.num_threads,
                raise_on_error=False,  # TODO(GRPO Team): This should be True, once the dspy format issue is fixed
                capture_failed_parses=True,
                failure_score=self.failure_score,
                format_failure_score=self.format_failure_score,
                log_format_failures=True,
            )
            for data_dict in round_data:
                # Fold the repeated-dataset index back into the subsample index.
                example_ind_in_subsample = data_dict["example_ind"] % len(subsample_training_dataset)
                data_dict["example_ind"] = example_ind_in_subsample
                trace_data[example_ind_in_subsample][tind].append(data_dict)

        # The trace_data for examples with FailedPrediction cases will have the signature at index 0, instead of the predictor
        # We need to replace the signature with the predictor

        # At this point, trace_data: list[example_idx -> list[teacher_idx -> [num_samples_per_input * Dict(example, prediction, trace, example_ind, score)]]]
        # Shape of trace is: [dspy_module_invocation_idx -> Tuple[Predictor, PredictorInputs, Prediction]]
        self.validate_trace_data_and_log_issues(
            trace_data=trace_data,
            subsample_training_dataset=subsample_training_dataset,
            num_teachers=len(teachers),
            num_samples_per_input=num_samples_per_input,
            pred_signature_hash_to_ind=pred_signature_hash_to_ind,
        )

        logger.info("Preparing the training data batch from bootstrapped examples for GRPO...")
        # Now, we need to prepare batches of data to be sent for training
        # Shape of train_batch_per_predictor: list[num_student_predictors -> list[ ]]
        train_batch_per_predictor: list[list[GRPOGroup]] = [[] for _ in range(num_student_predictors)]
        for pred_id in range(num_student_predictors):
            for example_ind, example_data in enumerate(trace_data):
                # Each example_data is a list of teacher_idx -> [num_samples_per_input * Dict(example, prediction, trace, example_ind, score)]
                # We need to flatten this list and create a batch for each predictor

                # TODO(Lakshya, Omar, Noah): Discuss what to do with the same module being invoked multiple times within a single dspy.Example
                predictor_example_invocations: list[list[tuple]] = []
                for teacher_data in example_data:
                    for sample in teacher_data:
                        # Each sample is a Dict(example, prediction, trace, example_ind, score)
                        # sample['prediction'] is module_level prediction
                        assert sample["example_ind"] == example_ind, f"Example index {sample['example_ind']} does not match the expected index {example_ind}"
                        # Keep only trace entries produced by this predictor,
                        # and append the rollout's score to each tuple.
                        trace_instances_for_current_pred = [(*t, sample["score"]) for t in sample["trace"] if hash(t[0].signature) == hash(student.predictors()[pred_id].signature)]
                        predictor_example_invocations.append(trace_instances_for_current_pred)

                if len(predictor_example_invocations) == 0:
                    logger.warning(f"Skipping example {example_ind} for predictor {pred_id} as it has no invocations. This is likely due to all examples in the training set input, resulting in the model generating output not following the dspy response format.")
                    continue
                elif len(predictor_example_invocations) != self.num_rollouts_per_grpo_step:
                    logger.warning(f"Number of predictor example invocations {len(predictor_example_invocations)} does not match the expected batch size {self.num_rollouts_per_grpo_step}. This is likely due to all examples in the training set input, resulting in the model generating output not following the dspy response format.")

                min_len = min([len(predictor_example_invocations[i]) for i in range(len(predictor_example_invocations))])
                max_len = max([len(predictor_example_invocations[i]) for i in range(len(predictor_example_invocations))])
                if min_len == 0:
                    logger.warning(f"Skipping example {example_ind} for predictor {pred_id} as it has no invocations.")
                    continue

                # Rollouts may invoke this predictor a variable number of
                # times; reconcile the lengths per the configured mode.
                if self.variably_invoked_predictor_grouping_mode == "truncate":
                    predictor_example_invocations = [invocation[:min_len] for invocation in predictor_example_invocations]
                elif self.variably_invoked_predictor_grouping_mode == "fill":
                    if self.variably_invoked_predictor_fill_strategy == "randint":
                        selector = lambda l: self.rng.choice(l)  # noqa: E731, E741
                    else:
                        selector = lambda l: l[-1]  # noqa: E731, E741
                    predictor_example_invocations = [
                        invocation + [selector(invocation) for _ in range(max_len - len(invocation))]
                        for invocation in predictor_example_invocations
                    ]
                else:
                    assert self.variably_invoked_predictor_grouping_mode == "ragged", f"Unknown variably invoked predictor grouping mode {self.variably_invoked_predictor_grouping_mode}"
                max_len = max([len(predictor_example_invocations[i]) for i in range(len(predictor_example_invocations))])

                # One GRPO group per invocation slot; each group collects one
                # entry per rollout.
                example_training_data: list[GRPOGroup] = [[] for _ in range(max_len)]
                for group_idx in range(max_len):
                    for rollout_idx in range(len(predictor_example_invocations)):
                        trace_instance = predictor_example_invocations[rollout_idx][group_idx]
                        score = trace_instance[3]
                        # for module_invocation_idx, trace_instance in enumerate(trace_instances_for_current_pred):
                        # Each trace is a tuple of (Predictor, PredictorInputs, Prediction)
                        trace_pred_id = pred_signature_hash_to_ind.get(hash(trace_instance[0].signature))
                        assert trace_pred_id == pred_id

                        predictor = trace_instance[0]
                        pred_lm = predictor.lm
                        adapter = self.adapter[pred_lm] or settings.adapter or XMLAdapter()
                        assert isinstance(adapter, ChatAdapter), f"Adapter {adapter} is not a ChatAdapter. GRPO training is not supported for this adapter."
                        # TODO(Lakshya): Currently we exclude demos from the training data
                        # TODO(GRPO Team): Use build_call_data_from_trace (from bootstrap_finetune) instead of
                        # dealing with the message formatting ourselves.
                        inp_messages = adapter.format(
                            signature=trace_instance[0].signature,
                            inputs=trace_instance[1],
                            demos=[]  # TODO: Add support for demos
                        )

                        if isinstance(trace_instance[2], FailedPrediction):
                            # Format failures keep the raw completion text and
                            # receive the (negative) format-failure reward.
                            score = trace_instance[2].format_reward or self.format_failure_score
                            example_training_data[group_idx].append({
                                "messages": inp_messages,
                                "completion": {
                                    "role": "assistant",
                                    "content": trace_instance[2].completion_text,
                                },
                                "reward": float(score),
                            })
                            logger.warning(f"Adding a format failure example to the training data for predictor {pred_id} and example {example_ind}.")
                        else:
                            all_messages = adapter.format_finetune_data(
                                signature=trace_instance[0].signature,
                                inputs=trace_instance[1],
                                outputs=trace_instance[2],
                                demos=[]  # TODO: Add support for demos
                            )["messages"]
                            assert all_messages[:-1] == inp_messages, f"Input messages {inp_messages} do not match the expected messages {all_messages[:-1]}"
                            example_training_data[group_idx].append({
                                "messages": inp_messages,
                                "completion": {
                                    "role": all_messages[-1]["role"],
                                    "content": all_messages[-1]["content"],
                                },
                                "reward": float(score),
                            })

                train_batch_per_predictor[pred_id].extend(example_training_data)

        if not any(train_batch_per_predictor):
            logger.warning("No training data found for this training step. This means that the model did not generate valid formatted responses for any of the examples in the training set. This is a critical error. Please check the model and the training set.")
            continue

        # Sanity-check group sizes and warn about degenerate groups.
        for predictor_train_batch in train_batch_per_predictor:
            for grpo_train_group in predictor_train_batch:
                if len(grpo_train_group) != self.num_rollouts_per_grpo_step:
                    logger.warning(f"Number of completions {len(grpo_train_group)} does not match the expected number num_rollouts_per_grpo_step={self.num_rollouts_per_grpo_step}")
                    assert len(grpo_train_group) <= self.num_rollouts_per_grpo_step, f"Number of completions {len(grpo_train_group)} is greater than the expected number num_rollouts_per_grpo_step={self.num_rollouts_per_grpo_step}"
                if len(set(map(repr, grpo_train_group))) < 2:
                    # TODO(GRPO Team): How can we avoid this warning?
                    logger.warning(f"GRPOGroup has no diversity. This could be due to low temperature, or low number of rollouts, or the cache could be enabled inadvertently. The GRPOGroup is {grpo_train_group}.")

        # We now run the GRPO step. Notes:
        # * The job here has a reference to a particular M that's attached
        #   to the student program. We update the .model field of this LM
        #   inside the job, which also updates the LM in the student program
        #   since these point to the same reference (along with any teacher
        #   program that shares the same LM).
        # * TODO(GRPO Team): This is inconsistent with how
        #   BootstrapFinetune works, which creates new LM instances post
        #   training. We should decide whether the LMs should be updated in
        #   place or new LMs should be created, and standardize our approach
        #   for both. If we decide to create new LMs, we should find a way
        #   to update self.adapter and self.train_kwargs accordingly, in
        #   addition to updating any teacher programs that share the same
        #   LM.
        logger.info("Invoking GRPO training step...")
        for (lm_for_job, data_key), job in grpo_training_jobs.items():
            train_data: list[GRPOGroup] = sum(train_batch_per_predictor, []) if data_key is None else train_batch_per_predictor[data_key]  #noqa: RUF017
            for group in train_data:
                if len(group) != self.num_rollouts_per_grpo_step:
                    # TODO(GRPO Team): This is very undesirable. This occurs only because in some of the generations, the model does not follow the correct dspy format.
                    # The ideal solution is to identify the full response string in that predictor's group, and then assign a high-negative (user-configurable) reward to that group.
                    # Pad the group to the expected number of generations by repeating the whole group, might require multiple iterations
                    while len(group) < self.num_rollouts_per_grpo_step:
                        group.extend(group[:min(self.num_rollouts_per_grpo_step - len(group), len(group))])
                assert len(group) == self.num_rollouts_per_grpo_step, f"Number of completions {len(group)} does not match the expected number self.num_rollouts_per_grpo_step={self.num_rollouts_per_grpo_step}"

            # Determine available batch IDs for this specific job
            grpo_status: GRPOStatus = job.get_status()
            pending_batch_ids = grpo_status["pending_batch_ids"]
            available_batch_ids = list(set(pending_batch_ids) - set(self.fulfilled_batch_ids))
            if not available_batch_ids:
                continue

            # Initialize and (re)fill the queue for this job as needed
            job_key = (lm_for_job, data_key)
            q = group_queues.setdefault(job_key, deque())

            # Refill strategy: add randomized copies of current train_data until we can satisfy all batch_ids
            if len(q) < len(available_batch_ids) and len(train_data) > 0:
                need = len(available_batch_ids) - len(q)
                while need > 0:
                    # Shuffle by sampling without replacement
                    shuffled = self.rng.sample(train_data, k=len(train_data))
                    q.extend(shuffled)
                    need -= len(shuffled)

            # Build GRPOGroup items by popping from the queue; fallback to random selection if needed
            final_train_data: list[GRPOGroup] = []
            for bid in available_batch_ids:
                if q:
                    grp = q.popleft()
                else:
                    # Fallback: choose randomly from current train_data (or flattened pool) if queue underflows
                    fallback_pool = train_data if len(train_data) > 0 else sum(train_batch_per_predictor, [])
                    if len(fallback_pool) == 0:
                        # Nothing to send for this job
                        continue
                    grp = self.rng.choice(fallback_pool)
                final_train_data.append({"batch_id": bid, "group": grp})

            if not final_train_data:
                continue

            # Track fulfilled IDs to avoid reuse
            self.fulfilled_batch_ids.extend([item["batch_id"] for item in final_train_data])

            job.step(train_data=final_train_data, train_data_format=TrainDataFormat.GRPO_CHAT)

        logger.info(f"GRPO training step {train_step_idx + 1}/{self.num_train_steps} completed.")

        self.report_validation_metrics(
            student=student,
            trainset=trainset,
            valset=valset,
            logger=logger,
            step_idx=train_step_idx,
        )

    logger.info("Done with the iterations! Retrieving the final model(s)...")
    for _, job in grpo_training_jobs.items():
        job.terminate()

    # Revert cache states to their initial values
    recover_lm_cache(program=student, lm_cache_dict=lm_cache_dict)
    for t in teachers:
        recover_lm_cache(program=t, lm_cache_dict=lm_cache_dict)

    logger.info("GRPO compiler has finished compiling the student program")
    student._compiled = True
    return student
def disable_lm_cache(program: Module, lm_cache_dict: dict):
    """Turn off caching on every predictor LM in `program`, remembering prior state.

    The previous `cache` flag of each LM is recorded in `lm_cache_dict`
    (keyed by the LM object) so `recover_lm_cache` can restore it later.

    Raises:
        ValueError: If any predictor has no LM attached.
    """
    for predictor in program.predictors():
        lm = predictor.lm
        if not lm:
            raise ValueError(f"Cannot disable cache: predictor {predictor} does not have an LM set.")
        # Record the original flag only once per LM, even when several
        # predictors share the same LM instance.
        lm_cache_dict.setdefault(lm, lm.cache)
        lm.cache = False
def recover_lm_cache(program: Module, lm_cache_dict: dict):
    """Restore each predictor LM's cache flag saved by `disable_lm_cache`."""
    for predictor in program.predictors():
        # We do not expect an LM to be missing from the dict, since the LMs
        # are modified in place and no new LMs are created during training.
        # If one is missing anyway, default its cache back to True — this is
        # a minor feature and not worth complaining about.
        predictor.lm.cache = lm_cache_dict.get(predictor.lm, True)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/teleprompt/grpo.py",
"license": "MIT License",
"lines": 564,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/teleprompt/test_grpo.py | from dspy.teleprompt.grpo import GRPO
def test_grpo_dataset_shuffler():
    """Every GRPO step over an exactly-divisible dataset yields the full set."""
    dataset = [1, 2, 3]
    optimizer = GRPO(
        num_dspy_examples_per_grpo_step=3,
        exclude_demos=True,
    )
    for step in range(4):
        batch = optimizer.select_training_sample_and_update_shuffled_trainset(dataset, step)
        assert len(batch) == 3
        assert set(batch) == set(dataset)
def test_grpo_dataset_shuffler_with_num_ex_per_step_less_dataset():
    """Steps smaller than the dataset still sample every example equally often."""
    from collections import Counter

    dataset = [1, 2, 3]
    optimizer = GRPO(
        num_dspy_examples_per_grpo_step=2,
        exclude_demos=True,
    )
    batches = [
        optimizer.select_training_sample_and_update_shuffled_trainset(dataset, step)
        for step in range(15)
    ]
    assert len(batches[-1]) == 2

    counts = Counter(item for batch in batches for item in batch)
    assert len(counts) == 3
    assert all(counts[item] == 10 for item in counts)
def test_grpo_dataset_shuffler_with_num_ex_per_step_greater_dataset():
    """Steps larger than the dataset pad via repeats but stay balanced overall."""
    from collections import Counter

    dataset = [1, 2, 3]
    optimizer = GRPO(
        num_dspy_examples_per_grpo_step=5,
        exclude_demos=True,
    )
    batches = [
        optimizer.select_training_sample_and_update_shuffled_trainset(dataset, step)
        for step in range(6)
    ]
    assert len(batches[-1]) == 5

    counts = Counter(item for batch in batches for item in batch)
    assert len(counts) == 3
    assert all(counts[item] == 10 for item in counts)
if __name__ == "__main__":
    # Allow running this module directly as a lightweight smoke test
    # without going through pytest.
    test_grpo_dataset_shuffler()
    test_grpo_dataset_shuffler_with_num_ex_per_step_less_dataset()
    test_grpo_dataset_shuffler_with_num_ex_per_step_greater_dataset()
    print("All tests passed!")
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/teleprompt/test_grpo.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/utils/exceptions.py |
from dspy.signatures.signature import Signature
class AdapterParseError(Exception):
    """Exception raised when an adapter cannot parse the LM response."""

    def __init__(
        self,
        adapter_name: str,
        signature: Signature,
        lm_response: str,
        message: str | None = None,
        # Fixed annotation: previously `str | None`, but `.keys()` is called
        # on it below and callers pass a mapping of field name -> parsed value.
        parsed_result: dict | None = None,
    ):
        """Build the error message from the parse context.

        Args:
            adapter_name: Name of the adapter that failed to parse.
            signature: Signature whose output fields were expected in the response.
            lm_response: Raw LM response that could not be parsed.
            message: Optional extra context prepended to the error text.
            parsed_result: Output fields that *were* successfully parsed, if
                any, keyed by field name.
        """
        self.adapter_name = adapter_name
        self.signature = signature
        self.lm_response = lm_response
        self.parsed_result = parsed_result

        # Optional caller-supplied context goes first, separated by a blank line.
        message = f"{message}\n\n" if message else ""
        message = (
            f"{message}"
            f"Adapter {adapter_name} failed to parse the LM response. \n\n"
            f"LM Response: {lm_response} \n\n"
            f"Expected to find output fields in the LM response: [{', '.join(signature.output_fields.keys())}] \n\n"
        )

        if parsed_result is not None:
            message += f"Actual output fields parsed from the LM response: [{', '.join(parsed_result.keys())}] \n\n"

        super().__init__(message)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/utils/exceptions.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
stanfordnlp/dspy:tests/utils/test_exceptions.py | import dspy
from dspy.utils.exceptions import AdapterParseError
def test_adapter_parse_error_basic():
    """Attributes are stored and the default message lists the expected fields."""
    signature = dspy.make_signature("question->answer1, answer2")
    response = "[[ ## answer1 ## ]]\nanswer1"
    error = AdapterParseError(adapter_name="ChatAdapter", signature=signature, lm_response=response)

    assert error.adapter_name == "ChatAdapter"
    assert error.signature == signature
    assert error.lm_response == response

    expected = (
        "Adapter ChatAdapter failed to parse the LM response. \n\n"
        "LM Response: [[ ## answer1 ## ]]\nanswer1 \n\n"
        "Expected to find output fields in the LM response: [answer1, answer2] \n\n"
    )
    assert str(error) == expected
def test_adapter_parse_error_with_message():
    """A caller-supplied message is prepended to the standard error text."""
    signature = dspy.make_signature("question->answer1, answer2")
    response = "[[ ## answer1 ## ]]\nanswer1"
    error = AdapterParseError(
        adapter_name="ChatAdapter",
        signature=signature,
        lm_response=response,
        message="Critical error, please fix!",
    )

    assert error.adapter_name == "ChatAdapter"
    assert error.signature == signature
    assert error.lm_response == response

    expected = (
        "Critical error, please fix!\n\n"
        "Adapter ChatAdapter failed to parse the LM response. \n\n"
        "LM Response: [[ ## answer1 ## ]]\nanswer1 \n\n"
        "Expected to find output fields in the LM response: [answer1, answer2] \n\n"
    )
    assert str(error) == expected
def test_adapter_parse_error_with_parsed_result():
    """Partially parsed fields are appended to the error message."""
    signature = dspy.make_signature("question->answer1, answer2")
    response = "[[ ## answer1 ## ]]\nanswer1"
    error = AdapterParseError(
        adapter_name="ChatAdapter",
        signature=signature,
        lm_response=response,
        parsed_result={"answer1": "value1"},
    )

    expected = (
        "Adapter ChatAdapter failed to parse the LM response. \n\n"
        "LM Response: [[ ## answer1 ## ]]\nanswer1 \n\n"
        "Expected to find output fields in the LM response: [answer1, answer2] \n\n"
        "Actual output fields parsed from the LM response: [answer1] \n\n"
    )
    assert str(error) == expected
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/utils/test_exceptions.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/retrievers/test_embeddings.py | import os
import tempfile
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pytest
from dspy.retrievers.embeddings import Embeddings
def dummy_corpus():
    """Small fixed corpus used by the embedding retriever tests."""
    sentences = (
        "The cat sat on the mat.",
        "The dog barked at the mailman.",
        "Birds fly in the sky.",
    )
    return list(sentences)
def dummy_embedder(texts):
    """Map each text to a fixed one-hot float32 vector.

    Texts mentioning "cat" map to e0, "dog" to e1, everything else to e2,
    so retrieval results are fully deterministic in the tests.
    """
    def _bucket(text):
        if "cat" in text:
            return 0
        if "dog" in text:
            return 1
        return 2

    def _one_hot(index):
        vec = np.zeros(3, dtype=np.float32)
        vec[index] = 1.0
        return vec

    return np.stack([_one_hot(_bucket(text)) for text in texts])
def test_embeddings_basic_search():
    """A k=1 search returns exactly the passage matching the query topic."""
    retriever = Embeddings(corpus=dummy_corpus(), embedder=dummy_embedder, k=1)

    result = retriever("I saw a dog running.")

    assert hasattr(result, "passages")
    assert hasattr(result, "indices")
    assert isinstance(result.passages, list)
    assert isinstance(result.indices, list)
    assert len(result.passages) == 1
    assert len(result.indices) == 1
    assert result.passages[0] == "The dog barked at the mailman."
def test_embeddings_multithreaded_search():
    """Concurrent queries against one retriever return correct, ordered results."""
    retriever = Embeddings(corpus=dummy_corpus(), embedder=dummy_embedder, k=1)
    query_expectations = [
        ("A cat is sitting on the mat.", "The cat sat on the mat."),
        ("My dog is awesome!", "The dog barked at the mailman."),
        ("Birds flying high.", "Birds fly in the sky."),
    ] * 10

    def run_query(text, expected):
        hit = retriever(text).passages[0]
        assert hit == expected
        return hit

    with ThreadPoolExecutor(max_workers=10) as pool:
        futures = [pool.submit(run_query, text, expected) for text, expected in query_expectations]
        # Collecting from the futures list preserves submission order.
        results = [future.result() for future in futures]

    assert results[0] == "The cat sat on the mat."
    assert results[1] == "The dog barked at the mailman."
    assert results[2] == "Birds fly in the sky."
def test_embeddings_save_load():
    """Saving then loading must round-trip config, corpus, and search results."""
    corpus = dummy_corpus()
    source = Embeddings(
        corpus=corpus, embedder=dummy_embedder, k=2, normalize=False, brute_force_threshold=1000
    )

    with tempfile.TemporaryDirectory() as temp_dir:
        save_path = os.path.join(temp_dir, "test_embeddings")
        source.save(save_path)

        # The save directory holds the config and the raw embedding matrix;
        # no FAISS index is written because the corpus is below the threshold.
        assert os.path.exists(os.path.join(save_path, "config.json"))
        assert os.path.exists(os.path.join(save_path, "corpus_embeddings.npy"))
        assert not os.path.exists(os.path.join(save_path, "faiss_index.bin"))

        # Load into an instance constructed with deliberately different settings.
        target = Embeddings(
            corpus=["dummy"], embedder=dummy_embedder, k=1, normalize=True, brute_force_threshold=500
        )
        target.load(save_path, dummy_embedder)

        # The loaded configuration must mirror the saved one.
        assert target.corpus == corpus
        assert target.k == 2
        assert target.normalize is False
        assert target.embedder == dummy_embedder
        assert target.index is None

        # Search behaviour is preserved across the round trip.
        query = "cat sitting"
        expected = source(query)
        actual = target(query)
        assert actual.passages == expected.passages
        assert actual.indices == expected.indices
def test_embeddings_from_saved():
    """Embeddings.from_saved reconstructs an equivalent retriever from disk."""
    original = Embeddings(
        corpus=dummy_corpus(), embedder=dummy_embedder, k=3, normalize=True, brute_force_threshold=1000
    )

    with tempfile.TemporaryDirectory() as temp_dir:
        save_path = os.path.join(temp_dir, "test_embeddings")
        original.save(save_path)

        restored = Embeddings.from_saved(save_path, dummy_embedder)

        assert restored.k == original.k
        assert restored.normalize == original.normalize
        assert restored.corpus == original.corpus
def test_embeddings_load_nonexistent_path():
    """Loading from a path that does not exist must raise a filesystem error."""
    expected_errors = (FileNotFoundError, OSError)
    with pytest.raises(expected_errors):
        Embeddings.from_saved("/nonexistent/path", dummy_embedder)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/retrievers/test_embeddings.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/st_navigation_expanded.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test app for st.navigation with expanded parameter."""
import streamlit as st
# Create 10 page functions
# Each page renders only its own header so the e2e test can identify which
# page is active; the function name doubles as the sidebar nav entry.
def page_1():
    st.header("Page 1")


def page_2():
    st.header("Page 2")


def page_3():
    st.header("Page 3")


def page_4():
    st.header("Page 4")


def page_5():
    st.header("Page 5")


def page_6():
    st.header("Page 6")


def page_7():
    st.header("Page 7")


def page_8():
    st.header("Page 8")


def page_9():
    st.header("Page 9")


def page_10():
    st.header("Page 10")
# All ten page callables, in display order.
pages = [page_1, page_2, page_3, page_4, page_5, page_6, page_7, page_8, page_9, page_10]

# expanded=3 keeps only the first three entries visible. The nav collapses
# whenever total pages exceed expanded + 2 (here 10 > 5), so a
# "View 7 more" button is rendered for the remaining pages.
st.sidebar.write("Sidebar content")
st.navigation(pages, expanded=3).run()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_navigation_expanded.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/st_navigation_expanded_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E tests for st.navigation with expanded parameter."""
from __future__ import annotations
from playwright.sync_api import Page, expect
def test_expanded_int_shows_limited_pages_with_view_more_button(app: Page) -> None:
    """Test that expanded=N shows N pages with 'View X more' button."""
    # The app registers 10 pages with expanded=3, so only the first three
    # navigation links should render initially.
    expect(app.get_by_test_id("stSidebarNavLink")).to_have_count(3)

    # The remaining 7 pages (10 total - 3 shown) sit behind the view button.
    view_button = app.get_by_test_id("stSidebarNavViewButton")
    expect(view_button).to_be_visible()
    expect(view_button).to_contain_text("View 7 more")
def test_expanded_int_view_button_expands_to_show_all_pages(app: Page) -> None:
    """Test that clicking 'View X more' shows all pages."""
    view_button = app.get_by_test_id("stSidebarNavViewButton")
    view_button.click()

    # Expanding reveals every registered page and flips the button label.
    expect(app.get_by_test_id("stSidebarNavLink")).to_have_count(10)
    expect(view_button).to_contain_text("View less")
def test_expanded_int_view_button_can_collapse_again(app: Page) -> None:
    """Test that clicking 'View less' collapses back to N pages."""
    view_button = app.get_by_test_id("stSidebarNavViewButton")

    # First click: expand to all 10 pages.
    view_button.click()
    expect(app.get_by_test_id("stSidebarNavLink")).to_have_count(10)
    expect(view_button).to_contain_text("View less")

    # Second click: collapse back to the initial 3 pages.
    view_button.click()
    expect(app.get_by_test_id("stSidebarNavLink")).to_have_count(3)
    expect(view_button).to_contain_text("View 7 more")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_navigation_expanded_test.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/metric_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# Perform some "type checking testing"; mypy should flag any assignments that are
# incorrect.
# Nothing below executes at runtime: the whole block is guarded by
# TYPE_CHECKING and exists solely for the static type checker.

if TYPE_CHECKING:
    from decimal import Decimal

    from streamlit.delta_generator import DeltaGenerator
    from streamlit.elements.metric import MetricMixin

    metric = MetricMixin().metric

    # =====================================================================
    # st.metric return type tests
    # =====================================================================

    # Basic metric - returns DeltaGenerator
    assert_type(metric("Temperature", "70°F"), DeltaGenerator)
    assert_type(metric("Speed", 100), DeltaGenerator)
    assert_type(metric("Value", 3.14), DeltaGenerator)
    assert_type(metric("Decimal", Decimal("10.5")), DeltaGenerator)
    assert_type(metric("None value", None), DeltaGenerator)

    # Metric with delta parameter
    assert_type(metric("Temperature", "70°F", delta="1.2°F"), DeltaGenerator)
    assert_type(metric("Speed", 100, delta=5), DeltaGenerator)
    assert_type(metric("Value", 3.14, delta=-0.5), DeltaGenerator)
    assert_type(
        metric("Decimal", Decimal("10.5"), delta=Decimal("0.5")), DeltaGenerator
    )
    assert_type(metric("No delta", "100", delta=None), DeltaGenerator)

    # Metric with delta_color parameter
    assert_type(metric("Metric", 100, delta=5, delta_color="normal"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="inverse"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="off"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="red"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="orange"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="yellow"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="green"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="blue"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="violet"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="gray"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="grey"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_color="primary"), DeltaGenerator)

    # Metric with help parameter
    assert_type(metric("Metric", 100, help="This is help text"), DeltaGenerator)
    assert_type(metric("Metric", 100, help=None), DeltaGenerator)

    # Metric with label_visibility parameter
    assert_type(metric("Metric", 100, label_visibility="visible"), DeltaGenerator)
    assert_type(metric("Metric", 100, label_visibility="hidden"), DeltaGenerator)
    assert_type(metric("Metric", 100, label_visibility="collapsed"), DeltaGenerator)

    # Metric with border parameter
    assert_type(metric("Metric", 100, border=True), DeltaGenerator)
    assert_type(metric("Metric", 100, border=False), DeltaGenerator)

    # Metric with width parameter
    assert_type(metric("Metric", 100, width="stretch"), DeltaGenerator)
    assert_type(metric("Metric", 100, width="content"), DeltaGenerator)
    assert_type(metric("Metric", 100, width=200), DeltaGenerator)

    # Metric with height parameter
    assert_type(metric("Metric", 100, height="content"), DeltaGenerator)
    assert_type(metric("Metric", 100, height="stretch"), DeltaGenerator)
    assert_type(metric("Metric", 100, height=150), DeltaGenerator)

    # Metric with chart_data parameter
    assert_type(metric("Metric", 100, chart_data=[1, 2, 3, 4, 5]), DeltaGenerator)
    assert_type(metric("Metric", 100, chart_data=(1, 2, 3)), DeltaGenerator)
    assert_type(metric("Metric", 100, chart_data=None), DeltaGenerator)

    # Metric with chart_type parameter
    assert_type(
        metric("Metric", 100, chart_data=[1, 2, 3], chart_type="line"), DeltaGenerator
    )
    assert_type(
        metric("Metric", 100, chart_data=[1, 2, 3], chart_type="bar"), DeltaGenerator
    )
    assert_type(
        metric("Metric", 100, chart_data=[1, 2, 3], chart_type="area"), DeltaGenerator
    )

    # Metric with delta_arrow parameter
    assert_type(metric("Metric", 100, delta=5, delta_arrow="auto"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_arrow="up"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_arrow="down"), DeltaGenerator)
    assert_type(metric("Metric", 100, delta=5, delta_arrow="off"), DeltaGenerator)

    # Metric with format parameter
    assert_type(metric("Metric", 100, format="%.2f"), DeltaGenerator)
    assert_type(metric("Metric", 100, format=None), DeltaGenerator)

    # Metric with delta_description parameter
    assert_type(
        metric("Metric", 100, delta=5, delta_description="since yesterday"),
        DeltaGenerator,
    )
    assert_type(metric("Metric", 100, delta=5, delta_description=None), DeltaGenerator)

    # Metric with all parameters combined
    assert_type(
        metric(
            "Full Metric",
            100,
            delta=5,
            delta_color="normal",
            help="Full help text",
            label_visibility="visible",
            border=True,
            width="stretch",
            height="content",
            chart_data=[1, 2, 3, 4, 5],
            chart_type="line",
            delta_arrow="auto",
            format="%.1f",
            delta_description="since last week",
        ),
        DeltaGenerator,
    )
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/metric_types.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/data_editor_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# Perform type checking tests for st.data_editor.
# The return type depends on the data parameter:
# - pd.DataFrame/pd.Series -> returns the same type (via DataFrameGenericAlias)
# - list/dict/set -> returns the same type with preserved generics
# - Other types (pd.Index, np.ndarray, tuple, etc.) -> returns pd.DataFrame
# Nothing below executes at runtime: the whole block is guarded by
# TYPE_CHECKING and exists solely for the static type checker.

if TYPE_CHECKING:
    import numpy as np
    import pandas as pd

    from streamlit.elements.widgets.data_editor import DataEditorMixin

    data_editor = DataEditorMixin().data_editor

    # =====================================================================
    # Return type tests based on data parameter type
    # =====================================================================

    df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]})

    # pd.DataFrame returns pd.DataFrame (matches DataFrameGenericAlias via iloc)
    assert_type(data_editor(df), pd.DataFrame)

    # pd.Series returns pd.Series with preserved type parameter
    series_int: pd.Series[int] = pd.Series([1, 2, 3])
    series_str: pd.Series[str] = pd.Series(["a", "b", "c"])
    assert_type(data_editor(series_int), pd.Series[int])
    assert_type(data_editor(series_str), pd.Series[str])

    # pd.Index falls through to Any overload -> returns pd.DataFrame
    # (Index.iloc doesn't return _iLocIndexer, so it doesn't match DataFrameGenericAlias)
    index: pd.Index[int] = pd.Index([1, 2, 3])
    assert_type(data_editor(index), pd.DataFrame)

    # np.ndarray falls through to Any overload -> returns pd.DataFrame
    arr = np.array([[1, 2, 3], [4, 5, 6]])
    assert_type(data_editor(arr), pd.DataFrame)

    # list returns list with preserved type parameter
    assert_type(data_editor([[1, 2], [3, 4]]), list[list[int]])
    list_data: list[dict[str, int]] = [{"a": 1}, {"a": 2}]
    assert_type(data_editor(list_data), list[dict[str, int]])

    # dict returns dict with preserved type parameters
    assert_type(data_editor({"col1": [1, 2], "col2": [3, 4]}), dict[str, list[int]])
    dict_data: dict[str, list[str]] = {"col1": ["a", "b"]}
    assert_type(data_editor(dict_data), dict[str, list[str]])

    # set returns set with preserved type parameter
    assert_type(data_editor({1, 2, 3}), set[int])
    set_data: set[str] = {"a", "b", "c"}
    assert_type(data_editor(set_data), set[str])

    # Nested tuples fall through to the Any overload -> returns pd.DataFrame
    assert_type(data_editor(((1, 2), (3, 4))), pd.DataFrame)
    tuple_data: tuple[int, int, int] = (1, 2, 3)
    assert_type(data_editor(tuple_data), pd.DataFrame)

    # =====================================================================
    # Test with various optional parameters (return type unchanged)
    # =====================================================================
    assert_type(
        data_editor(
            df,
            width="stretch",
            height=400,
            hide_index=True,
            column_order=["B", "A"],
            column_config={"A": "Integer values"},
            num_rows="dynamic",
            disabled=["B"],
            key="full_editor",
            on_change=lambda: None,
            row_height=35,
            placeholder="-",
        ),
        pd.DataFrame,
    )

    # Return type preserved with optional parameters for non-DataFrame types
    assert_type(data_editor(list_data, num_rows="dynamic"), list[dict[str, int]])
    assert_type(data_editor(dict_data, disabled=True), dict[str, list[str]])
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/data_editor_types.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/shared/stats_reporter.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pytest plugin for collecting and reporting E2E test statistics.
This plugin captures test execution statistics (test duration, status, etc.) and
outputs them to a JSON file that can be used for tracking test performance over time.
Usage:
- By default, statistics are collected and saved to 'test-results/test-stats.json'
- Use --stats-output=PATH to specify a custom output file path
- Use --no-stats to disable statistics collection entirely
The output JSON file (schema_version "1.1") can be uploaded as a CI artifact
for historical performance tracking and analysis.
Schema version history:
- 1.0: Initial schema with test results, durations, browser breakdown, etc.
- 1.1: Added fixture statistics (setup/teardown durations, slowest_setup,
slowest_teardown lists)
- 1.2: Added snapshot statistics (total_snapshots count)
"""
from __future__ import annotations
import json
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from statistics import mean, median
from typing import TYPE_CHECKING, Any, Final
import psutil
import pytest
if TYPE_CHECKING:
from _pytest.config import Config
from _pytest.reports import TestReport
@dataclass
class TestResult:
    """Holds the result of a single test."""

    nodeid: str
    outcome: str  # "passed", "failed", "skipped", "error"
    duration: float  # in seconds (call phase)
    browser: str | None = None  # browser parsed from a parametrized nodeid, if any
    rerun_count: int = 0  # how many times the call phase re-ran for this nodeid
    worker_id: str | None = None  # xdist worker that ran the test ("primary" without xdist)
    setup_duration: float = 0.0  # fixture setup time
    teardown_duration: float = 0.0  # fixture teardown time
@dataclass
class WorkerStats:
    """Statistics for a single xdist worker."""

    test_count: int = 0  # number of tests executed on this worker
    total_runtime: float = 0.0  # summed call-phase durations, in seconds
    memory_mb: float = 0.0  # Memory usage at end of worker session
@dataclass
class StatsCollector:
    """Collects test statistics during a pytest session."""

    results: list[TestResult] = field(default_factory=list)  # every recorded attempt
    session_start_time: float = 0.0  # perf_counter() at session start
    session_end_time: float = 0.0  # perf_counter() at session finish
    # Track reruns by nodeid
    rerun_counts: dict[str, int] = field(default_factory=dict)
    # Track outcome by nodeid (for deduplication across reruns)
    final_outcomes: dict[str, TestResult] = field(default_factory=dict)
    # Track per-worker statistics
    worker_stats: dict[str, WorkerStats] = field(default_factory=dict)
    # Track setup/teardown durations by nodeid (before TestResult is finalized)
    phase_durations: dict[str, dict[str, float]] = field(default_factory=dict)
_BROWSERS: Final[tuple[str, ...]] = ("chromium", "firefox", "webkit")
_BYTES_PER_MB: Final[int] = 1024 * 1024
_SNAPSHOTS_DIR: Final[str] = "__snapshots__/linux"
def _extract_browser_from_nodeid(nodeid: str) -> str | None:
"""Extract browser name from test nodeid if parametrized with browser."""
for browser in _BROWSERS:
if (
f"[{browser}]" in nodeid
or f"-{browser}]" in nodeid
or f"[{browser}-" in nodeid
):
return browser
return None
def _get_worker_id(report: TestReport | None = None) -> str:
"""Get the xdist worker ID for a test report.
When running under xdist:
- On workers: reads from PYTEST_XDIST_WORKER env var
- On primary: extracts from report.node.gateway.id (forwarded reports)
Returns 'primary' if not running under xdist.
"""
# Try to get worker_id from the report's node attribute (xdist forwarded reports)
if report is not None and hasattr(report, "node"):
try:
return str(report.node.gateway.id)
except AttributeError:
pass
# Fall back to env var (works on workers, returns "primary" otherwise)
return os.getenv("PYTEST_XDIST_WORKER", "primary")
class StatsReporterPlugin:
"""Pytest plugin that collects and reports test statistics."""
def __init__(self, output_path: Path) -> None:
    # Path of the JSON statistics file written at session end.
    self.output_path = output_path
    # Accumulates per-test results and per-worker stats for the session.
    self.collector = StatsCollector()
def pytest_sessionstart(self, session: pytest.Session) -> None:  # noqa: ARG002
    """Called at the start of the test session."""
    # Use perf_counter for elapsed time measurement (monotonic, not affected by
    # system clock adjustments like NTP or VM clock drift)
    self.collector.session_start_time = time.perf_counter()
def pytest_runtest_logreport(self, report: TestReport) -> None:
    """Called after each test phase (setup, call, teardown)."""
    nodeid = report.nodeid

    if report.when == "setup":
        # Stash the setup duration so _record_test_result can attach it to
        # the result created during the "call" phase.
        self.collector.phase_durations.setdefault(nodeid, {})["setup"] = (
            report.duration
        )
        if report.outcome == "failed":
            # A setup failure means the test body never ran; record an error.
            self._record_test_result(report, outcome="error")
        return

    if report.when == "teardown":
        self.collector.phase_durations.setdefault(nodeid, {})["teardown"] = (
            report.duration
        )
        if nodeid in self.collector.final_outcomes:
            # Back-fill the teardown duration on the already-recorded result.
            self.collector.final_outcomes[
                nodeid
            ].teardown_duration = report.duration
        if report.outcome == "failed":
            # If we already have a result from call phase, just update outcome to
            # "error" instead of creating a new result (which would have wrong
            # duration and create a duplicate entry)
            if nodeid in self.collector.final_outcomes:
                self.collector.final_outcomes[nodeid].outcome = "error"
            else:
                self._record_test_result(report, outcome="error")
        return

    if report.when == "call":
        # Normalize any unexpected outcome value to "error".
        outcome = (
            report.outcome
            if report.outcome in {"passed", "failed", "skipped"}
            else "error"
        )
        self._record_test_result(report, outcome=outcome)
def _record_test_result(self, report: TestReport, outcome: str) -> None:
    """Record a test result from a report."""
    nodeid = report.nodeid
    browser = _extract_browser_from_nodeid(nodeid)
    worker_id = _get_worker_id(report)

    # Only count reruns for "call" phase to avoid false positives from
    # setup/teardown failures on the same test run.
    if report.when == "call" and nodeid in self.collector.final_outcomes:
        self.collector.rerun_counts[nodeid] = (
            self.collector.rerun_counts.get(nodeid, 0) + 1
        )

    # Get setup duration if available
    setup_duration = self.collector.phase_durations.get(nodeid, {}).get(
        "setup", 0.0
    )

    result = TestResult(
        nodeid=nodeid,
        outcome=outcome,
        duration=report.duration,
        browser=browser,
        rerun_count=self.collector.rerun_counts.get(nodeid, 0),
        worker_id=worker_id,
        setup_duration=setup_duration,
    )
    # final_outcomes keeps only the latest result per nodeid (dedupes across
    # reruns); results keeps every recorded attempt.
    self.collector.final_outcomes[nodeid] = result
    self.collector.results.append(result)

    # Track per-worker statistics (only for tests that actually ran on workers)
    if report.when == "call" and outcome != "skipped" and worker_id != "primary":
        if worker_id not in self.collector.worker_stats:
            self.collector.worker_stats[worker_id] = WorkerStats()
        worker_stat = self.collector.worker_stats[worker_id]
        worker_stat.test_count += 1
        worker_stat.total_runtime += report.duration
def pytest_testnodedown(self, node: Any, error: Any) -> None:  # noqa: ARG002
    """Merge worker stats when an xdist worker node goes down."""
    # Workers publish their stats via workeroutput in pytest_sessionfinish;
    # fold them into the primary's collector here.
    if hasattr(node, "workeroutput") and "worker_stats" in node.workeroutput:
        worker_data = node.workeroutput["worker_stats"]
        worker_id = worker_data.get("worker_id", "unknown")
        if worker_id not in self.collector.worker_stats:
            self.collector.worker_stats[worker_id] = WorkerStats()
        ws = self.collector.worker_stats[worker_id]
        ws.test_count += worker_data.get("test_count", 0)
        ws.total_runtime += worker_data.get("total_runtime", 0.0)
        # Keep the peak memory reading if multiple values arrive.
        ws.memory_mb = max(ws.memory_mb, worker_data.get("memory_mb", 0.0))
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(self, session: pytest.Session, exitstatus: int) -> None:
    """Generate the statistics JSON at the end of the test session."""
    worker_id = _get_worker_id()
    if worker_id != "primary" and hasattr(session.config, "workeroutput"):
        # On an xdist worker: publish this worker's stats for the primary
        # (collected in pytest_testnodedown) instead of writing a file.
        ws = self.collector.worker_stats.get(worker_id, WorkerStats())
        session.config.workeroutput["worker_stats"] = {
            "worker_id": worker_id,
            "test_count": ws.test_count,
            "total_runtime": ws.total_runtime,
            "memory_mb": self._get_current_process_memory(),
        }
        return  # Workers don't write the stats file

    self.collector.session_end_time = time.perf_counter()
    stats = self._compute_statistics(session, exitstatus)
    self._write_stats(stats)
def _compute_statistics(
    self,
    session: pytest.Session,  # noqa: ARG002
    exitstatus: int,
) -> dict[str, Any]:
    """Compute aggregate statistics from collected test results.

    Returns the full schema-1.2 stats document that is serialized to JSON.
    """
    # Use the deduplicated per-nodeid results so reruns are counted once.
    final_results = list(self.collector.final_outcomes.values())

    # Basic counts
    total_tests = len(final_results)
    passed = sum(1 for r in final_results if r.outcome == "passed")
    failed = sum(1 for r in final_results if r.outcome == "failed")
    skipped = sum(1 for r in final_results if r.outcome == "skipped")
    errors = sum(1 for r in final_results if r.outcome == "error")

    # Rerun statistics
    tests_with_reruns = sum(1 for r in final_results if r.rerun_count > 0)
    total_reruns = sum(self.collector.rerun_counts.values())

    # Duration statistics (only for tests that actually ran)
    durations = [r.duration for r in final_results if r.outcome != "skipped"]
    setup_durations = [
        r.setup_duration for r in final_results if r.outcome != "skipped"
    ]
    teardown_durations = [
        r.teardown_duration for r in final_results if r.outcome != "skipped"
    ]
    total_duration = (
        self.collector.session_end_time - self.collector.session_start_time
    )

    duration_stats: dict[str, float | None] = {
        "total_test_time_seconds": sum(durations) if durations else 0.0,
        "mean_duration_seconds": mean(durations) if durations else None,
        "median_duration_seconds": median(durations) if durations else None,
        "min_duration_seconds": min(durations) if durations else None,
        "max_duration_seconds": max(durations) if durations else None,
        # Fixture setup/teardown statistics
        "total_setup_time_seconds": sum(setup_durations)
        if setup_durations
        else 0.0,
        "total_teardown_time_seconds": (
            sum(teardown_durations) if teardown_durations else 0.0
        ),
        "mean_setup_duration_seconds": (
            mean(setup_durations) if setup_durations else None
        ),
        "mean_teardown_duration_seconds": (
            mean(teardown_durations) if teardown_durations else None
        ),
    }

    # Per-browser breakdown
    browser_stats: dict[str, dict[str, int]] = {}
    for browser in _BROWSERS:
        browser_results = [r for r in final_results if r.browser == browser]
        if browser_results:
            browser_stats[browser] = {
                "total": len(browser_results),
                "passed": sum(1 for r in browser_results if r.outcome == "passed"),
                "failed": sum(1 for r in browser_results if r.outcome == "failed"),
                "skipped": sum(
                    1 for r in browser_results if r.outcome == "skipped"
                ),
                "errors": sum(1 for r in browser_results if r.outcome == "error"),
            }

    # Slowest tests (top 10)
    sorted_by_duration = sorted(
        [r for r in final_results if r.outcome != "skipped"],
        key=lambda r: r.duration,
        reverse=True,
    )[:10]
    slowest_tests = [
        {
            "nodeid": r.nodeid,
            "duration_seconds": r.duration,
            "setup_duration_seconds": r.setup_duration,
            "teardown_duration_seconds": r.teardown_duration,
            "browser": r.browser,
        }
        for r in sorted_by_duration
    ]

    # Slowest setup (top 10 by fixture setup time)
    sorted_by_setup = sorted(
        [
            r
            for r in final_results
            if r.outcome != "skipped" and r.setup_duration > 0
        ],
        key=lambda r: r.setup_duration,
        reverse=True,
    )[:10]
    slowest_setup = [
        {
            "nodeid": r.nodeid,
            "setup_duration_seconds": r.setup_duration,
            "browser": r.browser,
        }
        for r in sorted_by_setup
    ]

    # Slowest teardown (top 10 by fixture teardown time)
    sorted_by_teardown = sorted(
        [
            r
            for r in final_results
            if r.outcome != "skipped" and r.teardown_duration > 0
        ],
        key=lambda r: r.teardown_duration,
        reverse=True,
    )[:10]
    slowest_teardown = [
        {
            "nodeid": r.nodeid,
            "teardown_duration_seconds": r.teardown_duration,
            "browser": r.browser,
        }
        for r in sorted_by_teardown
    ]

    # Aggregate test durations by module/file
    module_durations: dict[str, dict[str, Any]] = {}
    for r in final_results:
        if r.outcome == "skipped":
            continue
        # The nodeid prefix before "::" is the test module path.
        module = r.nodeid.split("::")[0]
        data = module_durations.setdefault(
            module, {"total_duration": 0.0, "test_count": 0}
        )
        data["total_duration"] += r.duration
        data["test_count"] += 1
    test_modules = sorted(
        [
            {
                "module": mod,
                "total_duration_seconds": data["total_duration"],
                "test_count": data["test_count"],
                "avg_duration_seconds": data["total_duration"] / data["test_count"],
            }
            for mod, data in module_durations.items()
        ],
        key=lambda x: x["total_duration_seconds"],  # noqa: FURB118
        reverse=True,
    )

    # Tests that required reruns
    rerun_tests = [
        {
            "nodeid": r.nodeid,
            "final_outcome": r.outcome,
            "rerun_count": r.rerun_count,
            "browser": r.browser,
        }
        for r in final_results
        if r.rerun_count > 0
    ]

    # Environment info
    env_info = {
        "python_version": sys.version,
        "platform": sys.platform,
        "pytest_version": pytest.__version__,
        "ci": os.getenv("CI", "false").lower() == "true",
        "github_run_id": os.getenv("GITHUB_RUN_ID"),
        "github_sha": os.getenv("GITHUB_SHA"),
        "github_ref": os.getenv("GITHUB_REF"),
    }

    # Compute xdist worker statistics
    xdist_stats = self._compute_worker_stats()

    return {
        "schema_version": "1.2",
        "summary": {
            "total_tests": total_tests,
            "passed": passed,
            "failed": failed,
            "skipped": skipped,
            "errors": errors,
            "tests_with_reruns": tests_with_reruns,
            "total_reruns": total_reruns,
            "exit_status": exitstatus,
        },
        "duration": {
            "session_duration_seconds": total_duration,
            **duration_stats,
        },
        "browser_breakdown": browser_stats,
        "slowest_tests": slowest_tests,
        "slowest_setup": slowest_setup,
        "slowest_teardown": slowest_teardown,
        "test_modules": test_modules,
        "rerun_details": rerun_tests,
        "environment": env_info,
        "xdist_workers": xdist_stats,
        "memory": self._get_memory_stats(),
        "snapshots": self._get_snapshot_stats(),
        "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
    }
def _compute_worker_stats(self) -> dict[str, Any]:
    """Compute statistics for xdist workers."""
    worker_stats = self.collector.worker_stats
    if not worker_stats:
        # No xdist workers reported (plain single-process pytest session).
        return {}

    per_worker = {}
    for worker_id, ws in sorted(worker_stats.items()):
        per_worker[worker_id] = {
            "test_count": ws.test_count,
            "total_runtime_seconds": ws.total_runtime,
            "avg_test_duration_seconds": (
                ws.total_runtime / ws.test_count if ws.test_count > 0 else 0.0
            ),
            "memory_mb": ws.memory_mb,
        }

    test_counts = [ws.test_count for ws in worker_stats.values()]
    runtimes = [ws.total_runtime for ws in worker_stats.values()]
    # Workers that never reported memory (0.0) are excluded from memory stats.
    memories = [ws.memory_mb for ws in worker_stats.values() if ws.memory_mb > 0]

    return {
        "worker_count": len(worker_stats),
        "per_worker": per_worker,
        "aggregate": {
            "min_tests_per_worker": min(test_counts) if test_counts else 0,
            "max_tests_per_worker": max(test_counts) if test_counts else 0,
            "avg_tests_per_worker": mean(test_counts) if test_counts else 0.0,
            "min_runtime_seconds": min(runtimes) if runtimes else 0.0,
            "max_runtime_seconds": max(runtimes) if runtimes else 0.0,
            "avg_runtime_seconds": mean(runtimes) if runtimes else 0.0,
            "total_runtime_seconds": sum(runtimes) if runtimes else 0.0,
            "min_memory_mb": min(memories) if memories else 0.0,
            "max_memory_mb": max(memories) if memories else 0.0,
            "avg_memory_mb": mean(memories) if memories else 0.0,
            "total_memory_mb": sum(memories) if memories else 0.0,
        },
    }
def _get_current_process_memory(self) -> float:
    """Return the resident-set size of this process in MB, or 0.0 on failure."""
    try:
        rss_bytes = psutil.Process().memory_info().rss
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # Best-effort: a missing/inaccessible process just reports zero.
        return 0.0
    return rss_bytes / _BYTES_PER_MB
def _get_snapshot_stats(self) -> dict[str, Any]:
    """Get statistics about Playwright snapshots in the linux snapshots directory.

    The snapshots directory is resolved by walking up from the output path
    and checking each parent for a ``__snapshots__/linux`` child directory.
    This handles custom ``--stats-output`` paths with different depths.

    Returns an empty dict when no snapshots directory is found or when the
    filesystem cannot be read.
    """
    # Find the snapshots directory by walking up from the output path.
    snapshots_dir: Path | None = None
    try:
        for parent in self.output_path.parents:
            candidate = parent / _SNAPSHOTS_DIR
            # Path.is_dir() already returns False for non-existent paths,
            # so a separate exists() check is redundant.
            if candidate.is_dir():
                snapshots_dir = candidate
                break
    except OSError:
        return {}
    if snapshots_dir is None:
        return {}

    # Count .png files one level below each module directory
    # (__snapshots__/linux/<module>/<snapshot>.png).
    try:
        total_snapshots = sum(
            1
            for module_dir in snapshots_dir.iterdir()
            if module_dir.is_dir()
            for snapshot_file in module_dir.iterdir()
            if snapshot_file.is_file() and snapshot_file.suffix.lower() == ".png"
        )
    except OSError:
        return {}
    return {"total_snapshots": total_snapshots}
def _get_memory_stats(self) -> dict[str, float]:
    """Report RSS memory (MB) for the main process plus xdist workers.

    Note: When using pytest-xdist, the child worker processes have already
    exited by the time this runs (at session finish), so we rely on the
    memory values each worker reported via workeroutput instead of probing
    child processes directly.
    """
    main_rss_mb = 0.0
    try:
        main_rss_mb = psutil.Process().memory_info().rss / _BYTES_PER_MB
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass

    # Memory each worker captured before exiting; skip empty (<= 0) reports.
    workers_rss_mb = 0.0
    for ws in self.collector.worker_stats.values():
        if ws.memory_mb > 0:
            workers_rss_mb += ws.memory_mb

    return {
        "main_process_rss_mb": main_rss_mb,
        "workers_rss_mb": workers_rss_mb,
        "total_rss_mb": main_rss_mb + workers_rss_mb,
    }
def _write_stats(self, stats: dict[str, Any]) -> None:
"""Write statistics to JSON file."""
try:
self.output_path.parent.mkdir(parents=True, exist_ok=True)
with open(self.output_path, "w", encoding="utf-8") as f:
json.dump(stats, f, indent=2)
print(f"\nTest statistics written to: {self.output_path}")
except OSError as e:
print(
f"\nWarning: Failed to write test statistics to {self.output_path}: {e}"
)
def pytest_addoption(parser: pytest.Parser) -> None:
    """Register the stats reporter's command-line options with pytest."""
    stats_group = parser.getgroup("stats_reporter", "Test statistics reporting")
    # Where the JSON report is written; relative paths resolve under rootdir
    # (see pytest_configure).
    stats_group.addoption(
        "--stats-output",
        action="store",
        default="test-results/test-stats.json",
        help="Path for the test statistics JSON output (default: test-results/test-stats.json)",
    )
    # Escape hatch to turn the plugin off entirely.
    stats_group.addoption(
        "--no-stats",
        action="store_true",
        default=False,
        help="Disable test statistics collection and reporting",
    )
def pytest_configure(config: Config) -> None:
    """Register the stats reporter plugin unless disabled via --no-stats."""
    if config.getoption("--no-stats", default=False):
        return

    output_path = Path(config.getoption("--stats-output"))
    if not output_path.is_absolute():
        # Resolve relative paths against pytest's rootdir; fall back to the
        # current working directory when `rootpath` is unavailable.
        rootdir = config.rootpath if hasattr(config, "rootpath") else Path.cwd()
        output_path = rootdir / output_path

    config.pluginmanager.register(StatsReporterPlugin(output_path), "stats_reporter")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/shared/stats_reporter.py",
"license": "Apache License 2.0",
"lines": 529,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/streamlit/elements/table.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Literal, cast
from streamlit import dataframe_util
from streamlit.elements.lib.layout_utils import (
Height,
LayoutConfig,
Width,
validate_height,
validate_width,
)
from streamlit.elements.lib.pandas_styler_utils import marshall_styler
from streamlit.errors import StreamlitAPIException, StreamlitValueError
from streamlit.proto.Table_pb2 import Table as TableProto
from streamlit.runtime.metrics_util import gather_metrics
if TYPE_CHECKING:
from streamlit.dataframe_util import Data
from streamlit.delta_generator import DeltaGenerator
from streamlit.proto.ArrowData_pb2 import ArrowData as ArrowDataProto
def parse_border_mode(
    border: bool | Literal["horizontal"],
) -> TableProto.BorderMode.ValueType:
    """Translate the user-facing ``border`` option into the proto enum.

    Booleans map to ALL/NONE; the only accepted string is ``"horizontal"``.
    Anything else raises a ``StreamlitValueError``.
    """
    # Check bool first so True/False are never compared against the string.
    if isinstance(border, bool):
        if border:
            return TableProto.BorderMode.ALL
        return TableProto.BorderMode.NONE
    if border == "horizontal":
        return TableProto.BorderMode.HORIZONTAL
    raise StreamlitValueError("border", ["True", "False", "'horizontal'"])
def marshall_table(
    proto: ArrowDataProto, data: Data, default_uuid: str | None = None
) -> None:
    """Serialize ``data`` into an ArrowData proto for the Table element.

    Parameters
    ----------
    proto : proto.ArrowData
        Output. The protobuf for Streamlit ArrowData proto.
    data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.DataFrame, Iterable, dict, or None
        Something that is or can be converted to a dataframe.
    default_uuid : str | None
        Fallback UUID used when the data is a ``pandas.Styler``; all other
        data types ignore it.
    """  # noqa: E501
    if dataframe_util.is_pandas_styler(data):
        # Styler data carries styling metadata that is marshalled separately
        # and requires a concrete UUID string.
        if not isinstance(default_uuid, str):
            raise StreamlitAPIException(
                "Default UUID must be a string for Styler data."
            )
        marshall_styler(proto, data, default_uuid)
    proto.data = dataframe_util.convert_anything_to_arrow_bytes(data)
class TableMixin:
    @gather_metrics("table")
    def table(
        self,
        data: Data = None,
        *,
        border: bool | Literal["horizontal"] = True,
        width: Width = "stretch",
        height: Height = "content",
    ) -> DeltaGenerator:
        """Display a static table.

        While ``st.dataframe`` is geared towards large datasets and
        interactive data exploration, ``st.table`` is useful for displaying
        small, styled tables without sorting or scrolling — for example a
        confusion matrix or a leaderboard. All cells, including the index
        and column headers, can optionally contain GitHub-flavored Markdown
        (https://github.github.com/gfm); see the ``body`` parameter of
        ``st.markdown`` for additional supported directives.

        Parameters
        ----------
        data : Anything supported by st.dataframe
            The table data.
        border : bool or "horizontal"
            Whether to show borders around the table and between cells:
            ``True`` (default) shows borders around the table and between
            cells, ``False`` shows no borders, and ``"horizontal"`` shows
            only horizontal borders between rows.
        width : "stretch", "content", or int
            The width of the table element. ``"stretch"`` (default) matches
            the parent container's width, ``"content"`` fits the content
            (without exceeding the parent container), and an integer sets a
            fixed pixel width (capped at the parent container's width).
        height : "stretch", "content", or int
            The height of the table element. ``"content"`` (default) shows
            all rows, ``"stretch"`` expands to fill the available vertical
            space (shared evenly with other stretch-height elements), and an
            integer sets a fixed pixel height. With a fixed height,
            overflowing content scrolls with sticky headers; row index
            columns stay sticky only when horizontal scrolling is enabled
            via a fixed pixel ``width``.

        Example
        -------
        >>> import pandas as pd
        >>> import streamlit as st
        >>>
        >>> confusion_matrix = pd.DataFrame(
        ...     {
        ...         "Predicted Cat": [85, 3, 2, 1],
        ...         "Predicted Dog": [2, 78, 4, 0],
        ...         "Predicted Bird": [1, 5, 72, 3],
        ...         "Predicted Fish": [0, 2, 1, 89],
        ...     },
        ...     index=["Actual Cat", "Actual Dog", "Actual Bird", "Actual Fish"],
        ... )
        >>> st.table(confusion_matrix)
        """
        validate_width(width, allow_content=True)
        validate_height(height, allow_content=True)

        border_mode = parse_border_mode(border)

        # Lazily-evaluated data objects are collected with a 100-row cap
        # (instead of the 10k cap used elsewhere) because large tables render
        # slowly, take too much screen space, and can crush the app.
        if dataframe_util.is_unevaluated_data_object(data):
            data = dataframe_util.convert_anything_to_pandas_df(
                data, max_unevaluated_rows=100
            )

        # Without an explicit pandas.Styler UUID, derive one from the element
        # position; moving the element therefore re-renders the table.
        default_uuid = str(hash(self.dg._get_delta_path_str()))

        proto = TableProto()
        marshall_table(proto.arrow_data, data, default_uuid)
        proto.border_mode = border_mode

        return self.dg._enqueue(
            "table",
            proto,
            layout_config=LayoutConfig(width=width, height=height),
        )

    @property
    def dg(self) -> DeltaGenerator:
        """The underlying DeltaGenerator."""
        return cast("DeltaGenerator", self)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/elements/table.py",
"license": "Apache License 2.0",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/multipage_apps_v2/mpa_v2_page_visibility.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
import streamlit as st
if TYPE_CHECKING:
from streamlit.navigation.page import StreamlitPage
# The navigation position is controlled via ?nav_position=... (default: sidebar).
nav_position = st.query_params.get("nav_position", "sidebar")


def _query_flag(name: str) -> bool:
    """Return True when the query parameter ``name`` equals the string "true"."""
    return st.query_params.get(name, "false") == "true"


# Scenario toggles used by the e2e tests:
# only one visible page in the navigation.
single_visible = _query_flag("single_visible")
# a section whose pages are all hidden.
test_hidden_section = _query_flag("test_hidden_section")
def about():
    """Render the (visible) About page."""
    st.header("About Page")
def detail():
    """Render the hidden Detail page (reachable via URL or st.page_link)."""
    st.header("Detail Page")
    st.write("This is a hidden page accessible via URL or page_link")
def admin():
    """Render the hidden Admin page (reachable via st.switch_page)."""
    st.header("Admin Page")
def settings():
    """Render the Settings page (hidden in the all-hidden-section scenario)."""
    st.header("Settings Page")
# Define page objects up front so `home` (below) can reference them in
# st.page_link/st.switch_page. The Detail and Admin pages are hidden:
# reachable by URL, page_link, or switch_page, but excluded from navigation.
about_page = st.Page(about, title="About")
detail_page = st.Page(detail, title="Detail", visibility="hidden")
admin_page = st.Page(admin, title="Admin", visibility="hidden")
def home():
    """Render the default page; exposes entry points into the hidden pages."""
    st.header("Home Page")
    # st.page_link can target a hidden page directly.
    st.page_link(detail_page, label="Go to Detail Page")
    if st.button("Switch to Admin"):
        # st.switch_page also works for hidden pages.
        st.switch_page(admin_page)
home_page = st.Page(home, title="Home", default=True)

pages: list[StreamlitPage] | dict[str, list[StreamlitPage]]

if single_visible:
    # Scenario: only Home is visible, so navigation should not render at all.
    hidden_about = st.Page(
        about, title="About", url_path="about_hidden", visibility="hidden"
    )
    pages = [home_page, hidden_about, detail_page, admin_page]
elif test_hidden_section:
    # Scenario: two sections, where "Main" has visible pages and "Admin"
    # contains only hidden pages — its section header should not appear.
    main_home = st.Page(home, title="Home", default=True)
    main_about = st.Page(about, title="About")
    hidden_admin = st.Page(
        admin, title="Admin", url_path="admin_section", visibility="hidden"
    )
    hidden_settings = st.Page(
        settings, title="Settings", url_path="settings_section", visibility="hidden"
    )
    pages = {
        "Main": [main_home, main_about],
        "Admin": [hidden_admin, hidden_settings],
    }
else:
    # Default scenario: two visible pages, two hidden pages.
    pages = [home_page, about_page, detail_page, admin_page]

# Honor the requested navigation position.
pg = st.navigation(pages, position="top") if nav_position == "top" else st.navigation(pages)
pg.run()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/multipage_apps_v2/mpa_v2_page_visibility.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/multipage_apps_v2/mpa_v2_page_visibility_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import wait_for_app_run
from e2e_playwright.shared.app_utils import goto_app
def test_hidden_pages_not_shown_in_sidebar_nav(app: Page) -> None:
    """Hidden pages must be excluded from the sidebar navigation widget."""
    expect(app.get_by_test_id("stSidebarNav")).to_be_visible()
    # Of the four pages in the app, only Home and About are visible.
    nav_links = app.get_by_test_id("stSidebarNavLink")
    expect(nav_links).to_have_count(2)
    expect(nav_links.first).to_contain_text("Home")
    expect(nav_links.nth(1)).to_contain_text("About")
def test_hidden_page_accessible_via_url(app: Page, app_port: int) -> None:
    """A hidden page is still reachable by navigating to its URL directly."""
    goto_app(app, f"http://localhost:{app_port}/detail")
    # The hidden page renders normally...
    expect(
        app.get_by_test_id("stHeading").filter(has_text="Detail Page")
    ).to_be_visible()
    # ...while the navigation still lists only the two visible pages.
    nav_links = app.get_by_test_id("stSidebarNavLink")
    expect(nav_links).to_have_count(2)
def test_hidden_page_accessible_via_page_link(app: Page) -> None:
    """st.page_link can navigate to a hidden page."""
    expect(app.get_by_test_id("stHeading").filter(has_text="Home Page")).to_be_visible()
    # Home contains a single page_link pointing at the hidden Detail page.
    app.get_by_test_id("stPageLink").click()
    wait_for_app_run(app)
    expect(
        app.get_by_test_id("stHeading").filter(has_text="Detail Page")
    ).to_be_visible()
def test_navigation_shows_only_visible_pages(app: Page) -> None:
    """The nav link count reflects visible pages, before and after navigating."""
    # Four pages exist in total, but only two are visible.
    nav_links = app.get_by_test_id("stSidebarNavLink")
    expect(nav_links).to_have_count(2)
    # Navigate to the About page...
    nav_links.nth(1).click()
    wait_for_app_run(app)
    expect(
        app.get_by_test_id("stHeading").filter(has_text="About Page")
    ).to_be_visible()
    # ...which must not change the number of visible links.
    expect(nav_links).to_have_count(2)
def test_hidden_pages_not_shown_in_top_nav(app: Page, app_port: int) -> None:
    """Hidden pages must also be excluded from top navigation."""
    goto_app(app, f"http://localhost:{app_port}/?nav_position=top")
    # Only the two visible pages (Home, About) get top-nav links.
    links = app.get_by_test_id("stTopNavLink")
    expect(links).to_have_count(2)
    expect(links.first).to_contain_text("Home")
    expect(links.nth(1)).to_contain_text("About")
def test_hidden_page_accessible_via_url_with_top_nav(app: Page, app_port: int) -> None:
    """Direct-URL access to a hidden page works with top navigation too."""
    goto_app(app, f"http://localhost:{app_port}/detail?nav_position=top")
    expect(
        app.get_by_test_id("stHeading").filter(has_text="Detail Page")
    ).to_be_visible()
    # The top nav keeps showing only the visible pages.
    links = app.get_by_test_id("stTopNavLink")
    expect(links).to_have_count(2)
def test_nav_hidden_when_only_one_visible_page(app: Page, app_port: int) -> None:
    """With a single visible page, the sidebar is not mounted at all."""
    goto_app(app, f"http://localhost:{app_port}/?single_visible=true")
    expect(app.get_by_test_id("stHeading").filter(has_text="Home Page")).to_be_visible()
    # Neither the sidebar nor its nav should exist (count 0, not merely
    # invisible) — this prevents an empty sidebar from appearing.
    expect(app.get_by_test_id("stSidebar")).to_have_count(0)
    expect(app.get_by_test_id("stSidebarNav")).to_have_count(0)
def test_nav_hidden_when_only_one_visible_page_top_nav(
    app: Page, app_port: int
) -> None:
    """With a single visible page, top navigation renders no links."""
    goto_app(app, f"http://localhost:{app_port}/?single_visible=true&nav_position=top")
    expect(app.get_by_test_id("stHeading").filter(has_text="Home Page")).to_be_visible()
    # No top-nav links should be mounted at all.
    expect(app.get_by_test_id("stTopNavLink")).to_have_count(0)
def test_hidden_page_accessible_via_switch_page(app: Page) -> None:
    """st.switch_page can navigate to a hidden page."""
    expect(app.get_by_test_id("stHeading").filter(has_text="Home Page")).to_be_visible()
    # The button triggers st.switch_page(admin_page) in the app script.
    app.get_by_test_id("stButton").filter(has_text="Switch to Admin").click()
    wait_for_app_run(app)
    expect(
        app.get_by_test_id("stHeading").filter(has_text="Admin Page")
    ).to_be_visible()
    # Navigation still shows only the two visible pages (Home and About).
    nav_links = app.get_by_test_id("stSidebarNavLink")
    expect(nav_links).to_have_count(2)
def test_section_hidden_when_all_pages_hidden(app: Page, app_port: int) -> None:
    """A section whose pages are all hidden contributes no section header."""
    goto_app(app, f"http://localhost:{app_port}/?test_hidden_section=true")
    expect(app.get_by_test_id("stHeading").filter(has_text="Home Page")).to_be_visible()
    expect(app.get_by_test_id("stSidebarNav")).to_be_visible()
    # Only "Main" appears; the all-hidden "Admin" section is dropped entirely.
    headers = app.get_by_test_id("stNavSectionHeader")
    expect(headers).to_have_count(1)
    expect(headers.first).to_contain_text("Main")
    # And only the two visible pages from the Main section are linked.
    nav_links = app.get_by_test_id("stSidebarNavLink")
    expect(nav_links).to_have_count(2)
def test_section_hidden_when_all_pages_hidden_top_nav(app: Page, app_port: int) -> None:
    """All-hidden sections are also dropped from top navigation."""
    goto_app(
        app, f"http://localhost:{app_port}/?test_hidden_section=true&nav_position=top"
    )
    expect(app.get_by_test_id("stHeading").filter(has_text="Home Page")).to_be_visible()
    # With sections, top nav renders one dropdown per visible section; the
    # all-hidden "Admin" section gets none.
    sections = app.get_by_test_id("stTopNavSection")
    expect(sections).to_have_count(1)
    expect(sections.first).to_contain_text("Main")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/multipage_apps_v2/mpa_v2_page_visibility_test.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/shared/app_target.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for interacting with the Streamlit app under test.
This module defines `AppTarget`, a core e2e abstraction that wraps Playwright's
`Page` and `FrameLocator` APIs into a single, stable interface. The goal is to
centralize (and hide) the "where does the app DOM live?" details so tests and
shared helpers don't need to branch on local vs external/iframe-hosted modes,
which keeps cyclomatic complexity low across the suite.
In e2e tests, prefer using the `app_target` fixture (from
`e2e_playwright/conftest.py`) and call `locator()`, `get_by_test_id()`, etc., on
the returned `AppTarget` instead of accessing `Page`/`FrameLocator` directly.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Literal, cast
if TYPE_CHECKING:
from playwright.sync_api import FrameLocator, Locator, Page
@dataclass(frozen=True)
class AppTarget:
    """A stable abstraction for interacting with the app under test.

    - ``page`` is the top-level Playwright Page we control (routing, events,
      reload, timeouts, tracing, ...).
    - ``dom`` is where app selectors should run: the Page itself in local
      mode, or a FrameLocator when the app is hosted in an iframe.
    - ``base_url`` is the canonical base URL of the app (not the host page).
    """

    page: Page
    dom: Page | FrameLocator
    base_url: str
    mode: Literal["local", "external_direct", "external_host"]

    # Selector helpers — thin delegations to `dom` so callers never need to
    # know whether the app lives in the page or inside an iframe.

    def locator(self, selector: str) -> Locator:
        return self.dom.locator(selector)

    def get_by_test_id(self, test_id: str) -> Locator:
        return self.dom.get_by_test_id(test_id)

    def get_by_role(self, role: str, **kwargs: Any) -> Locator:
        # Playwright types `get_by_role` with a Literal union of valid ARIA
        # roles; we accept plain `str` for convenience and cast here.
        return self.dom.get_by_role(cast("Any", role), **kwargs)

    def get_by_text(self, text: str, **kwargs: Any) -> Locator:
        return self.dom.get_by_text(text, **kwargs)

    # App lifecycle helpers.

    def wait_for_run(self, *, wait_delay: int = 100, initial_wait: int = 210) -> None:
        wait_for_app_target_run(self, wait_delay=wait_delay, initial_wait=initial_wait)

    def wait_for_loaded(self) -> None:
        wait_for_app_target_loaded(self)

    @property
    def locator_context(self) -> Page | FrameLocator:
        return self.dom
def wait_for_app_target_run(
    app: AppTarget,
    *,
    wait_delay: int = 100,
    initial_wait: int = 210,
) -> None:
    """Wait for the given app target to finish running.

    ``wait_delay`` and ``initial_wait`` (milliseconds) are forwarded
    unchanged to ``wait_for_app_run``.

    This intentionally reuses the existing `wait_for_app_run` implementation so
    we have a single place to maintain the core "app is connected and idle"
    detection logic.
    """
    # Import lazily to avoid introducing import-time circular dependencies
    # (conftest imports from this module).
    from e2e_playwright.conftest import wait_for_app_run

    # Wait against `dom` so this works whether the app is the page itself or
    # hosted inside an iframe.
    wait_for_app_run(app.dom, wait_delay=wait_delay, initial_wait=initial_wait)
def wait_for_app_target_loaded(app: AppTarget) -> None:
    """Wait for initial app load (works when hosted in an iframe)."""
    # First wait (up to 30s) for the app container to be attached to the DOM,
    # then wait for the current script run to finish.
    app.dom.locator("[data-testid='stAppViewContainer']").wait_for(
        timeout=30000, state="attached"
    )
    wait_for_app_target_run(app)
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/shared/app_target.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/typing/audio_input_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# Perform type checking tests for st.audio_input
# The return type is always UploadedFile | None
# NOTE: Everything inside this guard runs only under a static type checker
# (mypy/pyright); none of it executes at runtime.
if TYPE_CHECKING:
    from streamlit.elements.widgets.audio_input import AudioInputMixin
    from streamlit.runtime.uploaded_file_manager import UploadedFile

    audio_input = AudioInputMixin().audio_input

    # =====================================================================
    # Basic return type tests
    # =====================================================================

    assert_type(audio_input("Record audio"), UploadedFile | None)

    # =====================================================================
    # Test sample_rate parameter
    # =====================================================================

    # Default sample rate (16000)
    assert_type(audio_input("Record audio", sample_rate=16000), UploadedFile | None)

    # Other valid sample rates
    assert_type(audio_input("Record audio", sample_rate=8000), UploadedFile | None)
    assert_type(audio_input("Record audio", sample_rate=11025), UploadedFile | None)
    assert_type(audio_input("Record audio", sample_rate=22050), UploadedFile | None)
    assert_type(audio_input("Record audio", sample_rate=24000), UploadedFile | None)
    assert_type(audio_input("Record audio", sample_rate=32000), UploadedFile | None)
    assert_type(audio_input("Record audio", sample_rate=44100), UploadedFile | None)
    assert_type(audio_input("Record audio", sample_rate=48000), UploadedFile | None)

    # None for browser default
    assert_type(audio_input("Record audio", sample_rate=None), UploadedFile | None)

    # =====================================================================
    # Test key parameter (str or int)
    # =====================================================================

    assert_type(audio_input("Record audio", key="audio_key"), UploadedFile | None)
    assert_type(audio_input("Record audio", key=123), UploadedFile | None)
    assert_type(audio_input("Record audio", key=None), UploadedFile | None)

    # =====================================================================
    # Test help parameter
    # =====================================================================

    assert_type(
        audio_input("Record audio", help="Click to start recording"),
        UploadedFile | None,
    )
    assert_type(audio_input("Record audio", help=None), UploadedFile | None)

    # =====================================================================
    # Test disabled parameter
    # =====================================================================

    assert_type(audio_input("Record audio", disabled=True), UploadedFile | None)
    assert_type(audio_input("Record audio", disabled=False), UploadedFile | None)

    # =====================================================================
    # Test label_visibility parameter
    # =====================================================================

    assert_type(
        audio_input("Record audio", label_visibility="visible"), UploadedFile | None
    )
    assert_type(
        audio_input("Record audio", label_visibility="hidden"), UploadedFile | None
    )
    assert_type(
        audio_input("Record audio", label_visibility="collapsed"), UploadedFile | None
    )

    # =====================================================================
    # Test width parameter
    # =====================================================================

    assert_type(audio_input("Record audio", width="stretch"), UploadedFile | None)
    assert_type(audio_input("Record audio", width=400), UploadedFile | None)

    # =====================================================================
    # Test callback parameters (on_change, args, kwargs)
    # =====================================================================

    # Zero-argument callback for on_change.
    def my_callback() -> None:
        pass

    # Callback whose extra arguments are supplied via args/kwargs.
    def callback_with_args(x: int, y: str) -> None:
        pass

    assert_type(audio_input("Record audio", on_change=my_callback), UploadedFile | None)
    assert_type(
        audio_input("Record audio", on_change=callback_with_args, args=(1, "test")),
        UploadedFile | None,
    )
    assert_type(
        audio_input(
            "Record audio", on_change=callback_with_args, kwargs={"x": 1, "y": "a"}
        ),
        UploadedFile | None,
    )
    assert_type(audio_input("Record audio", on_change=None), UploadedFile | None)

    # =====================================================================
    # Test with all parameters combined
    # =====================================================================

    assert_type(
        audio_input(
            "Full audio input",
            sample_rate=44100,
            key="full_audio",
            help="Record your voice message",
            on_change=my_callback,
            args=None,
            kwargs=None,
            disabled=False,
            label_visibility="visible",
            width="stretch",
        ),
        UploadedFile | None,
    )

    # =====================================================================
    # Test with all parameters combined (different values)
    # =====================================================================

    assert_type(
        audio_input(
            "High quality recording",
            sample_rate=48000,
            key=42,
            help="This records in high fidelity",
            on_change=callback_with_args,
            args=(1, "audio"),
            kwargs=None,
            disabled=True,
            label_visibility="hidden",
            width=500,
        ),
        UploadedFile | None,
    )

    # =====================================================================
    # Test with browser default sample rate
    # =====================================================================

    assert_type(
        audio_input(
            "Browser default audio",
            sample_rate=None,
            key="browser_default",
            help=None,
            on_change=None,
            args=None,
            kwargs=None,
            disabled=False,
            label_visibility="collapsed",
            width=300,
        ),
        UploadedFile | None,
    )
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/audio_input_types.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/chat_input_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# Perform type checking tests for st.chat_input
# The return type depends on accept_file and accept_audio parameters:
# - accept_file=False and accept_audio=False (default) -> returns str | None
# - accept_file=True/multiple/directory OR accept_audio=True -> returns ChatInputValue | None
if TYPE_CHECKING:
from streamlit.elements.widgets.chat import ChatInputValue, ChatMixin
chat_input = ChatMixin().chat_input
# =====================================================================
# Basic return type tests based on accept_file and accept_audio
# =====================================================================
# Default (no file/audio acceptance) returns str | None
assert_type(chat_input(), str | None)
assert_type(chat_input("Your message"), str | None)
assert_type(chat_input(placeholder="Ask me anything"), str | None)
# accept_file=False explicitly still returns str | None
assert_type(chat_input("Message", accept_file=False), str | None)
assert_type(
chat_input("Message", accept_file=False, accept_audio=False), str | None
)
# accept_file=True returns ChatInputValue | None
assert_type(chat_input("Message", accept_file=True), ChatInputValue | None)
# accept_file="multiple" returns ChatInputValue | None
assert_type(chat_input("Message", accept_file="multiple"), ChatInputValue | None)
# accept_file="directory" returns ChatInputValue | None
assert_type(chat_input("Message", accept_file="directory"), ChatInputValue | None)
# accept_audio=True returns ChatInputValue | None
assert_type(chat_input("Message", accept_audio=True), ChatInputValue | None)
# Both accept_file and accept_audio enabled returns ChatInputValue | None
assert_type(
chat_input("Message", accept_file=True, accept_audio=True),
ChatInputValue | None,
)
assert_type(
chat_input("Message", accept_file="multiple", accept_audio=True),
ChatInputValue | None,
)
# =====================================================================
# Test key parameter (str or int)
# =====================================================================
assert_type(chat_input("Message", key="chat_key"), str | None)
assert_type(chat_input("Message", key=123), str | None)
assert_type(chat_input("Message", key=None), str | None)
assert_type(
chat_input("Message", accept_file=True, key="chat_key"), ChatInputValue | None
)
# =====================================================================
# Test max_chars parameter
# =====================================================================
assert_type(chat_input("Message", max_chars=500), str | None)
assert_type(chat_input("Message", max_chars=None), str | None)
assert_type(
chat_input("Message", accept_file=True, max_chars=1000), ChatInputValue | None
)
# =====================================================================
# Test max_upload_size parameter
# =====================================================================
assert_type(chat_input("Message", max_upload_size=10), str | None)
assert_type(chat_input("Message", max_upload_size=None), str | None)
assert_type(
chat_input("Message", accept_file=True, max_upload_size=50),
ChatInputValue | None,
)
# =====================================================================
# Test file_type parameter
# =====================================================================
assert_type(chat_input("Message", file_type="csv"), str | None)
assert_type(chat_input("Message", file_type=["jpg", "png"]), str | None)
assert_type(chat_input("Message", file_type=None), str | None)
assert_type(
chat_input("Message", accept_file=True, file_type="pdf"), ChatInputValue | None
)
assert_type(
chat_input("Message", accept_file="multiple", file_type=["doc", "docx"]),
ChatInputValue | None,
)
# =====================================================================
# Test audio_sample_rate parameter (only with accept_audio=True)
# =====================================================================
assert_type(
chat_input("Message", accept_audio=True, audio_sample_rate=16000),
ChatInputValue | None,
)
assert_type(
chat_input("Message", accept_audio=True, audio_sample_rate=44100),
ChatInputValue | None,
)
assert_type(
chat_input("Message", accept_audio=True, audio_sample_rate=None),
ChatInputValue | None,
)
# =====================================================================
# Test disabled parameter
# =====================================================================
assert_type(chat_input("Message", disabled=True), str | None)
assert_type(chat_input("Message", disabled=False), str | None)
assert_type(
chat_input("Message", accept_file=True, disabled=True), ChatInputValue | None
)
# =====================================================================
# Test width parameter
# =====================================================================
assert_type(chat_input("Message", width="stretch"), str | None)
assert_type(chat_input("Message", width=400), str | None)
assert_type(
chat_input("Message", accept_file=True, width="stretch"), ChatInputValue | None
)
assert_type(
chat_input("Message", accept_file=True, width=500), ChatInputValue | None
)
# =====================================================================
# Test callback parameters (on_submit, args, kwargs)
# =====================================================================
def my_callback() -> None:
pass
def callback_with_args(x: int, y: str) -> None:
pass
assert_type(chat_input("Message", on_submit=my_callback), str | None)
assert_type(
chat_input("Message", on_submit=callback_with_args, args=(1, "test")),
str | None,
)
assert_type(
chat_input("Message", on_submit=callback_with_args, kwargs={"x": 1, "y": "a"}),
str | None,
)
assert_type(chat_input("Message", on_submit=None), str | None)
assert_type(
chat_input("Message", accept_file=True, on_submit=my_callback),
ChatInputValue | None,
)
# =====================================================================
# Test with all parameters combined (no file/audio - returns str | None)
# =====================================================================
assert_type(
chat_input(
placeholder="Ask me anything",
key="full_chat",
max_chars=1000,
max_upload_size=None,
accept_file=False,
file_type=None,
accept_audio=False,
disabled=False,
on_submit=my_callback,
args=None,
kwargs=None,
width="stretch",
),
str | None,
)
# =====================================================================
# Test with all parameters combined (accept_file=True - returns ChatInputValue | None)
# =====================================================================
assert_type(
chat_input(
placeholder="Upload files",
key="file_chat",
max_chars=500,
max_upload_size=100,
accept_file=True,
file_type=["pdf", "doc"],
accept_audio=False,
audio_sample_rate=16000,
disabled=False,
on_submit=my_callback,
args=None,
kwargs=None,
width=600,
),
ChatInputValue | None,
)
# =====================================================================
# Test with all parameters combined (accept_audio=True - returns ChatInputValue | None)
# =====================================================================
assert_type(
chat_input(
placeholder="Record audio",
key="audio_chat",
max_chars=200,
max_upload_size=50,
accept_file=False,
file_type=None,
accept_audio=True,
audio_sample_rate=48000,
disabled=False,
on_submit=my_callback,
args=None,
kwargs=None,
width="stretch",
),
ChatInputValue | None,
)
# =====================================================================
# Test with all parameters combined (both file and audio - returns ChatInputValue | None)
# =====================================================================
assert_type(
chat_input(
placeholder="Files and audio",
key="full_media_chat",
max_chars=1000,
max_upload_size=200,
accept_file="multiple",
file_type=["jpg", "png", "gif"],
accept_audio=True,
audio_sample_rate=44100,
disabled=False,
on_submit=my_callback,
args=None,
kwargs=None,
width=800,
),
ChatInputValue | None,
)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/chat_input_types.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/text_area_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# Perform type checking tests for st.text_area
# The return type depends on the value parameter:
# - value=str (or default "") -> returns str
# - value=None or SupportsStr|None -> returns str | None
if TYPE_CHECKING:
from streamlit.elements.widgets.text_widgets import TextWidgetsMixin
text_area = TextWidgetsMixin().text_area
# =====================================================================
# Basic return type tests based on value parameter
# =====================================================================
# Default value (empty string) returns str
assert_type(text_area("Enter text"), str)
assert_type(text_area("Enter text", "default value"), str)
assert_type(text_area("Enter text", value="hello world"), str)
# value=None returns str | None
assert_type(text_area("Enter text", None), str | None)
assert_type(text_area("Enter text", value=None), str | None)
# =====================================================================
# Test key parameter (str or int)
# =====================================================================
assert_type(text_area("Enter text", key="my_area"), str)
assert_type(text_area("Enter text", key=123), str)
assert_type(text_area("Enter text", key=None), str)
assert_type(text_area("Enter text", value=None, key="my_area"), str | None)
# =====================================================================
# Test height parameter (int, None, "content", or "stretch")
# =====================================================================
assert_type(text_area("Enter text", height=200), str)
assert_type(text_area("Enter text", height=None), str)
assert_type(text_area("Enter text", height="content"), str)
assert_type(text_area("Enter text", height="stretch"), str)
assert_type(text_area("Enter text", value=None, height=300), str | None)
assert_type(text_area("Enter text", value=None, height="content"), str | None)
assert_type(text_area("Enter text", value=None, height="stretch"), str | None)
# =====================================================================
# Test max_chars parameter
# =====================================================================
assert_type(text_area("Enter text", max_chars=500), str)
assert_type(text_area("Enter text", max_chars=None), str)
assert_type(text_area("Enter text", value=None, max_chars=1000), str | None)
# =====================================================================
# Test help parameter
# =====================================================================
assert_type(text_area("Enter text", help="Type your text here"), str)
assert_type(text_area("Enter text", help=None), str)
assert_type(text_area("Enter text", value=None, help="Help text"), str | None)
# =====================================================================
# Test placeholder parameter (keyword-only)
# =====================================================================
assert_type(text_area("Enter text", placeholder="Type here..."), str)
assert_type(text_area("Enter text", placeholder=None), str)
assert_type(
text_area("Enter text", value=None, placeholder="Placeholder"), str | None
)
# =====================================================================
# Test disabled parameter (keyword-only)
# =====================================================================
assert_type(text_area("Enter text", disabled=True), str)
assert_type(text_area("Enter text", disabled=False), str)
assert_type(text_area("Enter text", value=None, disabled=True), str | None)
# =====================================================================
# Test label_visibility parameter (keyword-only)
# =====================================================================
assert_type(text_area("Enter text", label_visibility="visible"), str)
assert_type(text_area("Enter text", label_visibility="hidden"), str)
assert_type(text_area("Enter text", label_visibility="collapsed"), str)
assert_type(
text_area("Enter text", value=None, label_visibility="hidden"), str | None
)
# =====================================================================
# Test width parameter (keyword-only)
# =====================================================================
assert_type(text_area("Enter text", width="stretch"), str)
assert_type(text_area("Enter text", width=400), str)
assert_type(text_area("Enter text", value=None, width=500), str | None)
# =====================================================================
# Test bind parameter (keyword-only)
# =====================================================================
assert_type(text_area("Enter text", bind="query-params"), str)
assert_type(text_area("Enter text", bind=None), str)
assert_type(text_area("Enter text", value=None, bind="query-params"), str | None)
# =====================================================================
# Test callback parameters (on_change, args, kwargs)
# =====================================================================
def my_callback() -> None:
pass
def callback_with_args(x: int, y: str) -> None:
pass
assert_type(text_area("Enter text", on_change=my_callback), str)
assert_type(
text_area("Enter text", on_change=callback_with_args, args=(1, "a")), str
)
assert_type(
text_area(
"Enter text", on_change=callback_with_args, kwargs={"x": 1, "y": "a"}
),
str,
)
assert_type(text_area("Enter text", on_change=None), str)
assert_type(text_area("Enter text", value=None, on_change=my_callback), str | None)
assert_type(
text_area(
"Enter text", value=None, on_change=callback_with_args, args=(1, "test")
),
str | None,
)
# =====================================================================
# Test with all parameters combined (str value)
# =====================================================================
assert_type(
text_area(
"Full text area",
value="initial text",
height=300,
max_chars=1000,
key="full_area",
help="Enter your text here",
on_change=my_callback,
args=None,
kwargs=None,
placeholder="Type something...",
disabled=False,
label_visibility="visible",
width="stretch",
bind="query-params",
),
str,
)
# =====================================================================
# Test with all parameters combined (None value)
# =====================================================================
assert_type(
text_area(
"Full text area",
value=None,
height="content",
max_chars=500,
key="nullable_area",
help="Enter your description",
on_change=my_callback,
args=None,
kwargs=None,
placeholder="Description",
disabled=False,
label_visibility="visible",
width=400,
bind="query-params",
),
str | None,
)
# =====================================================================
# Test height variants with value combinations
# =====================================================================
# Integer height
assert_type(text_area("Notes", value="Some notes", height=150), str)
assert_type(text_area("Notes", value=None, height=150), str | None)
# "content" height - auto-sizes to content
assert_type(text_area("Description", value="Text", height="content"), str)
assert_type(text_area("Description", value=None, height="content"), str | None)
# "stretch" height - fills available space
assert_type(text_area("Full content", value="Content", height="stretch"), str)
assert_type(text_area("Full content", value=None, height="stretch"), str | None)
# None height - uses default (approximately three lines)
assert_type(text_area("Default height", value="Text", height=None), str)
assert_type(text_area("Default height", value=None, height=None), str | None)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/text_area_types.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/text_input_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# Perform type checking tests for st.text_input
# The return type depends on the value parameter:
# - value=str (or default "") -> returns str
# - value=None or SupportsStr|None -> returns str | None
if TYPE_CHECKING:
from streamlit.elements.widgets.text_widgets import TextWidgetsMixin
text_input = TextWidgetsMixin().text_input
# =====================================================================
# Basic return type tests based on value parameter
# =====================================================================
# Default value (empty string) returns str
assert_type(text_input("Enter text"), str)
assert_type(text_input("Enter text", "default value"), str)
assert_type(text_input("Enter text", value="hello"), str)
# value=None returns str | None
assert_type(text_input("Enter text", None), str | None)
assert_type(text_input("Enter text", value=None), str | None)
# =====================================================================
# Test key parameter (str or int)
# =====================================================================
assert_type(text_input("Enter text", key="my_input"), str)
assert_type(text_input("Enter text", key=123), str)
assert_type(text_input("Enter text", key=None), str)
assert_type(text_input("Enter text", value=None, key="my_input"), str | None)
# =====================================================================
# Test type parameter ("default" or "password")
# =====================================================================
assert_type(text_input("Enter text", type="default"), str)
assert_type(text_input("Enter text", type="password"), str)
assert_type(text_input("Enter password", value=None, type="password"), str | None)
# =====================================================================
# Test max_chars parameter
# =====================================================================
assert_type(text_input("Enter text", max_chars=100), str)
assert_type(text_input("Enter text", max_chars=None), str)
assert_type(text_input("Enter text", value=None, max_chars=50), str | None)
# =====================================================================
# Test help parameter
# =====================================================================
assert_type(text_input("Enter text", help="Type something here"), str)
assert_type(text_input("Enter text", help=None), str)
assert_type(text_input("Enter text", value=None, help="Help text"), str | None)
# =====================================================================
# Test autocomplete parameter
# =====================================================================
assert_type(text_input("Enter text", autocomplete="off"), str)
assert_type(text_input("Email", autocomplete="email"), str)
assert_type(text_input("Enter text", autocomplete=None), str)
assert_type(text_input("Enter text", value=None, autocomplete="name"), str | None)
# =====================================================================
# Test placeholder parameter (keyword-only)
# =====================================================================
assert_type(text_input("Enter text", placeholder="Type here..."), str)
assert_type(text_input("Enter text", placeholder=None), str)
assert_type(
text_input("Enter text", value=None, placeholder="Placeholder"), str | None
)
# =====================================================================
# Test disabled parameter (keyword-only)
# =====================================================================
assert_type(text_input("Enter text", disabled=True), str)
assert_type(text_input("Enter text", disabled=False), str)
assert_type(text_input("Enter text", value=None, disabled=True), str | None)
# =====================================================================
# Test label_visibility parameter (keyword-only)
# =====================================================================
assert_type(text_input("Enter text", label_visibility="visible"), str)
assert_type(text_input("Enter text", label_visibility="hidden"), str)
assert_type(text_input("Enter text", label_visibility="collapsed"), str)
assert_type(
text_input("Enter text", value=None, label_visibility="hidden"), str | None
)
# =====================================================================
# Test icon parameter (keyword-only)
# =====================================================================
assert_type(text_input("Search", icon=":material/search:"), str)
assert_type(text_input("Search", icon=None), str)
assert_type(text_input("Search", value=None, icon=":material/search:"), str | None)
# =====================================================================
# Test width parameter (keyword-only)
# =====================================================================
assert_type(text_input("Enter text", width="stretch"), str)
assert_type(text_input("Enter text", width=200), str)
assert_type(text_input("Enter text", value=None, width=300), str | None)
# =====================================================================
# Test bind parameter (keyword-only)
# =====================================================================
assert_type(text_input("Enter text", bind="query-params"), str)
assert_type(text_input("Enter text", bind=None), str)
assert_type(text_input("Enter text", value=None, bind="query-params"), str | None)
# =====================================================================
# Test callback parameters (on_change, args, kwargs)
# =====================================================================
def my_callback() -> None:
pass
def callback_with_args(x: int, y: str) -> None:
pass
assert_type(text_input("Enter text", on_change=my_callback), str)
assert_type(
text_input("Enter text", on_change=callback_with_args, args=(1, "a")), str
)
assert_type(
text_input(
"Enter text", on_change=callback_with_args, kwargs={"x": 1, "y": "a"}
),
str,
)
assert_type(text_input("Enter text", on_change=None), str)
assert_type(text_input("Enter text", value=None, on_change=my_callback), str | None)
assert_type(
text_input(
"Enter text", value=None, on_change=callback_with_args, args=(1, "test")
),
str | None,
)
# =====================================================================
# Test with all parameters combined (str value)
# =====================================================================
assert_type(
text_input(
"Full text input",
value="initial",
max_chars=100,
key="full_input",
type="default",
help="Enter your text here",
autocomplete="off",
on_change=my_callback,
args=None,
kwargs=None,
placeholder="Type something...",
disabled=False,
label_visibility="visible",
icon=":material/edit:",
width="stretch",
bind="query-params",
),
str,
)
# =====================================================================
# Test with all parameters combined (None value)
# =====================================================================
assert_type(
text_input(
"Full text input",
value=None,
max_chars=50,
key="nullable_input",
type="password",
help="Enter your password",
autocomplete="new-password",
on_change=my_callback,
args=None,
kwargs=None,
placeholder="Password",
disabled=False,
label_visibility="visible",
icon=":material/lock:",
width=300,
bind="query-params",
),
str | None,
)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/text_input_types.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/elements/lib/mutable_expander_container.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Literal
from typing_extensions import Self
from streamlit.delta_generator import DeltaGenerator
if TYPE_CHECKING:
from types import TracebackType
from streamlit.cursor import Cursor
class ExpanderContainer(DeltaGenerator):
"""DeltaGenerator subclass returned by ``st.expander``.
Provides the ``.open`` property for checking expander state when
``on_change`` is used.
"""
def __init__(
self,
root_container: int | None,
cursor: Cursor | None,
parent: DeltaGenerator | None,
block_type: str | None,
) -> None:
super().__init__(root_container, cursor, parent, block_type)
self._open: bool | None = None
@property
def open(self) -> bool | None:
"""The open/collapsed state of the expander.
Returns
-------
bool or None
``True`` if expanded, ``False`` if collapsed, or ``None`` if
state tracking is not enabled (``on_change`` was not set or
set to ``"ignore"``).
"""
return self._open
@open.setter # noqa: A003
def open(self, value: bool | None) -> None:
self._open = value
def __enter__(self) -> Self: # type: ignore[override]
super().__enter__()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> Literal[False]:
return super().__exit__(exc_type, exc_val, exc_tb)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/elements/lib/mutable_expander_container.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/streamlit/elements/lib/mutable_popover_container.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Literal
from typing_extensions import Self
from streamlit.delta_generator import DeltaGenerator
if TYPE_CHECKING:
from types import TracebackType
from streamlit.cursor import Cursor
class PopoverContainer(DeltaGenerator):
    """Container returned by ``st.popover``.

    Behaves exactly like a regular ``DeltaGenerator`` block, but additionally
    exposes an ``open`` property reflecting the popover's open/closed state
    when state tracking is enabled via ``on_change``.
    """

    def __init__(
        self,
        root_container: int | None,
        cursor: Cursor | None,
        parent: DeltaGenerator | None,
        block_type: str | None,
    ) -> None:
        super().__init__(root_container, cursor, parent, block_type)
        # Stays None until a state is assigned (requires on_change tracking).
        self._open: bool | None = None

    @property
    def open(self) -> bool | None:
        """The open/closed state of the popover.

        Returns
        -------
        bool or None
            ``True`` when the popover is open, ``False`` when it is closed,
            and ``None`` when state tracking is not enabled (``on_change``
            was not set, or was set to ``"ignore"``).
        """
        return self._open

    @open.setter  # noqa: A003
    def open(self, new_state: bool | None) -> None:
        self._open = new_state

    def __enter__(self) -> Self:  # type: ignore[override]
        # Delegate block bookkeeping to DeltaGenerator, but preserve the
        # concrete type for callers using ``with st.popover(...) as pop:``.
        super().__enter__()
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> Literal[False]:
        exit_result: Literal[False] = super().__exit__(exc_type, exc_val, exc_tb)
        return exit_result
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/elements/lib/mutable_popover_container.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/streamlit/elements/lib/mutable_tab_container.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Literal
from typing_extensions import Self
from streamlit.delta_generator import DeltaGenerator
if TYPE_CHECKING:
from types import TracebackType
from streamlit.cursor import Cursor
class TabContainer(DeltaGenerator):
    """Container returned for each tab created via ``st.tabs``.

    Behaves exactly like a regular ``DeltaGenerator`` block, but additionally
    exposes an ``open`` property reflecting whether this tab is the active
    one when state tracking is enabled via ``on_change``.
    """

    def __init__(
        self,
        root_container: int | None,
        cursor: Cursor | None,
        parent: DeltaGenerator | None,
        block_type: str | None,
    ) -> None:
        super().__init__(root_container, cursor, parent, block_type)
        # Stays None until a state is assigned (requires on_change tracking).
        self._open: bool | None = None

    @property
    def open(self) -> bool | None:
        """Whether this tab is the currently active tab.

        Returns
        -------
        bool or None
            ``True`` when this tab is active, ``False`` when it is inactive,
            and ``None`` when state tracking is not enabled (``on_change``
            was not set, or was set to ``"ignore"``).
        """
        return self._open

    @open.setter  # noqa: A003
    def open(self, new_state: bool | None) -> None:
        self._open = new_state

    def __enter__(self) -> Self:  # type: ignore[override]
        # Delegate block bookkeeping to DeltaGenerator, but preserve the
        # concrete type for callers using ``with tab:``.
        super().__enter__()
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> Literal[False]:
        exit_result: Literal[False] = super().__exit__(exc_type, exc_val, exc_tb)
        return exit_result
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/elements/lib/mutable_tab_container.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/typing/expander_container_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Type tests for ExpanderContainer."""
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# Perform some "type checking testing"; mypy should flag any assignments that are
# incorrect.
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
from streamlit.elements.layouts import LayoutsMixin
from streamlit.elements.lib.mutable_expander_container import ExpanderContainer
expander = LayoutsMixin().expander
# st.expander returns ExpanderContainer
assert_type(expander("Test"), ExpanderContainer)
# ExpanderContainer is a DeltaGenerator (Liskov substitution)
exp: DeltaGenerator = expander("Test")
assert_type(exp, DeltaGenerator)
# Context manager returns Self
with expander("Test") as ctx:
assert_type(ctx, ExpanderContainer)
# .open property returns bool | None
assert_type(expander("Test").open, bool | None)
# on_change accepts string literals
assert_type(expander("Test", on_change="rerun"), ExpanderContainer)
assert_type(expander("Test", on_change="ignore"), ExpanderContainer)
# on_change accepts callable with key
def _noop() -> None: ...
assert_type(expander("Test", key="k", on_change=_noop), ExpanderContainer)
# on_change callable with args and kwargs
def _callback(x: int, y: str) -> None: ...
assert_type(
expander(
"Test",
key="k2",
on_change=_callback,
args=(1,),
kwargs={"y": "hello"},
),
ExpanderContainer,
)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/expander_container_types.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/popover_container_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Type tests for PopoverContainer."""
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# Perform some "type checking testing"; mypy should flag any assignments that are
# incorrect.
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
from streamlit.elements.layouts import LayoutsMixin
from streamlit.elements.lib.mutable_popover_container import PopoverContainer
popover = LayoutsMixin().popover
# st.popover returns PopoverContainer
assert_type(popover("Test"), PopoverContainer)
# PopoverContainer is a DeltaGenerator (Liskov substitution)
pop: DeltaGenerator = popover("Test")
assert_type(pop, DeltaGenerator)
# Context manager returns Self
with popover("Test") as ctx:
assert_type(ctx, PopoverContainer)
# .open property returns bool | None
assert_type(popover("Test").open, bool | None)
# on_change accepts "ignore", "rerun", or a callable
popover("Test", on_change="ignore")
popover("Test", on_change="rerun")
popover("Test", on_change=lambda: None)
# Callback with args and kwargs
def my_callback(prefix: str, suffix: str = "") -> None: ...
popover("Test", on_change=my_callback, args=("hello",), kwargs={"suffix": "world"})
# Callback without key is valid
popover("Test", on_change=lambda: None)
# Callback with key is also valid
popover("Test", key="my_pop", on_change=lambda: None)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/popover_container_types.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/tab_container_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Type tests for TabContainer."""
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# Perform some "type checking testing"; mypy should flag any assignments that are
# incorrect.
if TYPE_CHECKING:
from collections.abc import Sequence
from streamlit.delta_generator import DeltaGenerator
from streamlit.elements.layouts import LayoutsMixin
from streamlit.elements.lib.mutable_tab_container import TabContainer
tabs = LayoutsMixin().tabs
# st.tabs returns Sequence[TabContainer]
assert_type(tabs(["A", "B", "C"]), Sequence[TabContainer])
# Tab unpacking works correctly
tab1, tab2, tab3 = tabs(["A", "B", "C"])
assert_type(tab1, TabContainer)
assert_type(tab2, TabContainer)
assert_type(tab3, TabContainer)
# TabContainer is a DeltaGenerator (Liskov substitution)
tab_list: Sequence[DeltaGenerator] = tabs(["A", "B"])
assert_type(tab_list, Sequence[DeltaGenerator])
# Context manager returns Self
with tabs(["A", "B"])[0] as ctx:
assert_type(ctx, TabContainer)
# .open property returns bool | None
assert_type(tabs(["A", "B"])[0].open, bool | None)
# --- Callback support ---
def valid_callback() -> None:
pass
def callback_with_args(arg1: str, arg2: int) -> None:
pass
# Valid usages — should type check
assert_type(tabs(["A", "B"], on_change="rerun"), Sequence[TabContainer])
assert_type(
tabs(["A", "B"], on_change=valid_callback, key="t1"), Sequence[TabContainer]
)
assert_type(
tabs(
["A", "B"],
on_change=callback_with_args,
args=("test", 1),
key="t2",
),
Sequence[TabContainer],
)
assert_type(
tabs(
["A", "B"],
on_change=callback_with_args,
kwargs={"arg1": "test", "arg2": 1},
key="t3",
),
Sequence[TabContainer],
)
# Invalid usages — should NOT type check
tabs(["A", "B"], on_change=123) # type: ignore[arg-type]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/tab_container_types.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/theming/theme_metric_value_style.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
# Keep the sidebar collapsed so snapshots capture only the metrics under test.
st.set_page_config(initial_sidebar_state="collapsed")
st.header("Metric Value Style Test")
# Three side-by-side metrics exercising positive, inverted, and plain deltas.
col1, col2, col3 = st.columns(3)
with col1:
    st.metric("Revenue", "$1,234,567", "+12.3%")
with col2:
    # "inverse" flips the default delta coloring (see st.metric docs).
    st.metric("Users", "45,678", "-5.2%", delta_color="inverse")
with col3:
    st.metric("Performance", "98.5%", "+2.1%")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/theming/theme_metric_value_style.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/theming/theme_metric_value_style_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E tests for metricValueFontSize and metricValueFontWeight theme options."""
import os
import pytest
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import ImageCompareFunction
from e2e_playwright.shared.app_utils import expect_no_skeletons, get_metric
@pytest.fixture(scope="module")
@pytest.mark.early
def configure_metric_value_style():
    """Configure custom metric value font size (using rem) and weight.

    Sets the theme env vars for the module's tests and restores the previous
    values (or removes the vars) afterwards, even if a test fails. The
    original version deleted the vars unconditionally, which would both lose
    any pre-existing values and raise KeyError if teardown ran twice.
    """
    # NOTE(review): pytest marks applied to fixtures have no effect in pytest
    # itself -- confirm whether a custom plugin honors @pytest.mark.early here.
    overrides = {
        "STREAMLIT_THEME_METRIC_VALUE_FONT_SIZE": "3rem",
        "STREAMLIT_THEME_METRIC_VALUE_FONT_WEIGHT": "300",
    }
    previous = {name: os.environ.get(name) for name in overrides}
    os.environ.update(overrides)
    try:
        yield
    finally:
        # Restore the environment exactly as it was before the fixture ran.
        for name, old_value in previous.items():
            if old_value is None:
                os.environ.pop(name, None)
            else:
                os.environ[name] = old_value
@pytest.mark.usefixtures("configure_metric_value_style")
def test_metric_value_font_size_with_rem(app: Page) -> None:
    """Test that metricValueFontSize accepts rem values and applies them correctly."""
    # Wait until the app has fully rendered (no loading skeletons remain).
    expect_no_skeletons(app, timeout=25000)
    metric = get_metric(app, "Revenue")
    metric_value = metric.get_by_test_id("stMetricValue")
    # Verify rem value is converted and applied correctly (3rem = 48px with 16px base)
    expect(metric_value).to_have_css("font-size", "48px")
    # Verify custom font weight is applied (300)
    expect(metric_value).to_have_css("font-weight", "300")
    # Verify it's NOT the default size (2.25rem = 36px)
    expect(metric_value).not_to_have_css("font-size", "36px")
@pytest.mark.usefixtures("configure_metric_value_style")
def test_metric_value_style_snapshot(app: Page, assert_snapshot: ImageCompareFunction) -> None:
    """Visual snapshot test for custom metric value styling."""
    expect_no_skeletons(app, timeout=25000)
    # Wait for fonts to load to reduce flakiness
    app.wait_for_timeout(5000)
    # Snapshot only the metric element so unrelated page changes don't break it.
    metric = get_metric(app, "Revenue")
    assert_snapshot(metric, name="metric_value_custom_style")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/theming/theme_metric_value_style_test.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/st_altair_chart_multiview_select.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-view Altair charts with selections.
This script tests selection functionality for multi-view charts including:
- Layer charts (overlapping views)
- HConcat charts (horizontally concatenated views)
- VConcat charts (vertically concatenated views)
- Charts with multiple independent selections
"""
from typing import cast
import altair as alt
import pandas as pd
from vega_datasets import data
import streamlit as st
@st.cache_data
def get_cars_data() -> pd.DataFrame:
    """Load the vega_datasets "cars" table once and cache it across reruns."""
    return cast("pd.DataFrame", data.cars())
cars = get_cars_data()
st.header("Multi-view charts with selections")
# Layer chart with selection
st.subheader("Layer chart with selection_point")
layer_point = alt.selection_point(
name="layer_selection", fields=["Origin", "Horsepower", "Miles_per_Gallon"]
)
layer_chart = alt.layer(
alt.Chart(cars).mark_line().encode(x="Horsepower:Q", y="Miles_per_Gallon:Q"),
alt.Chart(cars)
.mark_circle(size=60)
.encode(
x="Horsepower:Q",
y="Miles_per_Gallon:Q",
color=alt.condition(layer_point, "Origin:N", alt.value("lightgray")),
tooltip=alt.value(None),
)
.add_params(layer_point),
)
layer_selection = st.altair_chart(
layer_chart, on_select="rerun", key="layer_chart", width="stretch"
)
# Check if any selection parameter has actual data (non-empty dict/list)
if any(layer_selection["selection"].get(k) for k in layer_selection["selection"]):
st.write("Layer chart selection:", str(layer_selection["selection"]))
# HConcat chart with shared selection
st.subheader("HConcat chart with shared selection_interval")
hconcat_interval = alt.selection_interval(name="hconcat_selection")
hconcat_chart = alt.hconcat(
alt.Chart(cars)
.mark_circle()
.encode(
x="Horsepower:Q",
y="Miles_per_Gallon:Q",
color=alt.condition(hconcat_interval, "Origin:N", alt.value("lightgray")),
tooltip=alt.value(None),
)
.add_params(hconcat_interval)
.properties(width=250, height=200),
alt.Chart(cars)
.mark_bar()
.encode(
x="Origin:N",
y="count():Q",
color=alt.condition(hconcat_interval, "Origin:N", alt.value("lightgray")),
tooltip=alt.value(None),
)
.properties(width=150, height=200),
)
hconcat_selection = st.altair_chart(
hconcat_chart, on_select="rerun", key="hconcat_chart"
)
# Check if any selection parameter has actual data (non-empty dict/list)
if any(hconcat_selection["selection"].get(k) for k in hconcat_selection["selection"]):
st.write("HConcat chart selection:", str(hconcat_selection["selection"]))
# VConcat chart with selection
st.subheader("VConcat chart with selection_point")
vconcat_point = alt.selection_point(name="vconcat_selection", fields=["Origin"])
vconcat_chart = alt.vconcat(
alt.Chart(cars)
.mark_circle()
.encode(
x="Horsepower:Q",
y="Miles_per_Gallon:Q",
color=alt.condition(vconcat_point, "Origin:N", alt.value("lightgray")),
tooltip=alt.value(None),
)
.add_params(vconcat_point)
.properties(width=400, height=150),
alt.Chart(cars)
.mark_bar()
.encode(
x="Origin:N",
y="count():Q",
color=alt.condition(vconcat_point, "Origin:N", alt.value("lightgray")),
tooltip=alt.value(None),
)
.properties(width=400, height=100),
)
vconcat_selection = st.altair_chart(
vconcat_chart, on_select="rerun", key="vconcat_chart"
)
# Check if any selection parameter has actual data (non-empty dict/list)
if any(vconcat_selection["selection"].get(k) for k in vconcat_selection["selection"]):
st.write("VConcat chart selection:", str(vconcat_selection["selection"]))
# HConcat chart with MULTIPLE selections (one per view)
st.subheader("HConcat chart with multiple selections")
hconcat_left_sel = alt.selection_point(
name="left_point", fields=["Origin", "Horsepower", "Miles_per_Gallon"]
)
hconcat_right_sel = alt.selection_interval(name="right_interval")
hconcat_multi_chart = alt.hconcat(
alt.Chart(cars)
.mark_circle()
.encode(
x="Horsepower:Q",
y="Miles_per_Gallon:Q",
color=alt.condition(hconcat_left_sel, "Origin:N", alt.value("lightgray")),
tooltip=alt.value(None),
)
.add_params(hconcat_left_sel)
.properties(width=250, height=200),
alt.Chart(cars)
.mark_circle()
.encode(
x="Acceleration:Q",
y="Displacement:Q",
color=alt.condition(hconcat_right_sel, "Origin:N", alt.value("lightgray")),
tooltip=alt.value(None),
)
.add_params(hconcat_right_sel)
.properties(width=250, height=200),
)
hconcat_multi_selection = st.altair_chart(
hconcat_multi_chart, on_select="rerun", key="hconcat_multi_chart"
)
# Check if any selection parameter has actual data (non-empty dict/list)
if any(
hconcat_multi_selection["selection"].get(k)
for k in hconcat_multi_selection["selection"]
):
st.write("HConcat multi selection:", str(hconcat_multi_selection["selection"]))
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_altair_chart_multiview_select.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/st_altair_chart_multiview_select_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for multi-view Altair chart selections.
This tests selection functionality for multi-view charts including:
- Layer charts (overlapping views)
- HConcat charts (horizontally concatenated views)
- VConcat charts (vertically concatenated views)
- Charts with multiple independent selections
"""
import re
from dataclasses import dataclass
from playwright.sync_api import Locator, Page, expect
from e2e_playwright.conftest import wait_for_app_run
from e2e_playwright.shared.app_utils import (
expect_prefixed_markdown,
get_element_by_key,
)
@dataclass
class _MousePosition:
    """A mouse position in CSS pixels, relative to a chart's bounding box."""

    # Offset from the chart's left edge.
    x: int
    # Offset from the chart's top edge.
    y: int
def _create_selection_rectangle(
    app: Page,
    chart: Locator,
    canvas_start_pos: _MousePosition,
    canvas_end_pos: _MousePosition,
) -> None:
    """Drag out an interval-selection rectangle on *chart*.

    Positions are chart-relative; the helper translates them into absolute
    page coordinates and waits for the resulting app rerun to finish.
    """
    expect(chart).to_be_visible()
    chart.scroll_into_view_if_needed()
    bounding_box = chart.bounding_box()
    assert bounding_box is not None
    # Top-left corner of the chart in absolute page coordinates.
    canvas_start_x_px = bounding_box.get("x", 0)
    canvas_start_y_px = bounding_box.get("y", 0)
    # Press, drag, and release to draw the selection rectangle.
    app.mouse.move(
        canvas_start_x_px + canvas_start_pos.x, canvas_start_y_px + canvas_start_pos.y
    )
    app.mouse.down()
    app.mouse.move(
        canvas_start_x_px + canvas_end_pos.x, canvas_start_y_px + canvas_end_pos.y
    )
    app.mouse.up()
    wait_for_app_run(app)
def _click(app: Page, chart: Locator, click_position: _MousePosition) -> None:
    """Click *chart* at a chart-relative position and wait for the rerun."""
    expect(chart).to_be_visible()
    chart.scroll_into_view_if_needed()
    chart.click(position={"x": click_position.x, "y": click_position.y})
    wait_for_app_run(app)
def _get_layer_chart(app: Page) -> Locator:
    """Locator for the layer chart's Vega view (element keyed ``layer_chart``)."""
    return get_element_by_key(app, "layer_chart").locator("[role='graphics-document']")


def _get_hconcat_chart(app: Page) -> Locator:
    """Locator for the hconcat chart's Vega view (keyed ``hconcat_chart``)."""
    return get_element_by_key(app, "hconcat_chart").locator(
        "[role='graphics-document']"
    )


def _get_vconcat_chart(app: Page) -> Locator:
    """Locator for the vconcat chart's Vega view (keyed ``vconcat_chart``)."""
    return get_element_by_key(app, "vconcat_chart").locator(
        "[role='graphics-document']"
    )


def _get_hconcat_multi_chart(app: Page) -> Locator:
    """Locator for the multi-selection hconcat chart (keyed ``hconcat_multi_chart``)."""
    return get_element_by_key(app, "hconcat_multi_chart").locator(
        "[role='graphics-document']"
    )
def test_layer_chart_point_selection(app: Page) -> None:
    """Test point selection on a layer chart (multi-view)."""
    chart = _get_layer_chart(app)
    expect(chart).to_be_visible()
    chart.scroll_into_view_if_needed()
    # Verify no selection text is displayed before interaction
    selection_text = app.get_by_text("Layer chart selection:")
    expect(selection_text).not_to_be_visible()
    # Click on a point in the scatter layer
    _click(app, chart, _MousePosition(264, 120))
    # Verify selection text is displayed
    expected_prefix = "Layer chart selection:"
    # The exact datum depends on layout, so only the overall shape is checked.
    expected_selection = re.compile(r"\{'layer_selection': \[\{.+\}\]\}")
    expect_prefixed_markdown(app, expected_prefix, expected_selection)
def test_hconcat_chart_interval_selection(app: Page) -> None:
    """Test interval selection on an hconcat chart (multi-view) with cross-view highlighting."""
    chart = _get_hconcat_chart(app)
    expect(chart).to_be_visible()
    chart.scroll_into_view_if_needed()
    # Verify no selection text is displayed before interaction
    selection_text = app.get_by_text("HConcat chart selection:")
    expect(selection_text).not_to_be_visible()
    # Create interval selection on the left scatter plot
    _create_selection_rectangle(
        app, chart, _MousePosition(100, 80), _MousePosition(180, 150)
    )
    # Verify selection text is displayed with interval selection data
    # (interval selections report per-field value ranges, not data points).
    expected_prefix = "HConcat chart selection:"
    expected_selection = re.compile(
        r"\{'hconcat_selection': \{'Horsepower': \[.+, .+\], 'Miles_per_Gallon': \[.+, .+\]\}\}"
    )
    expect_prefixed_markdown(app, expected_prefix, expected_selection)
def test_vconcat_chart_point_selection(app: Page) -> None:
    """Test point selection on a vconcat chart (multi-view) with field-based selection."""
    chart = _get_vconcat_chart(app)
    expect(chart).to_be_visible()
    chart.scroll_into_view_if_needed()
    # Verify no selection text is displayed before interaction
    selection_text = app.get_by_text("VConcat chart selection:")
    expect(selection_text).not_to_be_visible()
    # Click on a point in the scatter plot (top view)
    _click(app, chart, _MousePosition(200, 80))
    # Verify selection text is displayed - field-based selection returns the Origin value
    expected_prefix = "VConcat chart selection:"
    expected_selection = re.compile(
        r"\{'vconcat_selection': \[\{'Origin': '(USA|Japan|Europe)'\}\]\}"
    )
    expect_prefixed_markdown(app, expected_prefix, expected_selection)
def test_hconcat_chart_multiple_selections(app: Page) -> None:
    """Test that a multi-view chart with multiple selection params works correctly.

    This test verifies that a point selection can be triggered on the left chart
    of an hconcat chart that has two separate selections (one per view).
    The key test is that having multiple selection params doesn't break selection handling.
    """
    chart = _get_hconcat_multi_chart(app)
    expect(chart).to_be_visible()
    chart.scroll_into_view_if_needed()
    # Verify no selection text is displayed before interaction
    selection_text = app.get_by_text("HConcat multi selection:")
    expect(selection_text).not_to_be_visible()
    # Click on the LEFT chart to trigger point selection
    # The left chart uses Horsepower vs Miles_per_Gallon
    _click(app, chart, _MousePosition(150, 120))
    # Verify the point selection on the left chart is captured
    # The selection dict only includes triggered selections (non-empty),
    # so 'right_interval' is absent from the expected output.
    expected_prefix = "HConcat multi selection:"
    expected_left = re.compile(r"\{'left_point': \[\{.+\}\]\}")
    expect_prefixed_markdown(app, expected_prefix, expected_left)
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_altair_chart_multiview_select_test.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/path_security.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared path security utilities for preventing path traversal and SSRF attacks.
This module provides a centralized implementation for path validation that is
used by multiple parts of the codebase. Having a single implementation ensures
consistent security checks and avoids divergent behavior between components.
Security Context
----------------
These checks are designed to run BEFORE any filesystem operations (like
``os.path.realpath()``) to prevent Windows from triggering SMB connections
to attacker-controlled servers when resolving UNC paths. This prevents
SSRF attacks and NTLM hash disclosure.
"""
from __future__ import annotations
import os
import string
def is_unsafe_path_pattern(path: str) -> bool:
    r"""Return True if path contains UNC, absolute, drive, or traversal patterns.

    Dangerous patterns rejected here can lead to:

    - SSRF attacks via Windows UNC path resolution
    - NTLM hash disclosure via SMB connections
    - Path traversal outside intended directories
    - Path truncation via null bytes

    IMPORTANT: This check must run BEFORE any ``os.path.realpath()`` calls
    to prevent Windows from triggering SMB connections to attacker-controlled
    servers.

    Parameters
    ----------
    path : str
        The path string to validate.

    Returns
    -------
    bool
        True if the path contains unsafe patterns, False if it appears safe
        for further processing.

    Examples
    --------
    >>> is_unsafe_path_pattern("subdir/file.js")
    False
    >>> is_unsafe_path_pattern("\\\\server\\share")
    True
    >>> is_unsafe_path_pattern("../../../etc/passwd")
    True
    >>> is_unsafe_path_pattern("C:\\Windows\\system32")
    True
    """
    # Null bytes can truncate paths in lower-level APIs.
    if "\x00" in path:
        return True

    # UNC network shares: \\server\share, //server/share, and the \\?\ and
    # \\.\ device prefixes. Resolving these on Windows can open SMB
    # connections to attacker-controlled hosts.
    if path[:2] in ("\\\\", "//"):
        return True

    # Drive-qualified Windows paths (C:\..., plus drive-relative forms such
    # as "C:foo" that resolve against that drive's current directory).
    # realpath() on a drive mapped to a network share can also trigger SMB
    # traffic, so these are rejected on every platform for defense-in-depth
    # and testability (CI runs on Linux).
    if len(path) >= 2 and path[1] == ":" and path[0] in string.ascii_letters:
        return True

    # Absolute paths: rooted slash/backslash plus the platform-specific
    # os.path.isabs() check.
    if path.startswith(("\\", "/")) or os.path.isabs(path):
        return True

    # Finally, reject any ".." segment after unifying separators. Names that
    # merely contain dots (e.g. "..b") are fine.
    return ".." in path.replace("\\", "/").split("/")
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/path_security.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_path_security_middleware.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Path security middleware for blocking unsafe path patterns.
This middleware implements the "Swiss Cheese" defense model - it provides
an additional layer of protection that catches dangerous path patterns even
if individual route handlers forget to validate paths. This is especially
important for preventing SSRF attacks via Windows UNC paths.
Defense Layers
--------------
Layer 1 (this middleware): Catch-all for any route, including future routes
Layer 2 (route handlers): Defense-in-depth via build_safe_abspath() and
explicit is_unsafe_path_pattern() checks
Each layer has potential "holes" (ways it could fail):
- Middleware: Could be accidentally removed, misconfigured, or bypassed
- Route handlers: Developer could forget to add checks to new routes
By keeping both layers, an attack only succeeds if BOTH fail simultaneously.
See Also
--------
streamlit.path_security : Core path validation functions used by this middleware
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from starlette.responses import Response
from streamlit.path_security import is_unsafe_path_pattern
if TYPE_CHECKING:
from starlette.types import ASGIApp, Receive, Scope, Send
class PathSecurityMiddleware:
    """ASGI middleware that rejects requests whose paths look dangerous.

    Part of the Swiss Cheese defense model: even if an individual route
    handler forgets to validate its path, this middleware still blocks
    SSRF attempts via Windows UNC paths and other traversal patterns.

    Parameters
    ----------
    app
        The ASGI application to wrap.
    """

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    @staticmethod
    def _should_block(path: str) -> bool:
        """Return True when the raw request path must be rejected."""
        # SECURITY: inspect the leading slashes BEFORE any normalization.
        # A UNC path like "//server/share" would look like the harmless
        # relative path "server/share" after lstrip("/"), so it has to be
        # rejected while the original prefix is still visible.
        if path.startswith(("//", "\\\\")):
            return True
        # Validate the slash-stripped remainder against unsafe patterns.
        stripped = path.lstrip("/")
        return bool(stripped) and is_unsafe_path_pattern(stripped)

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        """Block unsafe HTTP paths; pass everything else straight through.

        Only HTTP requests are validated. WebSocket and lifespan scopes do
        not serve file content, so they are forwarded untouched.
        """
        if scope["type"] == "http" and self._should_block(scope.get("path", "")):
            rejection = Response(content="Bad Request", status_code=400)
            await rejection(scope, receive, send)
            return
        await self.app(scope, receive, send)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_path_security_middleware.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/path_security_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the shared path security utilities."""
from __future__ import annotations
import pytest
from streamlit.path_security import is_unsafe_path_pattern
class TestIsUnsafePathPattern:
    """Tests for the is_unsafe_path_pattern function.

    This is the shared core validator used by both component_file_utils and
    component_path_utils, so the table below doubles as its specification.
    """
    # Each case pairs a raw path with whether it must be flagged unsafe.
    # The ids keep individual failures identifiable in pytest output.
    @pytest.mark.parametrize(
        ("path", "expected_unsafe"),
        [
            # Safe paths
            pytest.param("inside.txt", False, id="simple_filename_safe"),
            pytest.param("subdir/file.js", False, id="subdir_forward_slash_safe"),
            pytest.param("subdir\\file.js", False, id="subdir_backslash_safe"),
            pytest.param("file..name.js", False, id="double_dots_in_filename_safe"),
            pytest.param("..file.js", False, id="dots_prefix_filename_safe"),
            pytest.param("file..", False, id="dots_suffix_filename_safe"),
            pytest.param("", False, id="empty_string_safe"),
            pytest.param(".", False, id="current_dir_safe"),
            # UNC paths
            pytest.param("\\\\server\\share", True, id="unc_backslash_unsafe"),
            pytest.param("//server/share", True, id="unc_forward_unsafe"),
            pytest.param("\\\\?\\C:\\Windows", True, id="extended_length_path"),
            pytest.param("\\\\.\\device", True, id="device_namespace"),
            # Absolute paths
            pytest.param("/etc/passwd", True, id="absolute_posix_unsafe"),
            pytest.param("\\rooted", True, id="rooted_backslash_unsafe"),
            # Path traversal
            pytest.param("../secret", True, id="traversal_parent_unsafe"),
            pytest.param("dir/../secret", True, id="traversal_in_middle_unsafe"),
            pytest.param("a/b/../c/../../../d", True, id="traversal_complex_unsafe"),
            # Windows drive paths
            pytest.param("C:\\file.txt", True, id="windows_drive_backslash"),
            pytest.param("C:/file.txt", True, id="windows_drive_forward"),
            pytest.param("D:\\path\\to\\file", True, id="windows_drive_d"),
            pytest.param("c:/users/file.txt", True, id="windows_drive_lowercase"),
            pytest.param("Z:foo", True, id="windows_drive_relative"),
            pytest.param("C:Windows", True, id="windows_drive_relative_no_slash"),
            # Null bytes
            pytest.param("\x00", True, id="null_only"),
            pytest.param("file\x00.txt", True, id="null_in_middle"),
            pytest.param("\x00../secret", True, id="null_before_traversal"),
        ],
    )
    def test_pattern_detection(self, path: str, expected_unsafe: bool) -> None:
        """Validates is_unsafe_path_pattern correctly identifies dangerous patterns.

        This is the shared core function used by both component_file_utils and
        component_path_utils to ensure consistent security checks.
        """
        assert is_unsafe_path_pattern(path) == expected_unsafe
    def test_traversal_with_mixed_separators(self) -> None:
        """Path traversal using mixed separators should be detected."""
        # Attackers can mix / and \ to dodge naive single-separator checks.
        mixed_traversal_paths = [
            "sub\\..\\..\\secret",
            "sub/../..\\secret",
            "sub\\../secret",
        ]
        for path in mixed_traversal_paths:
            assert is_unsafe_path_pattern(path), f"Expected {path!r} to be unsafe"
    def test_safe_nested_paths(self) -> None:
        """Nested subdirectory paths should be safe."""
        safe_paths = [
            "sub/nested/deep.txt",
            "a/b/c/d/e/f.js",
            "components/my-component/index.js",
        ]
        for path in safe_paths:
            assert not is_unsafe_path_pattern(path), f"Expected {path!r} to be safe"
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/path_security_test.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_path_security_middleware_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for starlette_path_security_middleware module."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.routing import Route
from starlette.testclient import TestClient
from streamlit.web.server.starlette.starlette_path_security_middleware import (
PathSecurityMiddleware,
)
if TYPE_CHECKING:
from starlette.websockets import WebSocket
def _create_test_app() -> Starlette:
    """Build a Starlette app wrapped with PathSecurityMiddleware for tests.

    The single catch-all route simply echoes the request path, making it
    easy to assert which (possibly normalized) path reached the handler.
    """

    async def echo_path(request):
        return PlainTextResponse(f"Path: {request.url.path}")

    application = Starlette(routes=[Route("/{path:path}", echo_path)])
    application.add_middleware(PathSecurityMiddleware)
    return application
def _create_websocket_app() -> Starlette:
    """Build a test app exposing one WebSocket endpoint behind the middleware."""
    from starlette.routing import WebSocketRoute

    async def websocket_endpoint(websocket: WebSocket):
        # Accept, confirm the handshake to the client, then close.
        await websocket.accept()
        await websocket.send_text("connected")
        await websocket.close()

    application = Starlette(routes=[WebSocketRoute("/ws", websocket_endpoint)])
    application.add_middleware(PathSecurityMiddleware)
    return application
class TestPathSecurityMiddleware:
    """Tests for PathSecurityMiddleware.

    Covers the three request categories the middleware distinguishes:
    paths Starlette normalizes before the middleware ever sees them, unsafe
    paths it must reject with 400, and safe paths it must pass through.
    """
    @pytest.mark.parametrize(
        ("path", "expected_path"),
        [
            ("/../../../etc/passwd", "/etc/passwd"),
            ("///attacker.com/share", "/attacker.com/share"),
        ],
        ids=[
            "forward-slash-traversal-normalized",
            "multiple-forward-slashes-normalized",
        ],
    )
    def test_starlette_normalizes_paths(self, path: str, expected_path: str) -> None:
        """Test that Starlette normalizes certain path patterns before middleware.

        These patterns are handled securely by the framework's path normalization,
        so they reach the middleware as safe paths.
        """
        app = _create_test_app()
        client = TestClient(app)
        response = client.get(path)
        # The request succeeds and the handler sees the normalized path.
        assert response.status_code == 200
        assert f"Path: {expected_path}" in response.text
    @pytest.mark.parametrize(
        "unsafe_path",
        [
            "/..\\..\\etc\\passwd",
            "/C:/Windows/system32",
            "/D:/secrets",
            "/%5c%5cattacker%5cshare",  # \\attacker\share (URL-decoded by Starlette)
            "/file%00.txt",
        ],
        ids=[
            "path-traversal-backslash",
            "windows-drive-c",
            "windows-drive-d",
            "unc-backslash",
            "null-byte",
        ],
    )
    def test_blocks_unsafe_paths(self, unsafe_path: str) -> None:
        """Test that unsafe path patterns are blocked with 400.

        Note: Forward-slash path traversal (/../..) and multiple forward slashes
        (///) are normalized by Starlette before reaching the middleware, which
        is secure framework behavior. This test covers patterns that are NOT
        normalized by the framework.
        """
        app = _create_test_app()
        client = TestClient(app)
        response = client.get(unsafe_path)
        assert response.status_code == 400
        assert response.text == "Bad Request"
    @pytest.mark.parametrize(
        "safe_path",
        [
            "/",
            "/index.html",
            "/static/app.js",
            "/component/my_component/index.html",
            "/deeply/nested/path/to/file.css",
            "/file-with-dots.min.js",
            "/path.with.dots/file.txt",
            "/file..js",
            "/files/...hidden",
        ],
        ids=[
            "root",
            "simple-file",
            "static-dir",
            "component-path",
            "deeply-nested",
            "dots-in-filename",
            "dots-in-dirname",
            "double-dots-in-filename",
            "triple-dots-in-filename",
        ],
    )
    def test_allows_safe_paths(self, safe_path: str) -> None:
        """Test that safe path patterns are allowed."""
        # Dots inside filenames (e.g. "file..js") must not be confused with
        # ".." path traversal segments.
        app = _create_test_app()
        client = TestClient(app)
        response = client.get(safe_path)
        assert response.status_code == 200
        assert f"Path: {safe_path}" in response.text
    def test_websocket_connections_pass_through(self) -> None:
        """Test that WebSocket connections are not blocked by path validation."""
        # The middleware only inspects scopes of type "http".
        app = _create_websocket_app()
        client = TestClient(app)
        with client.websocket_connect("/ws") as websocket:
            data = websocket.receive_text()
            assert data == "connected"
class TestDoubleSlashBypass:
    """Tests for the double-slash UNC path bypass vulnerability.

    This tests a specific attack vector where `//server/share` (a UNC path on Windows)
    could bypass the middleware's path validation because lstrip("/") normalizes
    away the leading slashes before the check, but the original path remains in scope.

    Note: We test with raw ASGI scope rather than TestClient because TestClient
    interprets `//host/path` as a URL with authority component (host), not as a
    path starting with `//`. Raw ASGI scope tests the actual attack scenario.
    """
    @pytest.mark.parametrize(
        "unc_path",
        [
            "//attacker.com/share",
            "//192.168.1.1/admin",
            "//localhost/c$/Windows",
        ],
        ids=[
            "unc-domain",
            "unc-ip-address",
            "unc-localhost-admin-share",
        ],
    )
    @pytest.mark.anyio
    async def test_double_slash_unc_paths_are_blocked(self, unc_path: str) -> None:
        """Test that double-slash UNC paths are blocked by the middleware.

        The middleware must detect and block paths like `//server/share` which
        are UNC paths on Windows. These should NOT pass through even though
        `attacker.com/share` (after lstrip) looks like a safe relative path.

        We use raw ASGI scope to simulate an attacker sending a malicious request
        directly, bypassing URL parsing that would interpret // as authority.
        """
        # Build the app with middleware
        app = _create_test_app()
        # Construct a raw ASGI scope with the malicious path
        scope = {
            "type": "http",
            "method": "GET",
            "path": unc_path,
            "query_string": b"",
            "headers": [],
            "server": ("localhost", 8000),
            "asgi": {"version": "3.0"},
        }
        # Captured by the `send` callable below as the app responds.
        response_status: int | None = None
        response_body = b""
        async def receive():
            # Minimal receive callable: one empty request body message.
            return {"type": "http.request", "body": b""}
        async def send(message):
            # Record the status from the start message and accumulate body
            # chunks (the body may arrive in multiple messages).
            nonlocal response_status, response_body
            if message["type"] == "http.response.start":
                response_status = message["status"]
            elif message["type"] == "http.response.body":
                response_body += message.get("body", b"")
        await app(scope, receive, send)
        # These MUST be blocked - if they return 200, we have a security bypass
        assert response_status == 400, (
            f"UNC path {unc_path!r} was not blocked! "
            "Double-slash paths should be rejected for SSRF protection."
        )
        assert response_body == b"Bad Request"
class TestMiddlewarePosition:
    """Tests to verify the middleware is positioned correctly in the stack.

    Placement matters: the security check must run before any other
    middleware or handler can act on a malicious path.
    """
    def test_middleware_is_first_in_streamlit_stack(self) -> None:
        """Test that PathSecurityMiddleware is the first middleware added."""
        # Imported locally to avoid pulling in the full server module at
        # collection time.
        from starlette.middleware import Middleware
        from streamlit.web.server.starlette.starlette_app import (
            create_streamlit_middleware,
        )
        middleware_list = create_streamlit_middleware()
        # PathSecurityMiddleware should be first
        assert len(middleware_list) >= 1
        first_middleware = middleware_list[0]
        assert isinstance(first_middleware, Middleware)
        assert first_middleware.cls is PathSecurityMiddleware
    def test_middleware_runs_before_other_processing(self) -> None:
        """Test that unsafe paths are blocked before reaching session middleware."""
        from starlette.middleware import Middleware
        from starlette.middleware.sessions import SessionMiddleware
        # Create app with both middlewares (path security first, then session)
        async def echo_path(request):
            # If we get here, path security didn't block us
            return PlainTextResponse(f"Path: {request.url.path}")
        app = Starlette(
            routes=[Route("/{path:path}", echo_path)],
            middleware=[
                Middleware(PathSecurityMiddleware),
                Middleware(SessionMiddleware, secret_key="test-secret"),
            ],
        )
        client = TestClient(app)
        # Safe path should work
        response = client.get("/safe/path")
        assert response.status_code == 200
        # Unsafe path (backslash traversal - not normalized by Starlette)
        # should be blocked before session processing
        response = client.get("/..\\..\\etc\\passwd")
        assert response.status_code == 400
    def test_middleware_protects_routes_without_explicit_validation(self) -> None:
        """Test that middleware blocks unsafe paths even when handler doesn't validate.

        This verifies the Swiss Cheese defense model: the middleware acts as a
        catch-all safety net for routes that forget to call is_unsafe_path_pattern().
        """
        # Track whether the handler was called
        handler_called = False
        async def naive_handler(request):
            """A deliberately vulnerable handler that does NOT validate the path.

            In production, this would be a security vulnerability without middleware.
            """
            nonlocal handler_called
            handler_called = True
            path = request.path_params.get("path", "")
            return PlainTextResponse(f"Received: {path}")
        app = Starlette(
            routes=[Route("/vulnerable/{path:path}", naive_handler)],
        )
        app.add_middleware(PathSecurityMiddleware)
        client = TestClient(app)
        # Safe path should reach the handler
        handler_called = False
        response = client.get("/vulnerable/safe/file.txt")
        assert response.status_code == 200
        assert handler_called is True
        # Unsafe path should be blocked by middleware BEFORE reaching handler
        handler_called = False
        response = client.get("/vulnerable/..\\..\\etc\\passwd")
        assert response.status_code == 400
        assert response.text == "Bad Request"
        assert handler_called is False  # Key assertion: handler was never called
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/starlette/starlette_path_security_middleware_test.py",
"license": "Apache License 2.0",
"lines": 270,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/elements/widgets/feedback.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Final,
Literal,
cast,
overload,
)
from streamlit.elements.lib.form_utils import current_form_id
from streamlit.elements.lib.layout_utils import (
LayoutConfig,
Width,
validate_width,
)
from streamlit.elements.lib.policies import check_widget_policies
from streamlit.elements.lib.utils import (
Key,
compute_and_register_element_id,
save_for_app_testing,
to_key,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Feedback_pb2 import Feedback as FeedbackProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner_utils.script_run_context import get_script_run_ctx
from streamlit.runtime.state import register_widget
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
)
# Number of options for each feedback type
_NUM_THUMBS_OPTIONS: Final = 2  # thumbs-down (0) and thumbs-up (1)
_NUM_FACES_OPTIONS: Final = 5  # satisfaction faces, indices 0-4
_NUM_STARS_OPTIONS: Final = 5  # star ratings, indices 0-4
def _get_num_options(feedback_type: Literal["thumbs", "faces", "stars"]) -> int:
    """Return how many selectable options the given feedback type offers."""
    option_counts = {
        "thumbs": _NUM_THUMBS_OPTIONS,
        "faces": _NUM_FACES_OPTIONS,
    }
    # Anything else is the "stars" style.
    return option_counts.get(feedback_type, _NUM_STARS_OPTIONS)
def _feedback_type_to_proto(
    feedback_type: Literal["thumbs", "faces", "stars"],
) -> FeedbackProto.FeedbackType.ValueType:
    """Map a feedback type string onto its FeedbackProto enum value."""
    type_by_name = {
        "thumbs": FeedbackProto.FeedbackType.THUMBS,
        "faces": FeedbackProto.FeedbackType.FACES,
    }
    # Anything else maps to the "stars" enum value.
    return type_by_name.get(feedback_type, FeedbackProto.FeedbackType.STARS)
class FeedbackSerde:
    """Serializer/deserializer for feedback widget values.

    The wire format is a string so that three distinct states survive the
    round trip:

    - ``None`` (field never set): no UI interaction yet -> use default_value
    - ``""`` (empty string): the user explicitly cleared -> return ``None``
    - ``"2"`` (stringified int): the user selected -> return the int value

    This lets clearing work correctly even when a default is set. Session
    state and return values are always ``int | None``.
    """

    def __init__(self, default_value: int | None = None):
        self.default_value = default_value

    def serialize(self, value: int | None) -> str:
        """Encode an int (or None) into the string wire format."""
        if value is None:
            return ""
        return str(value)

    def deserialize(self, ui_value: str | None) -> int | None:
        """Decode the string wire format back into an int or None."""
        if ui_value is None:
            # The frontend never reported a value: fall back to the default.
            return self.default_value
        if not ui_value:
            # Empty string: the user deliberately cleared the selection.
            return None
        return int(ui_value)
class FeedbackMixin:
    # The overloads narrow the return type per feedback style: "thumbs"
    # yields 0/1, while "faces"/"stars" yield 0..4.
    @overload
    def feedback(
        self,
        options: Literal["thumbs"] = ...,
        *,
        key: Key | None = None,
        default: int | None = None,
        disabled: bool = False,
        on_change: WidgetCallback | None = None,
        args: WidgetArgs | None = None,
        kwargs: WidgetKwargs | None = None,
        width: Width = "content",
    ) -> Literal[0, 1] | None: ...
    @overload
    def feedback(
        self,
        options: Literal["faces", "stars"] = ...,
        *,
        key: Key | None = None,
        default: int | None = None,
        disabled: bool = False,
        on_change: WidgetCallback | None = None,
        args: WidgetArgs | None = None,
        kwargs: WidgetKwargs | None = None,
        width: Width = "content",
    ) -> Literal[0, 1, 2, 3, 4] | None: ...
    @gather_metrics("feedback")
    def feedback(
        self,
        options: Literal["thumbs", "faces", "stars"] = "thumbs",
        *,
        key: Key | None = None,
        default: int | None = None,
        disabled: bool = False,
        on_change: WidgetCallback | None = None,
        args: WidgetArgs | None = None,
        kwargs: WidgetKwargs | None = None,
        width: Width = "content",
    ) -> int | None:
        """Display a feedback widget.

        A feedback widget is an icon-based button group available in three
        styles, as described in ``options``. It is commonly used in chat and AI
        apps to allow users to rate responses.

        Parameters
        ----------
        options : "thumbs", "faces", or "stars"
            The feedback options displayed to the user. ``options`` can be one
            of the following:

            - ``"thumbs"`` (default): Streamlit displays a thumb-up and
              thumb-down button group.
            - ``"faces"``: Streamlit displays a row of five buttons with
              facial expressions depicting increasing satisfaction from left to
              right.
            - ``"stars"``: Streamlit displays a row of star icons, allowing the
              user to select a rating from one to five stars.
        key : str or int
            An optional string or integer to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. No two widgets may have the same key.
        default : int or None
            Default feedback value. This must be consistent with the feedback
            type in ``options``:

            - 0 or 1 if ``options="thumbs"``.
            - Between 0 and 4, inclusive, if ``options="faces"`` or
              ``options="stars"``.
        disabled : bool
            An optional boolean that disables the feedback widget if set
            to ``True``. The default is ``False``.
        on_change : callable
            An optional callback invoked when this feedback widget's value
            changes.
        args : list or tuple
            An optional list or tuple of args to pass to the callback.
        kwargs : dict
            An optional dict of kwargs to pass to the callback.
        width : "content", "stretch", or int
            The width of the feedback widget. This can be one of the following:

            - ``"content"`` (default): The width of the widget matches the
              width of its content, but doesn't exceed the width of the parent
              container.
            - ``"stretch"``: The width of the widget matches the width of the
              parent container.
            - An integer specifying the width in pixels: The widget has a
              fixed width. If the specified width is greater than the width of
              the parent container, the width of the widget matches the width
              of the parent container.

        Returns
        -------
        int or None
            An integer indicating the user's selection, where ``0`` is the
            lowest feedback. Higher values indicate more positive feedback.
            If no option was selected, the widget returns ``None``.

            - For ``options="thumbs"``, a return value of ``0`` indicates
              thumbs-down, and ``1`` indicates thumbs-up.
            - For ``options="faces"`` and ``options="stars"``, return values
              range from ``0`` (least satisfied) to ``4`` (most satisfied).

        Examples
        --------
        Display a feedback widget with stars, and show the selected sentiment:

        >>> import streamlit as st
        >>>
        >>> sentiment_mapping = ["one", "two", "three", "four", "five"]
        >>> selected = st.feedback("stars")
        >>> if selected is not None:
        >>>     st.markdown(f"You selected {sentiment_mapping[selected]} star(s).")

        .. output::
            https://doc-feedback-stars.streamlit.app/
            height: 200px

        Display a feedback widget with thumbs, and show the selected sentiment:

        >>> import streamlit as st
        >>>
        >>> sentiment_mapping = [":material/thumb_down:", ":material/thumb_up:"]
        >>> selected = st.feedback("thumbs")
        >>> if selected is not None:
        >>>     st.markdown(f"You selected: {sentiment_mapping[selected]}")

        .. output::
            https://doc-feedback-thumbs.streamlit.app/
            height: 200px
        """
        # Validate the feedback style before any widget state is registered.
        if options not in {"thumbs", "faces", "stars"}:
            raise StreamlitAPIException(
                "The options argument to st.feedback must be one of "
                "['thumbs', 'faces', 'stars']. "
                f"The argument passed was '{options}'."
            )
        # The default must be a valid option index for the chosen style
        # (0..1 for thumbs, 0..4 for faces/stars).
        num_options = _get_num_options(options)
        if default is not None and (default < 0 or default >= num_options):
            raise StreamlitAPIException(
                f"The default value in '{options}' must be a number between 0 and {num_options - 1}."
                f" The passed default value is {default}"
            )
        key = to_key(key)
        validate_width(width, allow_content=True)
        layout_config = LayoutConfig(width=width)
        # Shared widget policy validation (key / on_change / default interplay).
        check_widget_policies(self.dg, key, on_change, default_value=default)
        ctx = get_script_run_ctx()
        form_id = current_form_id(self.dg)
        # Stable element id derived from the widget parameters (or the user
        # key), so widget state survives script reruns.
        element_id = compute_and_register_element_id(
            "feedback",
            user_key=key,
            key_as_main_identity={"options"},
            dg=self.dg,
            options=options,
            default=default,
            width=width,
        )
        # Build the proto
        proto = FeedbackProto()
        proto.id = element_id
        proto.type = _feedback_type_to_proto(options)
        proto.disabled = disabled
        proto.form_id = form_id
        if default is not None:
            proto.default = default
        # The serde keeps "explicitly cleared" distinct from "untouched" on
        # the wire; see FeedbackSerde for the encoding.
        serde = FeedbackSerde(default_value=default)
        widget_state = register_widget(
            proto.id,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
            value_type="string_value",
        )
        if widget_state.value_changed:
            # Push the programmatically-changed value back to the frontend so
            # the UI reflects e.g. st.session_state updates.
            if widget_state.value is not None:
                proto.value = widget_state.value
            proto.set_value = True
        if ctx:
            # NOTE(review): the app-testing snapshot stores None here rather
            # than widget_state.value — confirm this is intentional.
            save_for_app_testing(ctx, element_id, None)
        self.dg._enqueue("feedback", proto, layout_config=layout_config)
        return widget_state.value
    @property
    def dg(self) -> DeltaGenerator:
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/elements/widgets/feedback.py",
"license": "Apache License 2.0",
"lines": 265,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/elements/feedback_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""st.feedback unit tests."""
from __future__ import annotations
from typing import Literal
from unittest.mock import MagicMock, patch
import pytest
from parameterized import parameterized
import streamlit as st
from streamlit.elements.widgets.feedback import FeedbackSerde
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Feedback_pb2 import Feedback as FeedbackProto
from streamlit.runtime.state.session_state import get_script_run_ctx
from tests.delta_generator_test_case import DeltaGeneratorTestCase
from tests.streamlit.elements.layout_test_utils import WidthConfigFields
class TestFeedbackSerde:
    """Unit tests for FeedbackSerde's string wire format.

    Three wire states must stay distinguishable:
    ``None`` (no interaction -> default), ``""`` (cleared -> None), and a
    stringified int such as ``"2"`` (a concrete selection).
    """

    def test_serialize_value_to_string(self):
        """An int is encoded as its decimal string representation."""
        assert FeedbackSerde().serialize(3) == "3"

    def test_serialize_none_to_empty_string(self):
        """None (the cleared state) is encoded as the empty string."""
        assert FeedbackSerde().serialize(None) == ""

    def test_deserialize_string_value_to_int(self):
        """A numeric string decodes back to its int value."""
        assert FeedbackSerde().deserialize("3") == 3

    def test_deserialize_none_returns_default(self):
        """With no UI interaction, the configured default is returned."""
        assert FeedbackSerde(default_value=2).deserialize(None) == 2

    def test_deserialize_none_without_default(self):
        """With no UI interaction and no default, None is returned."""
        assert FeedbackSerde().deserialize(None) is None

    def test_deserialize_empty_string_returns_none(self):
        """An explicit clear wins over the default and yields None."""
        # Key behavior: empty string means "user cleared" and must return
        # None, not fall back to the default.
        assert FeedbackSerde(default_value=2).deserialize("") is None

    def test_clearing_with_default_set(self):
        """Regression test: clearing must work even when a default is set.

        With default=2, deselecting the widget should yield None instead of
        reverting to the default.
        """
        serde = FeedbackSerde(default_value=2)
        # Untouched -> default.
        assert serde.deserialize(None) == 2
        # Selection -> int.
        assert serde.deserialize("1") == 1
        # Cleared -> None, never the default.
        cleared = serde.deserialize("")
        assert cleared is None
        assert cleared != 2  # Explicitly verify it's not the default
class TestFeedbackCommand(DeltaGeneratorTestCase):
"""Tests for the st.feedback command."""
@parameterized.expand(
[
("thumbs", FeedbackProto.FeedbackType.THUMBS),
("faces", FeedbackProto.FeedbackType.FACES),
("stars", FeedbackProto.FeedbackType.STARS),
]
)
def test_feedback_type_options(
self,
option: Literal["thumbs", "faces", "stars"],
expected_type: FeedbackProto.FeedbackType.ValueType,
):
"""Test that each feedback type option is correctly converted to proto."""
st.feedback(option)
delta = self.get_delta_from_queue().new_element.feedback
assert delta.type == expected_type
def test_invalid_option_literal(self):
"""Test that invalid option raises StreamlitAPIException."""
with pytest.raises(StreamlitAPIException) as e:
st.feedback("foo")
assert str(e.value) == (
"The options argument to st.feedback must be one of "
"['thumbs', 'faces', 'stars']. The argument passed was 'foo'."
)
@parameterized.expand([(0,), (1,)])
def test_widget_state_changed_via_session_state(self, session_state_index: int):
"""Test that widget state can be set via session_state."""
st.session_state.feedback_key = session_state_index
val = st.feedback("thumbs", key="feedback_key")
assert val == session_state_index
def test_default_value_thumbs(self):
"""Test that default value is set correctly for thumbs."""
val = st.feedback("thumbs", default=1)
assert val == 1
delta = self.get_delta_from_queue().new_element.feedback
assert delta.default == 1
def test_default_value_faces(self):
"""Test that default value is set correctly for faces."""
val = st.feedback("faces", default=3)
assert val == 3
delta = self.get_delta_from_queue().new_element.feedback
assert delta.default == 3
def test_default_value_stars(self):
"""Test that default value is set correctly for stars."""
val = st.feedback("stars", default=2)
assert val == 2
delta = self.get_delta_from_queue().new_element.feedback
assert delta.default == 2
def test_no_default_returns_none(self):
"""Test that widget returns None when no default is set."""
val = st.feedback("thumbs")
assert val is None
def test_invalid_default_for_thumbs(self):
"""Test that invalid default for thumbs raises exception."""
with pytest.raises(StreamlitAPIException) as e:
st.feedback("thumbs", default=2)
assert "must be a number between 0 and 1" in str(e.value)
def test_invalid_default_for_faces(self):
"""Test that invalid default for faces raises exception."""
with pytest.raises(StreamlitAPIException) as e:
st.feedback("faces", default=5)
assert "must be a number between 0 and 4" in str(e.value)
def test_invalid_default_for_stars(self):
"""Test that invalid default for stars raises exception."""
with pytest.raises(StreamlitAPIException) as e:
st.feedback("stars", default=-1)
assert "must be a number between 0 and 4" in str(e.value)
def test_disabled_state(self):
"""Test that disabled state is set correctly."""
st.feedback("thumbs", disabled=True)
delta = self.get_delta_from_queue().new_element.feedback
assert delta.disabled is True
def test_enabled_state(self):
"""Test that enabled state is the default."""
st.feedback("thumbs")
delta = self.get_delta_from_queue().new_element.feedback
assert delta.disabled is False
@parameterized.expand([("string_key",), (0,), (None,)])
def test_key_types(self, key: str | int | None):
"""Test that different key types are handled correctly."""
st.feedback("thumbs", key=key)
delta = self.get_delta_from_queue().new_element.feedback
assert delta.id.endswith(f"-{key}")
def test_on_change_callback_registered(self):
    """Passing on_change attaches a callback to the widget's metadata."""
    st.feedback("thumbs", on_change=lambda: None)
    ctx = get_script_run_ctx()
    assert ctx is not None
    state = ctx.session_state._state
    registered_id = state.get_widget_states()[0].id
    meta = state._new_widget_state.widget_metadata.get(registered_id)
    assert meta is not None
    assert meta.callback is not None
def test_outside_form(self):
    """Widgets created outside any form carry an empty form_id."""
    st.feedback("thumbs")
    feedback_proto = self.get_delta_from_queue().new_element.feedback
    assert feedback_proto.form_id == ""
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form(self):
    """Widgets created inside a form carry that form's id."""
    with st.form("form"):
        st.feedback("thumbs")

    # Expect exactly two deltas: the form block and the widget itself.
    assert len(self.get_all_deltas_from_queue()) == 2

    form_block = self.get_delta_from_queue(0).add_block
    widget_proto = self.get_delta_from_queue(1).new_element.feedback
    assert widget_proto.form_id == form_block.form.form_id
def test_inside_column(self):
    """st.feedback renders correctly when nested inside a column."""
    first_col, _ = st.columns(2)
    with first_col:
        st.feedback("thumbs")

    # Expected deltas: one horizontal block, two columns, one widget.
    assert len(self.get_all_deltas_from_queue()) == 4

    widget_proto = self.get_delta_from_queue().new_element.feedback
    assert widget_proto.type == FeedbackProto.FeedbackType.THUMBS
class TestFeedbackWidthConfig(DeltaGeneratorTestCase):
    """Covers the width configuration options of st.feedback."""

    def test_default_width_is_content(self):
        """Without an explicit width, the widget uses content width."""
        st.feedback("thumbs")
        element = self.get_delta_from_queue().new_element
        width_field = element.width_config.WhichOneof("width_spec")
        assert width_field == WidthConfigFields.USE_CONTENT.value
        assert element.width_config.use_content is True

    def test_stretch_width(self):
        """width='stretch' maps to the use_stretch proto field."""
        st.feedback("thumbs", width="stretch")
        element = self.get_delta_from_queue().new_element
        width_field = element.width_config.WhichOneof("width_spec")
        assert width_field == WidthConfigFields.USE_STRETCH.value
        assert element.width_config.use_stretch is True

    def test_pixel_width(self):
        """An integer width maps to the pixel_width proto field."""
        st.feedback("thumbs", width=100)
        element = self.get_delta_from_queue().new_element
        width_field = element.width_config.WhichOneof("width_spec")
        assert width_field == WidthConfigFields.PIXEL_WIDTH.value
        assert element.width_config.pixel_width == 100
class TestFeedbackStableId(DeltaGeneratorTestCase):
    """Tests for st.feedback widget ID stability."""

    def test_stable_id_with_key(self):
        """Test that the widget ID is stable when a stable key is provided."""
        # Patch ID registration so rendering the same key twice within one
        # test run does not trip the duplicate-element-id guard.
        with patch(
            "streamlit.elements.lib.utils._register_element_id",
            return_value=MagicMock(),
        ):
            # First render
            st.feedback(
                key="feedback_key",
                disabled=False,
                width="content",
                on_change=lambda: None,
                args=("arg1", "arg2"),
                kwargs={"kwarg1": "kwarg1"},
                default=0,
                options="thumbs",
            )
            proto1 = self.get_delta_from_queue().new_element.feedback
            id1 = proto1.id

            # Second render with different non-whitelisted params
            # (disabled/width/callback/default must not influence the ID).
            st.feedback(
                key="feedback_key",
                disabled=True,
                width="stretch",
                on_change=lambda: None,
                args=("arg_1", "arg_2"),
                kwargs={"kwarg_1": "kwarg_1"},
                default=1,
                options="thumbs",
            )
            proto2 = self.get_delta_from_queue().new_element.feedback
            id2 = proto2.id

            # Same key + same options -> same widget ID.
            assert id1 == id2

    def test_id_changes_with_different_options(self):
        """Test that the widget ID changes when options change."""
        with patch(
            "streamlit.elements.lib.utils._register_element_id",
            return_value=MagicMock(),
        ):
            st.feedback("thumbs", key="feedback_key_1")
            proto1 = self.get_delta_from_queue().new_element.feedback
            id1 = proto1.id

            # Same key but different options -> the ID must differ.
            st.feedback("faces", key="feedback_key_1")
            proto2 = self.get_delta_from_queue().new_element.feedback
            id2 = proto2.id

            assert id1 != id2

    def test_different_feedback_types_have_different_ids(self):
        """Test that different feedback types produce different IDs without key."""
        st.feedback("thumbs", key="thumbs_id")
        proto_thumbs = self.get_delta_from_queue().new_element.feedback

        st.feedback("faces", key="faces_id")
        proto_faces = self.get_delta_from_queue().new_element.feedback

        st.feedback("stars", key="stars_id")
        proto_stars = self.get_delta_from_queue().new_element.feedback

        # Pairwise distinct IDs across all three feedback types.
        assert proto_thumbs.id != proto_faces.id
        assert proto_faces.id != proto_stars.id
        assert proto_thumbs.id != proto_stars.id
class TestFeedbackDuplicateId(DeltaGeneratorTestCase):
    """Covers the error raised when two st.feedback widgets collide."""

    def test_duplicate_element_id_error_message(self):
        """Two identical keyless widgets raise an error naming the element."""
        with pytest.raises(StreamlitAPIException) as exc_info:
            st.feedback("thumbs")
            st.feedback("thumbs")

        # The element name must be called out so the user can find it.
        assert "feedback" in str(exc_info.value)
# AppTest-based integration tests
def test_apptest_feedback_clearing_with_default():
    """Clearing a feedback widget that has a default must yield None.

    Regression test: clearing a feedback widget with a default previously
    reverted to the default value instead of returning None.
    """
    from streamlit.testing.v1 import AppTest

    def script():
        import streamlit as st

        result = st.feedback("thumbs", default=1, key="test_feedback")
        st.write(f"Result: {result}")

    at = AppTest.from_function(script).run()
    widget = at.feedback[0]
    # On first render the default is pre-selected.
    assert widget.value == 1

    # Pick the other option.
    at = widget.set_value(0).run()
    widget = at.feedback[0]
    assert widget.value == 0

    # Clearing must produce None — NOT fall back to the default (1).
    at = widget.set_value(None).run()
    widget = at.feedback[0]
    assert widget.value is None
def test_apptest_feedback_no_default_clearing():
    """A feedback widget without a default can be set and cleared again."""
    from streamlit.testing.v1 import AppTest

    def script():
        import streamlit as st

        result = st.feedback("stars", key="test_feedback")
        st.write(f"Result: {result}")

    at = AppTest.from_function(script).run()
    widget = at.feedback[0]
    # Nothing is selected initially.
    assert widget.value is None

    # Choose the fourth star (index 3).
    at = widget.set_value(3).run()
    widget = at.feedback[0]
    assert widget.value == 3

    # Clearing returns the widget to the unselected state.
    at = widget.set_value(None).run()
    widget = at.feedback[0]
    assert widget.value is None
def test_apptest_feedback_value_retained_on_rerun():
    """The selected feedback value survives an app rerun."""
    from streamlit.testing.v1 import AppTest

    def script():
        import streamlit as st

        st.feedback("faces", key="test_feedback")
        st.button("Rerun")

    at = AppTest.from_function(script).run()

    # Select the middle face.
    at = at.feedback[0].set_value(2).run()
    assert at.feedback[0].value == 2

    # A button click forces a rerun; the selection must persist.
    at = at.button[0].click().run()
    assert at.feedback[0].value == 2
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/elements/feedback_test.py",
"license": "Apache License 2.0",
"lines": 347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/st_chart_builtin_colors.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test app for built-in color name support in charts.

Renders one chart of each type (line, bar, area, scatter) so that every
built-in color name (red, orange, yellow, green, blue, violet, gray,
primary) is exercised and resolved against the active theme.
"""

import numpy as np
import pandas as pd

import streamlit as st

# Fixed seed so both series are reproducible across runs.
np.random.seed(42)

# Deterministic sample data shared by every chart below.
chart_data = pd.DataFrame(
    {
        "x": range(10),
        "series1": np.random.randn(10).cumsum(),
        "series2": np.random.randn(10).cumsum(),
    }
)

# Two columns keep all four charts visible on one screen.
left, right = st.columns(2)

with left:
    st.write('**Line: `color=["red", "orange"]`**')
    st.line_chart(chart_data, x="x", y=["series1", "series2"], color=["red", "orange"])

    st.write('**Bar: `color=["yellow", "green"]`**')
    st.bar_chart(chart_data, x="x", y=["series1", "series2"], color=["yellow", "green"])

with right:
    st.write('**Area: `color=["blue", "violet"]`**')
    st.area_chart(chart_data, x="x", y=["series1", "series2"], color=["blue", "violet"])

    st.write('**Scatter: `color=["gray", "primary"]`**')
    st.scatter_chart(chart_data, x="x", y=["series1", "series2"], color=["gray", "primary"])
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_chart_builtin_colors.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/st_chart_builtin_colors_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E tests for built-in color name support in charts.
Tests that all built-in color names (red, orange, yellow, green, blue,
violet, gray, primary) are correctly resolved to custom theme color values.
"""
import os
import pytest
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import ImageCompareFunction
from e2e_playwright.shared.app_utils import expect_no_skeletons
@pytest.fixture(scope="module")
@pytest.mark.early
def configure_custom_theme_colors():
    """Configure a dark theme with pale/pastel colors for testing builtin color name resolution.

    Colors are chosen to be obviously different from the default dark theme colors
    (which are vibrant/saturated) by using pale/pastel versions instead.

    On teardown, any pre-existing values of these variables are restored
    (the previous implementation deleted them unconditionally, which both
    clobbered pre-existing values and would raise KeyError if a variable
    had already been removed).
    """
    theme_env = {
        # Dark theme base
        "STREAMLIT_THEME_BASE": "dark",
        "STREAMLIT_THEME_RED_COLOR": "#ffb3b3",  # Pale pink-red
        "STREAMLIT_THEME_ORANGE_COLOR": "#ffd9b3",  # Pale peach
        "STREAMLIT_THEME_YELLOW_COLOR": "#ffffb3",  # Pale cream
        "STREAMLIT_THEME_GREEN_COLOR": "#b3ffb3",  # Pale mint
        "STREAMLIT_THEME_BLUE_COLOR": "#b3d9ff",  # Pale sky blue
        "STREAMLIT_THEME_VIOLET_COLOR": "#d9b3ff",  # Pale lavender
        "STREAMLIT_THEME_GRAY_COLOR": "#d9d9d9",  # Light silver
        "STREAMLIT_THEME_PRIMARY_COLOR": "#ffb3d9",  # Pale pink
    }
    # Remember pre-existing values (None = variable was absent) so teardown
    # can restore the environment exactly as it was.
    previous = {name: os.environ.get(name) for name in theme_env}
    os.environ.update(theme_env)

    yield

    for name, old_value in previous.items():
        if old_value is None:
            os.environ.pop(name, None)
        else:
            os.environ[name] = old_value
@pytest.mark.usefixtures("configure_custom_theme_colors")
def test_builtin_colors_with_custom_theme(
    app: Page, assert_snapshot: ImageCompareFunction
):
    """Verify every built-in color name resolves to its custom theme color.

    With the dark pale/pastel theme configured by the fixture, all four chart
    types must render using the overridden colors; a single snapshot covers
    the full builtin palette: red, orange, yellow, green, blue, violet,
    gray, and primary.
    """
    # Use a viewport large enough to fit every chart.
    app.set_viewport_size({"width": 1280, "height": 800})

    # Block until the app has fully rendered (no placeholder skeletons).
    expect_no_skeletons(app, timeout=25000)

    # All four chart types (line, bar, area, scatter) should be present.
    charts = app.get_by_test_id("stVegaLiteChart")
    expect(charts).to_have_count(4)

    # Each chart must have produced its Vega graphics output.
    expect(charts.locator("[role='graphics-document']")).to_have_count(4)

    # One snapshot covers every chart at once.
    assert_snapshot(app, name="st_chart_builtin_colors-custom_theme")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_chart_builtin_colors_test.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:scripts/verify_version.py | #!/usr/bin/env python
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify that the git tag matches the package version.
This script replaces the VerifyVersionCommand in setup.py for use with
pyproject.toml-based builds.
Usage:
TAG=1.53.0 python scripts/verify_version.py
# or
python scripts/verify_version.py --tag 1.53.0
"""
from __future__ import annotations
import argparse
import os
import sys
from pathlib import Path
# tomllib is available in Python 3.11+, use tomli as fallback for Python 3.10
try:
import tomllib
except ImportError:
import tomli as tomllib
def get_package_version() -> str:
    """Return the version string declared in lib/pyproject.toml."""
    repo_root = Path(__file__).parent.parent
    pyproject_path = repo_root / "lib" / "pyproject.toml"
    # TOML is required to be UTF-8, so decoding the file as text and using
    # loads() is equivalent to load() on the binary stream.
    pyproject = tomllib.loads(pyproject_path.read_text(encoding="utf-8"))
    version: str = pyproject["project"]["version"]
    return version
def main() -> None:
    """Verify git tag matches pyproject.toml version."""
    parser = argparse.ArgumentParser(
        description="Verify that the git tag matches the package version"
    )
    parser.add_argument(
        "--tag",
        help="The git tag to verify (defaults to TAG environment variable)",
    )
    cli_args = parser.parse_args()

    # CLI flag wins; fall back to the TAG environment variable.
    tag = cli_args.tag or os.getenv("TAG")
    if not tag:
        sys.exit(
            "Error: No tag provided. Use --tag argument or set TAG environment variable."
        )

    package_version = get_package_version()
    if tag != package_version:
        sys.exit(
            f"Error: Git tag '{tag}' does not match package version '{package_version}'"
        )

    print(f"Version verified: {package_version}")
# Run the verification when this file is executed as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "streamlit/streamlit",
"file_path": "scripts/verify_version.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:scripts/get_changed_files.py | #!/usr/bin/env python
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Get changed files by category for use in Makefile targets.
Usage:
python scripts/get_changed_files.py --python
python scripts/get_changed_files.py --python-tests
python scripts/get_changed_files.py --frontend
python scripts/get_changed_files.py --frontend-tests
python scripts/get_changed_files.py --e2e
# Include committed changes compared to base branch (for PR-like comparison):
python scripts/get_changed_files.py --all --base-branch
python scripts/get_changed_files.py --all --base-branch main
Output is space-separated file paths, suitable for passing to commands.
Note: Files with spaces in their paths are not supported and will be ignored.
This is a limitation of the space-separated output format used for shell consumption.
"""
from __future__ import annotations
import argparse
import re
import subprocess
import sys
from pathlib import Path
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
REPO_ROOT = Path(__file__).parent.parent.resolve()
# Default base branch for comparison
DEFAULT_BASE_BRANCH = "develop"
# Directory prefixes
LIB_SOURCE_PREFIX = "lib/streamlit/"
LIB_TESTS_PREFIX = "lib/tests/streamlit/"
FRONTEND_PREFIX = "frontend/"
E2E_PREFIX = "e2e_playwright/"
# Paths to exclude from checks
EXCLUDED_PATHS = ("/vendor/", "lib/streamlit/proto/")
# File extension patterns
PYTHON_EXTENSIONS = r"\.(py|pyi)$"
FRONTEND_EXTENSIONS = r"\.(ts|tsx|js|jsx)$"
# Test file patterns
PYTHON_TEST_SUFFIX = "_test.py"
FRONTEND_TEST_PATTERN = r"\.test\.(ts|tsx)$"
# ---------------------------------------------------------------------------
# Helper functions
# ---------------------------------------------------------------------------
def _is_excluded(path: str) -> bool:
"""Check if path should be excluded from checks.
Excludes:
- Paths in EXCLUDED_PATHS (vendor, proto)
- Paths containing spaces (not supported due to space-separated output format)
"""
if " " in path:
return True
return any(excluded in path for excluded in EXCLUDED_PATHS)
def _git_lines(git_args: list[str]) -> list[str]:
    """Run a git command in REPO_ROOT and return its non-empty output lines.

    Returns an empty list when the command exits non-zero, so callers can
    treat "git unavailable / command failed" the same as "no output".
    """
    result = subprocess.run(
        git_args,
        capture_output=True,
        text=True,
        cwd=REPO_ROOT,
        check=False,
    )
    if result.returncode != 0:
        return []
    return [line.strip() for line in result.stdout.strip().split("\n") if line.strip()]


def get_changed_files(base_branch: str | None = None) -> list[str]:
    """Get all changed files (staged, unstaged, and untracked, excluding deleted).

    Args:
        base_branch: If provided, also include files changed between this branch
            and HEAD (useful for getting all changes in a PR).

    Returns:
        Sorted, de-duplicated list of repository-relative paths.

    Note:
        Files with spaces in their paths are excluded (not supported due to
        space-separated output format).
    """
    files: set[str] = set()

    # Modified files (staged + unstaged) compared to HEAD; --diff-filter=d
    # drops deleted files.
    files.update(_git_lines(["git", "diff", "--name-only", "HEAD", "--diff-filter=d"]))

    # Untracked files (new files not yet added to git).
    files.update(_git_lines(["git", "ls-files", "--others", "--exclude-standard"]))

    # If base_branch is provided, also get committed changes since diverging
    # from it: diff from the merge-base (common ancestor) to HEAD so that
    # unrelated commits on the base branch are not included.
    if base_branch:
        merge_base_output = _git_lines(["git", "merge-base", base_branch, "HEAD"])
        if merge_base_output:
            merge_base = merge_base_output[0]
            files.update(
                _git_lines(
                    ["git", "diff", "--name-only", merge_base, "HEAD", "--diff-filter=d"]
                )
            )

    # Filter out files with spaces (not supported due to space-separated output).
    return sorted(f for f in files if " " not in f)
def get_python_files(files: list[str]) -> list[str]:
    """Get Python files (excluding vendor, proto, e2e tests)."""
    return [
        f
        for f in files
        if not _is_excluded(f)
        # e2e tests are handled separately by --e2e.
        and not f.startswith(E2E_PREFIX)
        and re.search(PYTHON_EXTENSIONS, f)
    ]
def get_python_test_files(files: list[str]) -> list[str]:
    """Get Python unit test files from lib/tests/, including mapped tests from changed source files.

    Note: E2E tests (e2e_playwright/) are excluded - use --e2e for those.

    Changed test files under lib/ are included directly; changed source files
    under lib/streamlit/ are mapped to their sibling test file under
    lib/tests/streamlit/, which is included only if it exists on disk.
    The result is de-duplicated and sorted.
    """
    test_files: set[str] = set()
    for f in files:
        if _is_excluded(f):
            continue
        if not f.endswith(".py"):
            continue
        # Skip e2e tests - they're handled separately by --e2e
        if f.startswith(E2E_PREFIX):
            continue
        # Direct test file (must be in lib/ directory)
        if f.endswith(PYTHON_TEST_SUFFIX) and f.startswith("lib/"):
            test_files.add(f)
            continue
        # Map source file to test file: lib/streamlit/foo.py -> lib/tests/streamlit/foo_test.py
        if f.startswith(LIB_SOURCE_PREFIX):
            test_file = f.replace(LIB_SOURCE_PREFIX, LIB_TESTS_PREFIX)
            test_file = re.sub(r"\.py$", PYTHON_TEST_SUFFIX, test_file)
            # Only include mapped tests that actually exist on disk.
            if (REPO_ROOT / test_file).exists():
                test_files.add(test_file)
    return sorted(test_files)
def get_frontend_files(files: list[str]) -> list[str]:
    """Get frontend files (excluding vendor)."""
    return [
        f
        for f in files
        if not _is_excluded(f)
        and f.startswith(FRONTEND_PREFIX)
        and re.search(FRONTEND_EXTENSIONS, f)
    ]
def get_frontend_test_files(files: list[str]) -> list[str]:
    """Get frontend test files, including mapped tests from changed source files.

    Maps source files to test files in the same directory:
    - Component.tsx -> Component.test.tsx
    - utils.ts -> utils.test.ts

    Mapped test files are included only if they exist on disk. The result
    is de-duplicated and sorted.
    """
    test_files: set[str] = set()
    for f in files:
        if _is_excluded(f):
            continue
        if not f.startswith(FRONTEND_PREFIX):
            continue
        if not re.search(FRONTEND_EXTENSIONS, f):
            continue
        # Direct test file
        if re.search(FRONTEND_TEST_PATTERN, f):
            test_files.add(f)
            continue
        # Map source file to test file: Component.tsx -> Component.test.tsx
        # Extract base name and extension
        match = re.search(r"^(.+)\.(tsx?|jsx?)$", f)
        if match:
            base = match.group(1)
            ext = match.group(2)
            test_file = f"{base}.test.{ext}"
            # Only include mapped tests that actually exist on disk.
            if (REPO_ROOT / test_file).exists():
                test_files.add(test_file)
    return sorted(test_files)
def get_e2e_files(files: list[str]) -> list[str]:
    """Get e2e test files."""
    return [
        f
        for f in files
        if f.startswith(E2E_PREFIX) and f.endswith(PYTHON_TEST_SUFFIX)
    ]
def main() -> int:
    """Parse CLI flags, collect matching changed files, and print them.

    Output is a single space-separated line of paths (nothing is printed
    when no files match). Returns the process exit code (always 0).
    """
    parser = argparse.ArgumentParser(
        description="Get changed files by category",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument(
        "--python",
        action="store_true",
        help="Python files (source and tests, excludes e2e)",
    )
    parser.add_argument(
        "--python-tests",
        action="store_true",
        help="Python test files (includes mapped tests from source)",
    )
    parser.add_argument(
        "--frontend",
        action="store_true",
        help="Frontend files (source and tests)",
    )
    parser.add_argument(
        "--frontend-tests",
        action="store_true",
        help="Frontend test files",
    )
    parser.add_argument(
        "--e2e",
        action="store_true",
        help="E2E test files",
    )
    parser.add_argument(
        "--all",
        action="store_true",
        help="All changed files (for display)",
    )
    parser.add_argument(
        "--strip-prefix",
        type=str,
        metavar="PREFIX",
        help="Strip prefix from output paths (e.g., 'frontend/' or 'lib/')",
    )
    parser.add_argument(
        "--base-branch",
        type=str,
        metavar="BRANCH",
        nargs="?",
        const=DEFAULT_BASE_BRANCH,
        help=(
            f"Compare against base branch to include committed changes "
            f"(default: {DEFAULT_BASE_BRANCH} if flag is used without value)"
        ),
    )
    args = parser.parse_args()

    # Require at least one category (parser.error exits with code 2).
    if not any(
        [
            args.python,
            args.python_tests,
            args.frontend,
            args.frontend_tests,
            args.e2e,
            args.all,
        ]
    ):
        parser.error("At least one category flag is required")

    changed_files = get_changed_files(base_branch=args.base_branch)
    if not changed_files:
        return 0

    result_files: list[str] = []
    # --all wins over the category flags; otherwise the selected category
    # lists are concatenated (duplicates removed below).
    if args.all:
        result_files = changed_files
    else:
        if args.python:
            result_files.extend(get_python_files(changed_files))
        if args.python_tests:
            result_files.extend(get_python_test_files(changed_files))
        if args.frontend:
            result_files.extend(get_frontend_files(changed_files))
        if args.frontend_tests:
            result_files.extend(get_frontend_test_files(changed_files))
        if args.e2e:
            result_files.extend(get_e2e_files(changed_files))

    # Remove duplicates and sort
    result_files = sorted(set(result_files))

    # Strip prefix if requested
    if args.strip_prefix:
        result_files = [f.removeprefix(args.strip_prefix) for f in result_files]

    # Output space-separated for shell consumption
    if result_files:
        print(" ".join(result_files))
    return 0
# Propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_id": "streamlit/streamlit",
"file_path": "scripts/get_changed_files.py",
"license": "Apache License 2.0",
"lines": 294,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/asgi_app.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E test for st.App running the mega_tester_app.py script."""

from streamlit.starlette import App

# Module-level ASGI entrypoint: serves the mega_tester_app.py Streamlit
# script so any ASGI server (e.g. `uvicorn asgi_app:app`) can run it.
app = App("mega_tester_app.py")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/asgi_app.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/asgi_app_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E tests for st.App running the mega_tester_app.py script via streamlit run.
This test verifies that st.App works correctly when run with `streamlit run`
and can successfully execute a complex Streamlit script without console errors.
"""
from __future__ import annotations
import re
from typing import TYPE_CHECKING
from playwright.sync_api import expect
from e2e_playwright.shared.app_utils import expect_no_skeletons
if TYPE_CHECKING:
from playwright.sync_api import ConsoleMessage, Page
def is_expected_error(msg: ConsoleMessage, browser_name: str) -> bool:
    """Return True for known-benign console errors that should be ignored."""
    # A refused telemetry request to events.mapbox.com is expected.
    is_mapbox_refusal = (
        msg.text == "Failed to load resource: net::ERR_CONNECTION_REFUSED"
        and "events.mapbox.com" in msg.location["url"]
    )
    if is_mapbox_refusal:
        return True

    # Firefox under playwright has a known pydeck/WebGL rendering error.
    is_firefox_deck_issue = (
        browser_name == "firefox"
        and re.search(r"deck:.*is null undefined", msg.text) is not None
    )
    return is_firefox_deck_issue
def test_no_console_errors(app: Page, browser_name: str):
    """Test that st.App running mega_tester_app does not log any console errors."""
    # Collected unexpected errors, each recorded with its source location.
    console_errors: list[dict[str, str | int]] = []

    def on_console_message(msg: ConsoleMessage) -> None:
        # Possible message types: "log", "debug", "info", "error", "warning", ...
        # Record only unexpected errors; known browser/vendor noise is
        # filtered out by is_expected_error.
        if msg.type == "error" and not is_expected_error(msg, browser_name):
            console_errors.append(
                {
                    "message": msg.text,
                    "url": msg.location["url"],
                    "line": msg.location["lineNumber"],
                    "column": msg.location["columnNumber"],
                }
            )

    # Subscribe to console events before the render checks below so that
    # errors emitted while the page finishes rendering are captured.
    app.on("console", on_console_message)

    # Make sure that all elements are rendered and no skeletons are shown:
    expect_no_skeletons(app, timeout=25000)

    # There should be only one exception in the app (the st.exception demo):
    expect(app.get_by_test_id("stException")).to_have_count(1)

    # Check that title is visible:
    expect(app.get_by_text("🎈 Mega tester app")).to_be_visible()

    # There should be no unexpected console errors:
    assert not console_errors, "Console errors were logged " + str(console_errors)
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/asgi_app_test.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/starlette.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starlette integration for Streamlit.
This module provides the ASGI-compatible App class for running Streamlit
applications with any ASGI server (uvicorn, hypercorn, etc.).
Example
-------
>>> from streamlit.starlette import App
>>> app = App("main.py")
Run with uvicorn:
.. code-block:: bash
uvicorn myapp:app --host 0.0.0.0 --port 8501
"""
from streamlit.web.server.starlette.starlette_app import App
__all__ = ["App"]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/starlette.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/streamlit/web/server/app_discovery.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App discovery utilities for detecting ASGI app instances in scripts.
This module provides functions to discover if a Python script contains an
ASGI application instance (like st.App, FastAPI, or Starlette), enabling
the CLI to auto-detect whether to run in traditional mode or ASGI mode.
By design, this supports not only Streamlit's st.App but also other ASGI
frameworks like FastAPI and Starlette. This allows `streamlit run` to serve
as a unified entry point for any ASGI app, providing a consistent developer
experience for projects that combine Streamlit with other frameworks or use
ASGI apps directly.
The detection uses AST (Abstract Syntax Tree) parsing to safely analyze
the source code without executing it.
"""
from __future__ import annotations
import ast
import operator
from dataclasses import dataclass
from typing import TYPE_CHECKING, Final
from streamlit.logger import get_logger
if TYPE_CHECKING:
from pathlib import Path
_LOGGER: Final = get_logger(__name__)
# Preferred variable names to look for when discovering ASGI app instances.
# These are checked in order of priority.
_PREFERRED_APP_NAMES: Final[tuple[str, ...]] = ("app", "streamlit_app")
# Known ASGI app classes with their fully qualified module paths.
# Each entry is a dotted path like "module.submodule.ClassName".
# Only classes matching these paths will be detected as ASGI apps.
#
# Note: FastAPI and Starlette are intentionally included here. This enables
# `streamlit run` to serve as a unified entry point for ASGI apps, which is
# useful for projects that mount Streamlit within other frameworks or want
# to use `streamlit run` for any ASGI application.
_KNOWN_ASGI_APP_CLASSES: Final[tuple[str, ...]] = (
# Streamlit App
"streamlit.starlette.App",
"streamlit.web.server.starlette.App",
"streamlit.web.server.starlette.starlette_app.App",
# FastAPI
"fastapi.FastAPI",
"fastapi.applications.FastAPI",
# Starlette
"starlette.applications.Starlette",
)
@dataclass
class AppDiscoveryResult:
    """Result of ASGI app discovery.

    Attributes
    ----------
    is_asgi_app
        True if the script contains an ASGI app instance.
    app_name
        The name of the app instance variable (e.g., "app").
    import_string
        The import string for uvicorn (e.g., "module:app").
    """

    # True when a known ASGI app assignment was found in the script.
    is_asgi_app: bool
    # Variable name the app instance is bound to; None when nothing was found.
    app_name: str | None
    # "module:variable" string suitable for uvicorn; None when nothing was found.
    import_string: str | None
def _get_call_name_parts(node: ast.Call) -> tuple[str, ...] | None:
"""Extract the name parts from a Call node's func attribute.
For example:
- `App(...)` returns ("App",)
- `st.App(...)` returns ("st", "App")
- `streamlit.starlette.App(...)` returns ("streamlit", "starlette", "App")
Parameters
----------
node
An AST Call node.
Returns
-------
tuple[str, ...] | None
A tuple of name parts, or None if the call target is not a simple
name or attribute chain.
"""
func = node.func
parts: list[str] = []
while isinstance(func, ast.Attribute):
parts.append(func.attr)
func = func.value
if isinstance(func, ast.Name):
parts.append(func.id)
return tuple(reversed(parts))
return None
def _extract_imports(tree: ast.AST) -> dict[str, str]:
"""Extract import mappings from an AST.
Builds a mapping from local names to their fully qualified module paths.
For example:
- `from streamlit.starlette import App` → {"App": "streamlit.starlette.App"}
- `from streamlit import starlette` → {"starlette": "streamlit.starlette"}
- `import streamlit as st` → {"st": "streamlit"}
- `import fastapi` → {"fastapi": "fastapi"}
Parameters
----------
tree
The parsed AST of a Python module.
Returns
-------
dict[str, str]
A mapping from local names to their fully qualified module paths.
"""
imports: dict[str, str] = {}
for node in ast.walk(tree):
if isinstance(node, ast.Import):
# Handle: import x, import x as y
for alias in node.names:
local_name = alias.asname or alias.name
imports[local_name] = alias.name
elif isinstance(node, ast.ImportFrom) and node.module:
# Handle: from x.y import z, from x.y import z as w
for alias in node.names:
local_name = alias.asname or alias.name
imports[local_name] = f"{node.module}.{alias.name}"
return imports
def _resolve_call_to_module_path(
parts: tuple[str, ...], imports: dict[str, str]
) -> str | None:
"""Resolve a call's name parts to a fully qualified module path.
Uses the import mapping to resolve the first part of the call chain,
then appends any remaining parts.
For example, with imports {"App": "streamlit.starlette.App"}:
- ("App",) → "streamlit.starlette.App"
With imports {"st": "streamlit"}:
- ("st", "starlette", "App") → "streamlit.starlette.App"
Parameters
----------
parts
The name parts from a Call node (e.g., ("st", "App")).
imports
The import mapping from _extract_imports.
Returns
-------
str | None
The fully qualified module path, or None if resolution fails.
"""
if not parts:
return None
first_part = parts[0]
remaining_parts = parts[1:]
if first_part in imports:
# The first part was imported, resolve it
base_path = imports[first_part]
if remaining_parts:
return f"{base_path}.{'.'.join(remaining_parts)}"
return base_path
# Not imported - could be a fully qualified name or unknown
# For fully qualified names like streamlit.starlette.App(),
# just join all parts
return ".".join(parts)
def _is_asgi_app_call(node: ast.Call, imports: dict[str, str]) -> bool:
    """Check if a Call node represents a known ASGI app constructor.

    The call is resolved to its fully qualified module path using the
    import mapping, then compared against the allow-list of known ASGI
    app classes.

    Parameters
    ----------
    node
        An AST Call node.
    imports
        The import mapping from _extract_imports.

    Returns
    -------
    bool
        True if the call is a known ASGI app constructor.
    """
    name_parts = _get_call_name_parts(node)
    if name_parts is None:
        return False

    qualified = _resolve_call_to_module_path(name_parts, imports)
    return qualified is not None and qualified in _KNOWN_ASGI_APP_CLASSES
def _get_module_string_from_path(path: Path) -> str:
"""Convert a file path to a module import string.
Since `streamlit run` adds the script's directory to sys.path via
_fix_sys_path, the module string should just be the script's stem,
not a fully qualified package path.
Parameters
----------
path
Path to the Python file.
Returns
-------
str
The module string suitable for uvicorn (e.g., "myapp").
"""
resolved = path.resolve()
# Handle __init__.py files - use the directory name
if resolved.is_file() and resolved.stem == "__init__":
return resolved.parent.stem
return resolved.stem
def _find_asgi_app_assignments(source: str) -> dict[str, int]:
    """Find all variable assignments to ASGI app constructors in source code.

    The source is parsed, import statements are collected to establish the
    module context, and then both plain and annotated assignments to known
    ASGI app constructors are recorded.

    Parameters
    ----------
    source
        Python source code to analyze.

    Returns
    -------
    dict[str, int]
        A mapping of variable names to their line numbers where ASGI app
        instances are assigned.
    """
    try:
        module = ast.parse(source)
    except SyntaxError as exc:
        _LOGGER.debug("Failed to parse source: %s", exc)
        return {}

    # Import context is needed to resolve constructor names to their modules.
    import_map = _extract_imports(module)
    found: dict[str, int] = {}

    for node in ast.walk(module):
        if isinstance(node, ast.Assign):
            # Simple assignment: app = App(...)
            value = node.value
            if isinstance(value, ast.Call) and _is_asgi_app_call(value, import_map):
                for target in node.targets:
                    if isinstance(target, ast.Name):
                        found[target.id] = node.lineno
        elif isinstance(node, ast.AnnAssign):
            # Annotated assignment: app: App = App(...)
            value = node.value
            if (
                value is not None
                and isinstance(value, ast.Call)
                and isinstance(node.target, ast.Name)
                and _is_asgi_app_call(value, import_map)
            ):
                found[node.target.id] = node.lineno

    return found
def discover_asgi_app(
    path: Path,
    app_name: str | None = None,
) -> AppDiscoveryResult:
    """Discover if a Python file contains an ASGI app instance using AST parsing.

    The source is analyzed without executing it. Import statements are
    tracked so that a detected App class is only accepted when it actually
    comes from a known ASGI framework (streamlit, fastapi, starlette); a
    user-defined class that merely shares the name is rejected.

    Supported import patterns:
    - `from streamlit.starlette import App`
    - `import streamlit` (for `streamlit.starlette.App`)
    - `from fastapi import FastAPI`
    - `from starlette.applications import Starlette`

    The app variable can have any name (e.g., `app`, `my_dashboard`,
    `server`). Preferred names are checked first: "app", "streamlit_app".

    Parameters
    ----------
    path
        Path to the Python script to check.
    app_name
        Optional specific variable name to look for. If provided, only that
        name is checked. If not provided, preferred names are checked first,
        then the earliest discovered ASGI app is used as a fallback.

    Returns
    -------
    AppDiscoveryResult
        Discovery result indicating whether an ASGI app was found and how
        to import it.

    Examples
    --------
    >>> result = discover_asgi_app(Path("streamlit_app.py"))
    >>> if result.is_asgi_app:
    ...     print(f"Found ASGI app: {result.import_string}")
    """
    not_found = AppDiscoveryResult(is_asgi_app=False, app_name=None, import_string=None)

    if not path.exists():
        _LOGGER.debug("Path does not exist: %s", path)
        return not_found

    try:
        source = path.read_text(encoding="utf-8")
    except (OSError, UnicodeDecodeError) as exc:
        _LOGGER.debug("Failed to read file %s: %s", path, exc)
        return not_found

    assignments = _find_asgi_app_assignments(source)
    if not assignments:
        _LOGGER.debug("No ASGI app assignments found in %s", path)
        return not_found

    module_str = _get_module_string_from_path(path)

    # Caller asked for one specific variable name: it must match exactly.
    if app_name:
        if app_name not in assignments:
            _LOGGER.debug("No ASGI app found with name '%s'", app_name)
            return not_found
        _LOGGER.debug(
            "Found ASGI app at %s:%s (line %d)",
            module_str,
            app_name,
            assignments[app_name],
        )
        return AppDiscoveryResult(
            is_asgi_app=True,
            app_name=app_name,
            import_string=f"{module_str}:{app_name}",
        )

    # No explicit name: conventional names win over anything else.
    for candidate in _PREFERRED_APP_NAMES:
        if candidate in assignments:
            _LOGGER.debug(
                "Found ASGI app at %s:%s (preferred name, line %d)",
                module_str,
                candidate,
                assignments[candidate],
            )
            return AppDiscoveryResult(
                is_asgi_app=True,
                app_name=candidate,
                import_string=f"{module_str}:{candidate}",
            )

    # Fallback: the assignment that appears earliest in the file.
    fallback_name, fallback_line = min(assignments.items(), key=operator.itemgetter(1))
    _LOGGER.debug(
        "Found ASGI app at %s:%s (fallback, line %d)",
        module_str,
        fallback_name,
        fallback_line,
    )
    return AppDiscoveryResult(
        is_asgi_app=True,
        app_name=fallback_name,
        import_string=f"{module_str}:{fallback_name}",
    )
__all__ = ["AppDiscoveryResult", "discover_asgi_app"]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/app_discovery.py",
"license": "Apache License 2.0",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/web/server/app_discovery_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the streamlit.web.server.app_discovery module."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from streamlit.web.server.app_discovery import (
AppDiscoveryResult,
_extract_imports,
_find_asgi_app_assignments,
_get_call_name_parts,
_get_module_string_from_path,
_is_asgi_app_call,
_resolve_call_to_module_path,
discover_asgi_app,
)
if TYPE_CHECKING:
from pathlib import Path
class TestGetCallNameParts:
    """Tests for _get_call_name_parts function."""

    def test_simple_name(self) -> None:
        """Test extracting name from simple call like App(...)."""
        import ast

        # The expression statement's value is the Call node under test.
        tree = ast.parse("App()")
        call = tree.body[0].value  # type: ignore
        assert _get_call_name_parts(call) == ("App",)

    def test_single_attribute(self) -> None:
        """Test extracting name from call like st.App(...)."""
        import ast

        tree = ast.parse("st.App()")
        call = tree.body[0].value  # type: ignore
        assert _get_call_name_parts(call) == ("st", "App")

    def test_nested_attribute(self) -> None:
        """Test extracting name from call like streamlit.starlette.App(...)."""
        import ast

        tree = ast.parse("streamlit.starlette.App()")
        call = tree.body[0].value  # type: ignore
        assert _get_call_name_parts(call) == ("streamlit", "starlette", "App")
class TestExtractImports:
    """Tests for _extract_imports function."""

    def test_import_statement(self) -> None:
        """Test extracting from 'import x' style imports."""
        import ast

        tree = ast.parse("import streamlit")
        imports = _extract_imports(tree)
        assert imports == {"streamlit": "streamlit"}

    def test_import_with_alias(self) -> None:
        """Test extracting from 'import x as y' style imports."""
        import ast

        tree = ast.parse("import streamlit as st")
        imports = _extract_imports(tree)
        # The alias, not the module name, becomes the local key.
        assert imports == {"st": "streamlit"}

    def test_from_import(self) -> None:
        """Test extracting from 'from x import y' style imports."""
        import ast

        tree = ast.parse("from streamlit.starlette import App")
        imports = _extract_imports(tree)
        assert imports == {"App": "streamlit.starlette.App"}

    def test_from_import_with_alias(self) -> None:
        """Test extracting from 'from x import y as z' style imports."""
        import ast

        tree = ast.parse("from fastapi import FastAPI as FA")
        imports = _extract_imports(tree)
        assert imports == {"FA": "fastapi.FastAPI"}
class TestResolveCallToModulePath:
    """Tests for _resolve_call_to_module_path function."""

    def test_resolves_imported_name(self) -> None:
        """Test resolving a name that was imported."""
        imports = {"App": "streamlit.starlette.App"}
        result = _resolve_call_to_module_path(("App",), imports)
        assert result == "streamlit.starlette.App"

    def test_resolves_aliased_module(self) -> None:
        """Test resolving a call through an aliased module."""
        imports = {"st": "streamlit"}
        result = _resolve_call_to_module_path(("st", "starlette", "App"), imports)
        assert result == "streamlit.starlette.App"

    def test_returns_joined_parts_for_unknown(self) -> None:
        """Test that unknown names are joined as-is."""
        imports: dict[str, str] = {}
        result = _resolve_call_to_module_path(
            ("streamlit", "starlette", "App"), imports
        )
        # Unimported chains are assumed to be fully qualified already.
        assert result == "streamlit.starlette.App"
class TestIsAsgiAppCall:
    """Tests for _is_asgi_app_call function."""

    @pytest.mark.parametrize(
        ("code", "imports"),
        [
            # Streamlit App with proper import
            ("App()", {"App": "streamlit.starlette.App"}),
            # Fully qualified streamlit.starlette.App
            ("streamlit.starlette.App()", {"streamlit": "streamlit"}),
            # FastAPI with proper import
            ("FastAPI()", {"FastAPI": "fastapi.FastAPI"}),
            ("fastapi.FastAPI()", {"fastapi": "fastapi"}),
            # Starlette with proper import
            ("Starlette()", {"Starlette": "starlette.applications.Starlette"}),
        ],
    )
    def test_recognizes_asgi_app_patterns(
        self, code: str, imports: dict[str, str]
    ) -> None:
        """Test that known ASGI app patterns are recognized with proper imports."""
        import ast

        tree = ast.parse(code)
        call = tree.body[0].value  # type: ignore
        assert _is_asgi_app_call(call, imports) is True

    @pytest.mark.parametrize(
        ("code", "imports"),
        [
            # App without import - could be user's custom class
            ("App()", {}),
            # App imported from unknown module
            ("App()", {"App": "my_custom_lib.App"}),
            # Random class
            ("SomeOtherClass()", {}),
            ("my_function()", {}),
            ("random.thing.Call()", {}),
        ],
    )
    def test_rejects_non_asgi_patterns(
        self, code: str, imports: dict[str, str]
    ) -> None:
        """Test that non-ASGI patterns and unimported App are rejected."""
        import ast

        tree = ast.parse(code)
        call = tree.body[0].value  # type: ignore
        assert _is_asgi_app_call(call, imports) is False
class TestFindAsgiAppAssignments:
    """Tests for _find_asgi_app_assignments function."""

    def test_finds_simple_assignment_with_import(self) -> None:
        """Test finding assignment when App is properly imported."""
        # Line 1 of the parsed source is the blank line after the opening
        # quotes, so the assignment lands on line 3.
        source = """
from streamlit.starlette import App
app = App("main.py")
"""
        result = _find_asgi_app_assignments(source)
        assert result == {"app": 3}

    def test_finds_annotated_assignment_with_import(self) -> None:
        """Test finding annotated assignment with proper import."""
        source = """
from streamlit.starlette import App
app: App = App("main.py")
"""
        result = _find_asgi_app_assignments(source)
        assert result == {"app": 3}

    def test_finds_multiple_assignments_with_imports(self) -> None:
        """Test finding multiple ASGI app assignments with proper imports."""
        source = """
from streamlit.starlette import App
from fastapi import FastAPI
from starlette.applications import Starlette
app = App("main.py")
another = FastAPI()
third = Starlette()
"""
        result = _find_asgi_app_assignments(source)
        assert "app" in result
        assert "another" in result
        assert "third" in result

    def test_ignores_app_without_import(self) -> None:
        """Test that App() without proper import is ignored (prevents false positives)."""
        source = """
x = 1
y = SomeClass()
app = App("main.py")
"""
        result = _find_asgi_app_assignments(source)
        # App without import should NOT be detected
        assert result == {}

    def test_ignores_app_from_wrong_module(self) -> None:
        """Test that App from a custom module is not detected."""
        source = """
from my_custom_lib import App
app = App("main.py")
"""
        result = _find_asgi_app_assignments(source)
        # App from unknown module should NOT be detected
        assert result == {}

    def test_handles_syntax_error(self) -> None:
        """Test that syntax errors return empty dict."""
        source = "this is not valid python {"
        result = _find_asgi_app_assignments(source)
        assert result == {}
class TestGetModuleStringFromPath:
    """Tests for _get_module_string_from_path function."""

    def test_simple_file(self, tmp_path: Path) -> None:
        """Test module string for a simple file."""
        script = tmp_path / "main.py"
        script.touch()
        result = _get_module_string_from_path(script)
        assert result == "main"

    def test_nested_file_without_init(self, tmp_path: Path) -> None:
        """Test module string for nested file without __init__.py."""
        subdir = tmp_path / "myapp"
        subdir.mkdir()
        script = subdir / "main.py"
        script.touch()
        result = _get_module_string_from_path(script)
        assert result == "main"

    def test_nested_file_with_init(self, tmp_path: Path) -> None:
        """Test module string for nested file with __init__.py (package).

        Since streamlit run adds the script's directory to sys.path,
        we only return the script's stem, not the full package path.
        """
        subdir = tmp_path / "myapp"
        subdir.mkdir()
        (subdir / "__init__.py").touch()
        script = subdir / "main.py"
        script.touch()
        result = _get_module_string_from_path(script)
        # Only return the stem since _fix_sys_path adds script's dir to sys.path
        assert result == "main"
class TestDiscoverAsgiApp:
    """Tests for discover_asgi_app function."""

    def test_discovers_app_named_app(self, tmp_path: Path) -> None:
        """Test discovery of ASGI app named 'app'."""
        script = tmp_path / "streamlit_app.py"
        script.write_text("""
from streamlit.starlette import App
app = App("main.py")
""")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is True
        assert result.app_name == "app"
        assert "streamlit_app:app" in (result.import_string or "")

    def test_discovers_app_named_streamlit_app(self, tmp_path: Path) -> None:
        """Test discovery of ASGI app named 'streamlit_app'."""
        script = tmp_path / "my_module.py"
        script.write_text("""
from streamlit.starlette import App
streamlit_app = App("main.py")
""")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is True
        assert result.app_name == "streamlit_app"

    def test_prefers_app_over_other_names(self, tmp_path: Path) -> None:
        """Test that 'app' is preferred over other ASGI app instances."""
        script = tmp_path / "streamlit_app.py"
        script.write_text("""
from streamlit.starlette import App
my_custom_app = App("main.py")
app = App("main.py")
another_app = App("main.py")
""")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is True
        # "app" wins even though "my_custom_app" appears first in the file.
        assert result.app_name == "app"

    def test_discovers_custom_named_app(self, tmp_path: Path) -> None:
        """Test discovery of ASGI app with a custom name."""
        script = tmp_path / "streamlit_app.py"
        script.write_text("""
from streamlit.starlette import App
my_dashboard = App("main.py")
""")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is True
        assert result.app_name == "my_dashboard"

    def test_discovers_specific_app_name(self, tmp_path: Path) -> None:
        """Test discovery of ASGI app with a specific name provided."""
        script = tmp_path / "streamlit_app.py"
        script.write_text("""
from streamlit.starlette import App
app = App("main.py")
secondary_app = App("main.py")
""")
        result = discover_asgi_app(script, app_name="secondary_app")
        assert result.is_asgi_app is True
        assert result.app_name == "secondary_app"

    def test_returns_false_for_no_app(self, tmp_path: Path) -> None:
        """Test that discovery returns False when no ASGI app is found."""
        script = tmp_path / "streamlit_app.py"
        script.write_text("""
import streamlit as st
st.write("Hello")
""")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is False
        assert result.app_name is None
        assert result.import_string is None

    def test_returns_false_for_app_without_import(self, tmp_path: Path) -> None:
        """Test that App without proper import is not detected (prevents false positives)."""
        script = tmp_path / "streamlit_app.py"
        script.write_text('app = App("main.py")')
        result = discover_asgi_app(script)
        assert result.is_asgi_app is False

    def test_returns_false_for_custom_app_class(self, tmp_path: Path) -> None:
        """Test that a custom App class from user's module is not detected."""
        script = tmp_path / "streamlit_app.py"
        script.write_text("""
from my_custom_lib import App
app = App("main.py")
""")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is False

    def test_returns_false_for_nonexistent_file(self, tmp_path: Path) -> None:
        """Test that discovery returns False for nonexistent files."""
        script = tmp_path / "nonexistent.py"
        result = discover_asgi_app(script)
        assert result.is_asgi_app is False

    def test_returns_false_for_syntax_error(self, tmp_path: Path) -> None:
        """Test that discovery returns False for files with syntax errors."""
        script = tmp_path / "bad_script.py"
        script.write_text("this is not valid python {")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is False

    def test_returns_false_for_specific_name_not_found(self, tmp_path: Path) -> None:
        """Test that discovery returns False when specific name is not found."""
        script = tmp_path / "streamlit_app.py"
        script.write_text("""
from streamlit.starlette import App
app = App("main.py")
""")
        result = discover_asgi_app(script, app_name="nonexistent")
        assert result.is_asgi_app is False

    def test_discovers_fastapi_app(self, tmp_path: Path) -> None:
        """Test that FastAPI apps are discovered with proper import."""
        script = tmp_path / "main.py"
        script.write_text("""
from fastapi import FastAPI
app = FastAPI()
""")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is True
        assert result.app_name == "app"

    def test_discovers_starlette_app(self, tmp_path: Path) -> None:
        """Test that Starlette apps are discovered with proper import."""
        script = tmp_path / "main.py"
        script.write_text("""
from starlette.applications import Starlette
app = Starlette(routes=[])
""")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is True
        assert result.app_name == "app"

    def test_discovers_fully_qualified_app(self, tmp_path: Path) -> None:
        """Test discovery of fully qualified ASGI app like streamlit.starlette.App."""
        script = tmp_path / "main.py"
        script.write_text("""
import streamlit
app = streamlit.starlette.App("main.py")
""")
        result = discover_asgi_app(script)
        assert result.is_asgi_app is True
        assert result.app_name == "app"
class TestAppDiscoveryResult:
    """Tests for AppDiscoveryResult dataclass."""

    def test_result_with_app_found(self) -> None:
        """Test AppDiscoveryResult when app is found."""
        result = AppDiscoveryResult(
            is_asgi_app=True,
            app_name="app",
            import_string="mymodule:app",
        )
        assert result.is_asgi_app is True
        assert result.app_name == "app"
        assert result.import_string == "mymodule:app"

    def test_result_with_no_app_found(self) -> None:
        """Test AppDiscoveryResult when no app is found."""
        result = AppDiscoveryResult(
            is_asgi_app=False,
            app_name=None,
            import_string=None,
        )
        assert result.is_asgi_app is False
        assert result.app_name is None
        assert result.import_string is None
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/app_discovery_test.py",
"license": "Apache License 2.0",
"lines": 378,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/shared/input_utils.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared input helpers for Streamlit Playwright E2E tests.
These utilities centralize common input interactions and assertions used across
E2E tests, with a focus on stability and guarding against regressions from
global hotkeys.
"""
from __future__ import annotations
import string
from typing import TYPE_CHECKING
from playwright.sync_api import Locator, Page, expect
if TYPE_CHECKING:
from collections.abc import Callable
# A curated set of characters that covers common key inputs while staying stable in
# Playwright typing semantics (no enter/tab/newline, which can submit forms or move focus).
_COMMON_TYPED_CHARACTERS = (
string.ascii_lowercase
+ string.ascii_uppercase
+ string.digits
+ " "
+ ".,-_@/:;?!'\"()[]{}<>+=*&^%$#~`|\\"
)
def type_common_characters_into_input(
    input_locator: Locator,
    *,
    after_each: Callable[[str], None] | None = None,
) -> str:
    """Type a broad set of common characters into a focused input.

    Intended for regression tests that verify typing into an editable
    target stays isolated from global hotkeys.

    Parameters
    ----------
    input_locator : Locator
        The input element to type into.
    after_each : Callable[[str], None] | None, optional
        Optional callback invoked after each typed character (useful for
        asserting that global hotkey side effects did not occur), by default
        None.

    Returns
    -------
    str
        The full string that was typed.
    """
    # Focus the input and start from an empty value.
    expect(input_locator).to_be_visible()
    input_locator.click()
    input_locator.fill("")

    typed_so_far = ""
    for character in _COMMON_TYPED_CHARACTERS:
        input_locator.type(character)
        typed_so_far += character
        # Verify the character landed before invoking any per-character hook.
        expect(input_locator).to_have_value(typed_so_far)
        if after_each is not None:
            after_each(character)

    return typed_so_far
def expect_global_hotkeys_not_fired(
    app: Page,
    *,
    expected_runs: int | None = None,
    runs_locator: Locator | None = None,
) -> None:
    """Assert that global hotkeys did not trigger their UI side effects.

    Verifies that global hotkeys such as rerun and clear-cache did not fire
    while interacting with an input widget during a test.

    Parameters
    ----------
    app : Page
        The Playwright page representing the Streamlit app under test.
    expected_runs : int | None, optional
        If provided, the expected number of script runs. When set, the function
        asserts that a corresponding UI element reflecting this count is
        visible, by default None.
    runs_locator : Locator | None, optional
        Optional locator for the element that displays the run count. If not
        provided, a default locator is derived from ``expected_runs``, by
        default None.

    Returns
    -------
    None
        This function is used for assertions and does not return a value.

    Examples
    --------
    Assert that typing did not trigger a rerun or open the clear-cache dialog:

    >>> expect_global_hotkeys_not_fired(app, expected_runs=1)
    """
    # Rerun hotkey: must not start a script run while we're typing.
    expect(app.get_by_test_id("stApp")).to_have_attribute(
        "data-test-script-state",
        "notRunning",
    )

    if expected_runs is not None:
        if runs_locator is not None:
            run_count_locator = runs_locator
        else:
            run_count_locator = app.get_by_text(f"Runs: {expected_runs}", exact=True)
        expect(run_count_locator).to_be_visible()

    # Clear-cache hotkey: must not open the clear-cache dialog while we're typing.
    expect(app.get_by_test_id("stClearCacheDialog")).not_to_be_visible()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/shared/input_utils.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_server.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uvicorn server wrappers for running Streamlit applications (using Starlette).
This module provides two classes for running Streamlit apps with uvicorn:
1. **UvicornServer** (async): For embedding in an existing event loop.
Used by the `Server` class when `server.useStarlette=true`.
2. **UvicornRunner** (sync): For standalone CLI usage with blocking execution.
Used by `run_asgi_app()` when running `st.App` via `streamlit run`.
Why Two Classes?
----------------
These classes serve different architectural needs:
- **UvicornServer** integrates with Streamlit's existing `Server` class architecture,
which manages an event loop and coordinates multiple components (runtime, server,
signal handlers). It uses `uvicorn.Server` with manual socket binding for fine-grained
control and runs as a background task.
- **UvicornRunner** is designed for `st.App` mode where the app handles its own
runtime lifecycle via ASGI lifespan. It uses `uvicorn.run()` which manages its own
event loop and signal handlers - perfect for CLI "just run it" usage.
"""
from __future__ import annotations
import asyncio
import errno
import socket
import sys
from typing import TYPE_CHECKING, Any, Final
from streamlit import config
from streamlit.config_option import ConfigOption
from streamlit.logger import get_logger
from streamlit.runtime.runtime_util import get_max_message_size_bytes
from streamlit.web.server.starlette.starlette_app import create_starlette_app
from streamlit.web.server.starlette.starlette_server_config import (
DEFAULT_SERVER_ADDRESS,
DEFAULT_WEBSOCKET_PING_INTERVAL,
DEFAULT_WEBSOCKET_PING_TIMEOUT,
MAX_PORT_SEARCH_RETRIES,
)
if TYPE_CHECKING:
import uvicorn
from streamlit.runtime import Runtime
_LOGGER: Final = get_logger(__name__)
class RetriesExceededError(Exception):
    """Raised when no free port is found within the maximum number of retries."""
# ---------------------------------------------------------------------------
# Private utility functions for uvicorn configuration
# ---------------------------------------------------------------------------
def _get_server_address() -> str:
    """Return the configured server address, falling back to the default."""
    configured = config.get_option("server.address")
    return configured if configured else DEFAULT_SERVER_ADDRESS
def _get_server_port() -> int:
    """Return the configured server port as an integer."""
    port_option = config.get_option("server.port")
    return int(port_option)
def _is_port_manually_set() -> bool:
    """Report whether the user explicitly configured the server port."""
    return config.is_manually_set("server.port")
def _server_address_is_unix_socket() -> bool:
    """Report whether the configured server address uses the unix:// scheme."""
    configured = config.get_option("server.address")
    if configured is None:
        return False
    return configured.startswith("unix://")
def _validate_ssl_config() -> tuple[str | None, str | None]:
    """Validate and return the SSL configuration.

    Returns
    -------
    tuple[str | None, str | None]
        ``(cert_file, key_file)``. Both are None when SSL is disabled, or
        both are set when SSL is enabled. Exits the process when only one
        of the two options is set.
    """
    cert_file = config.get_option("server.sslCertFile")
    key_file = config.get_option("server.sslKeyFile")
    # A partial SSL configuration is a user error we cannot recover from:
    # both options must be provided together, or neither.
    if bool(cert_file) is not bool(key_file):
        _LOGGER.error(
            "Options 'server.sslCertFile' and 'server.sslKeyFile' must "
            "be set together. Set missing options or delete existing options."
        )
        sys.exit(1)
    return cert_file, key_file
def _get_websocket_settings() -> tuple[int, int]:
    """Return the WebSocket ``(ping_interval, ping_timeout)`` in seconds."""
    user_interval = config.get_option("server.websocketPingInterval")
    if user_interval is None:
        return DEFAULT_WEBSOCKET_PING_INTERVAL, DEFAULT_WEBSOCKET_PING_TIMEOUT
    # When the user configures an interval, uvicorn gets a timeout equal to
    # the interval for consistency.
    seconds = int(user_interval)
    return seconds, seconds
def _get_uvicorn_config_kwargs() -> dict[str, Any]:
    """Assemble the uvicorn settings shared by server and runner.

    The returned dict can be splatted into ``uvicorn.Config`` or
    ``uvicorn.run()``. It intentionally omits ``app``, ``host``, and
    ``port`` — callers supply those themselves.
    """
    ssl_cert, ssl_key = _validate_ssl_config()
    ping_interval, ping_timeout = _get_websocket_settings()
    kwargs: dict[str, Any] = {
        "ssl_certfile": ssl_cert,
        "ssl_keyfile": ssl_key,
        "ws": "auto",
        "ws_ping_interval": ping_interval,
        "ws_ping_timeout": ping_timeout,
        "ws_max_size": get_max_message_size_bytes(),
        "ws_per_message_deflate": config.get_option(
            "server.enableWebsocketCompression"
        ),
        "use_colors": False,
        "log_config": None,
    }
    return kwargs
def _bind_socket(address: str, port: int, backlog: int) -> socket.socket:
"""Bind a non-blocking TCP socket to the given address and port.
We pre-bind the socket ourselves (rather than letting uvicorn do it) to:
1. Detect port conflicts before creating the uvicorn.Server instance
2. Enable port retry logic when the configured port is already in use
3. Have explicit control over socket options (SO_REUSEADDR, IPV6_V6ONLY)
Parameters
----------
address
The IP address to bind to (e.g., "127.0.0.1" or "::").
port
The port number to bind to.
backlog
The maximum number of queued connections.
Returns
-------
socket.socket
A bound, listening, non-blocking socket.
"""
if ":" in address:
family = socket.AF_INET6
else:
family = socket.AF_INET
sock = socket.socket(family=family)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if family == socket.AF_INET6:
# Allow both IPv4 and IPv6 clients when binding to "::".
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
sock.bind((address, port))
sock.listen(backlog)
sock.setblocking(False)
sock.set_inheritable(True)
return sock
except BaseException:
sock.close()
raise
# ---------------------------------------------------------------------------
# Server classes
# ---------------------------------------------------------------------------
class UvicornServer:
    """Async uvicorn server for embedding in an existing event loop.
    This class is used by Streamlit's `Server` class when `server.useStarlette=true`.
    It wraps `uvicorn.Server` and provides:
    - `start()`: Async method that returns when the server is ready to accept connections
    - Background task execution: Server runs in background while caller continues
    - `stop()`: Gracefully signal the server to shut down
    - `stopped`: Event that fires when the server has fully stopped
    This async design allows the `Server` class to coordinate multiple components
    (runtime lifecycle, signal handlers, stop/stopped semantics) in its event loop.
    Parameters
    ----------
    runtime
        The Streamlit Runtime instance. Used to create the Starlette application
        via `create_starlette_app(runtime)`.
    Examples
    --------
    Used internally by Server._start_starlette():
    >>> server = UvicornServer(runtime)
    >>> await server.start() # Returns when ready
    >>> # ... server running in background ...
    >>> server.stop()
    >>> await server.stopped.wait()
    """
    def __init__(self, runtime: Runtime) -> None:
        self._runtime = runtime
        # Populated lazily by start(); None until then.
        self._server: uvicorn.Server | None = None
        self._server_task: asyncio.Task[None] | None = None
        # Set by the background serve task once the server has fully stopped.
        self._stopped_event = asyncio.Event()
        # The pre-bound listening socket; closed by the serve task on exit.
        self._socket: socket.socket | None = None
    async def start(self) -> None:
        """Start the server and return when ready to accept connections."""
        try:
            import uvicorn
        except ModuleNotFoundError as exc:  # pragma: no cover
            raise RuntimeError(
                "uvicorn is required for server.useStarlette but is not installed. "
                "Install it via `pip install streamlit[starlette]`."
            ) from exc
        if _server_address_is_unix_socket():
            raise RuntimeError(
                "Unix sockets are not supported with Starlette currently."
            )
        app = create_starlette_app(self._runtime)
        # Get server configuration
        configured_address = _get_server_address()
        configured_port = _get_server_port()
        uvicorn_kwargs = _get_uvicorn_config_kwargs()
        last_exception: BaseException | None = None
        # Port search: try the configured port first, then successive ports,
        # until a bind succeeds or MAX_PORT_SEARCH_RETRIES is exhausted.
        for attempt in range(MAX_PORT_SEARCH_RETRIES + 1):
            port = configured_port + attempt
            uvicorn_config = uvicorn.Config(
                app,
                host=configured_address,
                port=port,
                **uvicorn_kwargs,
            )
            try:
                self._socket = _bind_socket(
                    configured_address,
                    port,
                    uvicorn_config.backlog,
                )
            except OSError as exc:
                last_exception = exc
                # EADDRINUSE: port in use by another process
                # EACCES: port reserved by system (common on Windows, see #13521)
                if exc.errno in {errno.EADDRINUSE, errno.EACCES}:
                    if _is_port_manually_set():
                        # The user asked for this exact port; fail rather than
                        # silently moving to a different one.
                        _LOGGER.error("Port %s is not available", port)  # noqa: TRY400
                        sys.exit(1)
                    _LOGGER.debug(
                        "Port %s not available, trying to use the next one.", port
                    )
                    if attempt == MAX_PORT_SEARCH_RETRIES:
                        raise RetriesExceededError(
                            f"Cannot start Streamlit server. Port {port} is not available, "
                            f"and Streamlit was unable to find a free port after "
                            f"{MAX_PORT_SEARCH_RETRIES} attempts."
                        ) from exc
                    continue
                raise
            self._server = uvicorn.Server(uvicorn_config)
            # Publish the port we actually bound so the rest of Streamlit
            # sees the effective value.
            config.set_option("server.port", port, ConfigOption.STREAMLIT_DEFINITION)
            _LOGGER.debug(
                "Starting uvicorn server on %s:%s",
                configured_address,
                port,
            )
            # Rebound on each loop iteration; the nested coroutine below
            # closes over them, hence the B023 suppressions.
            startup_complete = asyncio.Event()
            startup_exception: BaseException | None = None
            async def serve_with_signal() -> None:
                """Serve the application with proper lifecycle management.
                This ensures the server is shut down gracefully when the task is
                cancelled or an exception occurs.
                """
                nonlocal startup_exception
                if self._server is None or self._socket is None:
                    raise RuntimeError("Server or socket not initialized")
                try:
                    # Initialize config and lifespan (normally done in _serve)
                    server_config = self._server.config
                    if not server_config.loaded:
                        server_config.load()
                    self._server.lifespan = server_config.lifespan_class(server_config)
                    await self._server.startup(sockets=[self._socket])
                    if self._server.should_exit:
                        # uvicorn flags a failed startup via should_exit.
                        startup_exception = RuntimeError("Server startup failed")
                        startup_complete.set()  # noqa: B023
                        return
                    startup_complete.set()  # noqa: B023
                    await self._server.main_loop()
                except BaseException as e:
                    # Catch BaseException to handle CancelledError (which is not
                    # an Exception). This ensures startup_complete is set even if
                    # the task is cancelled before startup completes, preventing
                    # a deadlock in start() which awaits startup_complete.
                    startup_exception = e
                    raise
                finally:
                    try:
                        if self._server is not None:
                            await self._server.shutdown(sockets=[self._socket])
                    finally:
                        # Ensure socket cleanup and stopped event are always set,
                        # even if shutdown raises an exception.
                        if self._socket is not None:
                            self._socket.close()
                            self._socket = None
                        self._stopped_event.set()
                        # Always set startup_complete to prevent deadlock in start()
                        # if task is cancelled before normal startup_complete.set().
                        startup_complete.set()  # noqa: B023
            self._server_task = asyncio.create_task(
                serve_with_signal(), name="uvicorn-server"
            )
            # Block until the background task reports readiness (or failure),
            # then surface any startup error to the caller.
            await startup_complete.wait()
            if startup_exception is not None:
                raise startup_exception
            _LOGGER.info(
                "Uvicorn server started on %s:%s",
                configured_address,
                port,
            )
            return
        # Defensive: every loop iteration either returns or raises, so this
        # should be unreachable; re-raise the last bind error just in case.
        if last_exception is not None:
            raise last_exception
    def stop(self) -> None:
        """Signal the server to stop."""
        if self._server is not None:
            # uvicorn's main loop polls should_exit and begins graceful shutdown.
            self._server.should_exit = True
    @property
    def stopped(self) -> asyncio.Event:
        """An event that is set when the server has fully stopped."""
        return self._stopped_event
class UvicornRunner:
    """Sync uvicorn runner for standalone CLI usage.
    This class is used by `run_asgi_app()` when running `st.App` via `streamlit run`.
    It wraps `uvicorn.run()` which is a blocking call that:
    - Creates and manages its own event loop
    - Handles OS signals (SIGINT, SIGTERM) for graceful shutdown
    - Runs until the server exits
    This is ideal for `st.App` mode because:
    - The `st.App` handles its own runtime lifecycle via ASGI lifespan hooks
    - No external coordination is needed - uvicorn manages everything
    - Simple "run and block" semantics for CLI usage
    Parameters
    ----------
    app
        Either an ASGI app instance or an import string (e.g., "myapp:app").
        Import strings are preferred as they allow uvicorn to handle the import.
    Examples
    --------
    Used by bootstrap.run_asgi_app():
    >>> runner = UvicornRunner("myapp:app")
    >>> runner.run() # Blocks until server exits
    """
    def __init__(self, app: str) -> None:
        # App instance or import string; handed to uvicorn.run() unchanged.
        self._app = app
    def run(self) -> None:
        """Run the server synchronously (blocking until exit).
        This method blocks until the server exits, either from a signal
        (Ctrl+C, SIGTERM) or an error. It handles port retry automatically
        if the configured port is not available.
        """
        try:
            import uvicorn
        except ModuleNotFoundError as exc:  # pragma: no cover
            raise RuntimeError(
                "uvicorn is required for running st.App. "
                "Install it with: pip install uvicorn"
            ) from exc
        if _server_address_is_unix_socket():
            raise RuntimeError("Unix sockets are not supported with st.App currently.")
        # Get server configuration
        configured_address = _get_server_address()
        configured_port = _get_server_port()
        uvicorn_kwargs = _get_uvicorn_config_kwargs()
        # Port retry loop - try successive ports if the configured one is busy
        for attempt in range(MAX_PORT_SEARCH_RETRIES + 1):
            port = configured_port + attempt
            if attempt > 0:
                # Publish the retried port so the rest of Streamlit sees the
                # effective value (attempt 0 already matches the config).
                config.set_option(
                    "server.port", port, ConfigOption.STREAMLIT_DEFINITION
                )
            # TODO(lukasmasuch): Print the URL with the selected port.
            try:
                _LOGGER.debug(
                    "Starting uvicorn runner on %s:%s",
                    configured_address,
                    port,
                )
                # Blocking call: uvicorn manages its own event loop and signal
                # handlers until the server exits.
                uvicorn.run(
                    self._app,
                    host=configured_address,
                    port=port,
                    **uvicorn_kwargs,
                )
                return  # Server exited normally
            except OSError as exc:
                # EADDRINUSE: port in use by another process
                # EACCES: port reserved by system (common on Windows)
                if exc.errno in {errno.EADDRINUSE, errno.EACCES}:
                    if _is_port_manually_set():
                        # The user pinned this port; fail instead of moving on.
                        _LOGGER.error("Port %s is not available", port)  # noqa: TRY400
                        sys.exit(1)
                    _LOGGER.debug(
                        "Port %s not available, trying to use the next one.", port
                    )
                    if attempt == MAX_PORT_SEARCH_RETRIES:
                        _LOGGER.error(  # noqa: TRY400
                            "Cannot start Streamlit server. Port %s is not available, "
                            "and Streamlit was unable to find a free port after "
                            "%s attempts.",
                            port,
                            MAX_PORT_SEARCH_RETRIES,
                        )
                        sys.exit(1)
                    continue
                raise
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_server.py",
"license": "Apache License 2.0",
"lines": 394,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_server_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for starlette_server module."""
from __future__ import annotations
import asyncio
import errno
import socket
from typing import TYPE_CHECKING
from unittest import mock
from unittest.mock import AsyncMock, patch
import pytest
if TYPE_CHECKING:
from collections.abc import Coroutine
from typing import Any
from streamlit import config
from streamlit.runtime import Runtime
from streamlit.web.server.server import Server
from streamlit.web.server.starlette.starlette_server import (
RetriesExceededError,
UvicornRunner,
_bind_socket,
_get_websocket_settings,
_is_port_manually_set,
_server_address_is_unix_socket,
)
from tests.testutil import patch_config_options
class TestBindSocket:
    """Tests for _bind_socket function."""

    @staticmethod
    def _fake_socket() -> mock.MagicMock:
        """Return a MagicMock standing in for a raw socket object."""
        return mock.MagicMock()

    def test_creates_ipv4_socket(self) -> None:
        """An IPv4 address yields an AF_INET socket with the full setup sequence."""
        fake_sock = self._fake_socket()
        with patch("socket.socket", return_value=fake_sock) as socket_factory:
            bound = _bind_socket("127.0.0.1", 8501, 100)
            socket_factory.assert_called_once_with(family=socket.AF_INET)
            fake_sock.setsockopt.assert_called_with(
                socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
            )
            fake_sock.bind.assert_called_once_with(("127.0.0.1", 8501))
            fake_sock.listen.assert_called_once_with(100)
            fake_sock.setblocking.assert_called_once_with(False)
            fake_sock.set_inheritable.assert_called_once_with(True)
            assert bound == fake_sock

    def test_creates_ipv6_socket(self) -> None:
        """An IPv6 address yields an AF_INET6 socket with IPV6_V6ONLY cleared."""
        fake_sock = self._fake_socket()
        with patch("socket.socket", return_value=fake_sock) as socket_factory:
            bound = _bind_socket("::", 8501, 100)
            socket_factory.assert_called_once_with(family=socket.AF_INET6)
            fake_sock.setsockopt.assert_any_call(
                socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0
            )
            assert bound == fake_sock

    def test_detects_ipv6_by_colon(self) -> None:
        """Any address containing a colon is treated as IPv6."""
        fake_sock = self._fake_socket()
        with patch("socket.socket", return_value=fake_sock) as socket_factory:
            _bind_socket("::1", 8501, 100)
            socket_factory.assert_called_once_with(family=socket.AF_INET6)

    def test_closes_socket_on_bind_failure(self) -> None:
        """The socket is closed (no fd leak) when bind() raises."""
        fake_sock = self._fake_socket()
        fake_sock.bind.side_effect = OSError(errno.EADDRINUSE, "Address already in use")
        with patch("socket.socket", return_value=fake_sock):
            with pytest.raises(OSError, match="Address already in use"):
                _bind_socket("127.0.0.1", 8501, 100)
            fake_sock.close.assert_called_once()

    def test_closes_socket_on_listen_failure(self) -> None:
        """The socket is closed (no fd leak) when listen() raises."""
        fake_sock = self._fake_socket()
        fake_sock.listen.side_effect = OSError("Listen failed")
        with patch("socket.socket", return_value=fake_sock):
            with pytest.raises(OSError, match="Listen failed"):
                _bind_socket("127.0.0.1", 8501, 100)
            fake_sock.close.assert_called_once()
class TestGetWebsocketSettings:
    """Tests for the _get_websocket_settings helper."""

    @patch_config_options({"server.websocketPingInterval": None})
    def test_default_settings(self) -> None:
        """Defaults (30s/30s) are used when no interval is configured."""
        settings = _get_websocket_settings()
        assert settings == (30, 30)

    @patch_config_options({"server.websocketPingInterval": 45})
    def test_custom_interval(self) -> None:
        """A configured interval is applied to both interval and timeout."""
        settings = _get_websocket_settings()
        assert settings == (45, 45)

    @patch_config_options({"server.websocketPingInterval": 10})
    def test_low_interval_accepted(self) -> None:
        """Low interval values pass through (no Tornado lower-bound constraint)."""
        settings = _get_websocket_settings()
        assert settings == (10, 10)
class TestServerPortIsManuallySet:
    """Tests for _is_port_manually_set function."""

    def test_returns_true_when_manually_set(self) -> None:
        """True is reported when the port was explicitly configured."""
        with patch("streamlit.config.is_manually_set", return_value=True):
            assert _is_port_manually_set() is True

    def test_returns_false_when_default(self) -> None:
        """False is reported when the port is still the default."""
        with patch("streamlit.config.is_manually_set", return_value=False):
            assert _is_port_manually_set() is False
class TestServerAddressIsUnixSocket:
    """Tests for _server_address_is_unix_socket function."""

    @patch_config_options({"server.address": "unix:///tmp/streamlit.sock"})
    def test_returns_true_for_unix_socket(self) -> None:
        """A unix:// address is recognized as a Unix socket."""
        assert _server_address_is_unix_socket() is True

    @patch_config_options({"server.address": "127.0.0.1"})
    def test_returns_false_for_ip_address(self) -> None:
        """A plain IP address is not treated as a Unix socket."""
        assert _server_address_is_unix_socket() is False

    @patch_config_options({"server.address": None})
    def test_returns_false_for_none(self) -> None:
        """An unset address is not treated as a Unix socket."""
        assert _server_address_is_unix_socket() is False

    @patch_config_options({"server.address": ""})
    def test_returns_false_for_empty_string(self) -> None:
        """An empty address string is not treated as a Unix socket."""
        assert _server_address_is_unix_socket() is False
class TestSslConfiguration:
    """Tests for SSL configuration validation in StarletteServer."""

    def setUp(self) -> None:
        """Prepare an isolated Runtime and a private event loop."""
        Runtime._instance = None
        self.original_port = config.get_option("server.port")
        self.loop = asyncio.new_event_loop()
        config.set_option("server.port", 8650)

    def tearDown(self) -> None:
        """Undo the global state changes made by setUp."""
        Runtime._instance = None
        config.set_option("server.port", self.original_port)
        self.loop.close()

    @pytest.fixture(autouse=True)
    def setup_and_teardown(self) -> None:
        """Run setUp before and tearDown after every test in this class."""
        self.setUp()
        yield
        self.tearDown()

    def _create_server(self) -> Server:
        """Build a Server wired to this class's private event loop."""
        server = Server("mock/script/path", is_hello=False)
        server._runtime._eventloop = self.loop
        return server

    def _run_async(self, coro: Coroutine[Any, Any, None]) -> None:
        """Drive a coroutine to completion on the private event loop."""
        self.loop.run_until_complete(coro)

    @staticmethod
    def _uvicorn_stub() -> mock.MagicMock:
        """Create a uvicorn.Server stand-in with awaitable lifecycle methods."""
        stub = mock.MagicMock()
        stub.startup = AsyncMock()
        stub.main_loop = AsyncMock()
        stub.shutdown = AsyncMock()
        stub.should_exit = False
        return stub

    @patch_config_options(
        {"server.sslCertFile": "/tmp/cert.pem", "server.sslKeyFile": None}
    )
    def test_exits_when_only_cert_file_set(self) -> None:
        """A cert file without a key file aborts startup with SystemExit."""
        server = self._create_server()
        with pytest.raises(SystemExit):
            self._run_async(server._start_starlette())

    @patch_config_options(
        {"server.sslCertFile": None, "server.sslKeyFile": "/tmp/key.pem"}
    )
    def test_exits_when_only_key_file_set(self) -> None:
        """A key file without a cert file aborts startup with SystemExit."""
        server = self._create_server()
        with pytest.raises(SystemExit):
            self._run_async(server._start_starlette())

    @patch_config_options({"server.sslCertFile": None, "server.sslKeyFile": None})
    def test_no_ssl_when_neither_option_set(self) -> None:
        """With no SSL options, uvicorn receives ssl_certfile/ssl_keyfile=None."""
        server = self._create_server()
        fake_socket = mock.MagicMock(spec=socket.socket)
        with (
            patch(
                "streamlit.web.server.starlette.starlette_server._bind_socket",
                return_value=fake_socket,
            ),
            patch("uvicorn.Config") as uvicorn_config_cls,
            patch("uvicorn.Server") as uvicorn_server_cls,
        ):
            uvicorn_server_cls.return_value = self._uvicorn_stub()
            self._run_async(server._start_starlette())
            uvicorn_config_cls.assert_called_once()
            passed_kwargs = uvicorn_config_cls.call_args[1]
            assert passed_kwargs["ssl_certfile"] is None
            assert passed_kwargs["ssl_keyfile"] is None

    @patch_config_options(
        {"server.sslCertFile": "/tmp/cert.pem", "server.sslKeyFile": "/tmp/key.pem"}
    )
    def test_ssl_options_passed_to_uvicorn(self) -> None:
        """Both SSL options flow through to uvicorn.Config unchanged."""
        server = self._create_server()
        fake_socket = mock.MagicMock(spec=socket.socket)
        with (
            patch(
                "streamlit.web.server.starlette.starlette_server._bind_socket",
                return_value=fake_socket,
            ),
            patch("uvicorn.Config") as uvicorn_config_cls,
            patch("uvicorn.Server") as uvicorn_server_cls,
        ):
            uvicorn_server_cls.return_value = self._uvicorn_stub()
            self._run_async(server._start_starlette())
            uvicorn_config_cls.assert_called_once()
            passed_kwargs = uvicorn_config_cls.call_args[1]
            assert passed_kwargs["ssl_certfile"] == "/tmp/cert.pem"
            assert passed_kwargs["ssl_keyfile"] == "/tmp/key.pem"
class TestStartStarletteServer:
    """Integration tests for the Server._start_starlette() method."""

    def setUp(self) -> None:
        """Prepare an isolated Runtime and a private event loop."""
        Runtime._instance = None
        self.original_port = config.get_option("server.port")
        self.loop = asyncio.new_event_loop()
        config.set_option("server.port", 8600)

    def tearDown(self) -> None:
        """Undo the global state changes made by setUp."""
        Runtime._instance = None
        config.set_option("server.port", self.original_port)
        self.loop.close()

    @pytest.fixture(autouse=True)
    def setup_and_teardown(self) -> None:
        """Run setUp before and tearDown after every test in this class."""
        self.setUp()
        yield
        self.tearDown()

    def _create_server(self) -> Server:
        """Build a Server wired to this class's private event loop."""
        server = Server("mock/script/path", is_hello=False)
        server._runtime._eventloop = self.loop
        return server

    def _run_async(self, coro: Coroutine[Any, Any, None]) -> None:
        """Drive a coroutine to completion on the private event loop."""
        self.loop.run_until_complete(coro)

    @staticmethod
    def _uvicorn_stub() -> mock.MagicMock:
        """Create a uvicorn.Server stand-in with awaitable lifecycle methods."""
        stub = mock.MagicMock()
        stub.startup = AsyncMock()
        stub.main_loop = AsyncMock()
        stub.shutdown = AsyncMock()
        stub.should_exit = False
        return stub

    def test_retries_on_port_in_use(self) -> None:
        """When the configured port is busy, the next port is tried."""
        server = self._create_server()
        fake_socket = mock.MagicMock(spec=socket.socket)
        with (
            patch(
                "streamlit.web.server.starlette.starlette_server._bind_socket",
                side_effect=[OSError(errno.EADDRINUSE, "busy"), fake_socket],
            ) as bind_socket,
            patch(
                "streamlit.web.server.starlette.starlette_server._is_port_manually_set",
                return_value=False,
            ),
            patch("uvicorn.Server") as uvicorn_server_cls,
        ):
            stub = self._uvicorn_stub()
            uvicorn_server_cls.return_value = stub
            self._run_async(server._start_starlette())
            # First bind fails, second succeeds on the next port.
            assert bind_socket.call_count == 2
            stub.startup.assert_awaited_once()
            assert config.get_option("server.port") == 8601

    def test_honors_manual_port_setting(self) -> None:
        """A busy, explicitly-configured port exits instead of retrying."""
        server = self._create_server()
        with (
            patch(
                "streamlit.web.server.starlette.starlette_server._bind_socket",
                side_effect=OSError(errno.EADDRINUSE, "busy"),
            ),
            patch(
                "streamlit.web.server.starlette.starlette_server._is_port_manually_set",
                return_value=True,
            ),
        ):
            with pytest.raises(SystemExit):
                self._run_async(server._start_starlette())

    def test_raises_on_max_retries_exceeded(self) -> None:
        """RetriesExceededError is raised once every candidate port fails."""
        server = self._create_server()
        with (
            patch(
                "streamlit.web.server.starlette.starlette_server._bind_socket",
                side_effect=OSError(errno.EADDRINUSE, "busy"),
            ),
            patch(
                "streamlit.web.server.starlette.starlette_server._is_port_manually_set",
                return_value=False,
            ),
        ):
            with pytest.raises(RetriesExceededError):
                self._run_async(server._start_starlette())

    def test_raises_on_unix_socket(self) -> None:
        """Unix socket addresses are rejected with a RuntimeError."""
        server = self._create_server()
        with patch_config_options({"server.address": "unix:///tmp/streamlit.sock"}):
            with pytest.raises(RuntimeError, match="Unix sockets are not supported"):
                self._run_async(server._start_starlette())

    def test_retries_on_permission_denied(self) -> None:
        """EACCES is treated like EADDRINUSE and triggers a retry.

        On Windows, system-reserved ports return EACCES instead of EADDRINUSE.
        See: https://github.com/streamlit/streamlit/issues/13521
        """
        server = self._create_server()
        fake_socket = mock.MagicMock(spec=socket.socket)
        with (
            patch(
                "streamlit.web.server.starlette.starlette_server._bind_socket",
                side_effect=[OSError(errno.EACCES, "permission denied"), fake_socket],
            ) as bind_socket,
            patch(
                "streamlit.web.server.starlette.starlette_server._is_port_manually_set",
                return_value=False,
            ),
            patch("uvicorn.Server") as uvicorn_server_cls,
        ):
            stub = self._uvicorn_stub()
            uvicorn_server_cls.return_value = stub
            self._run_async(server._start_starlette())
            assert bind_socket.call_count == 2
            stub.startup.assert_awaited_once()
            assert config.get_option("server.port") == 8601

    def test_propagates_non_retryable_errors(self) -> None:
        """Errors other than EADDRINUSE/EACCES bubble up unchanged."""
        server = self._create_server()
        with patch(
            "streamlit.web.server.starlette.starlette_server._bind_socket",
            side_effect=OSError(errno.ENOENT, "no such file"),
        ):
            with pytest.raises(OSError, match="no such file") as exc_info:
                self._run_async(server._start_starlette())
            assert exc_info.value.errno == errno.ENOENT

    def test_uses_default_address_when_not_configured(self) -> None:
        """With no configured address, the server binds to 0.0.0.0."""
        server = self._create_server()
        fake_socket = mock.MagicMock(spec=socket.socket)
        with (
            patch_config_options({"server.address": None}),
            patch(
                "streamlit.web.server.starlette.starlette_server._bind_socket",
                return_value=fake_socket,
            ) as bind_socket,
            patch("uvicorn.Server") as uvicorn_server_cls,
        ):
            uvicorn_server_cls.return_value = self._uvicorn_stub()
            self._run_async(server._start_starlette())
            bind_socket.assert_called_once()
            assert bind_socket.call_args[0][0] == "0.0.0.0"

    def test_uses_configured_address(self) -> None:
        """An explicitly configured address is passed to the bind call."""
        server = self._create_server()
        fake_socket = mock.MagicMock(spec=socket.socket)
        with (
            patch_config_options({"server.address": "192.168.1.100"}),
            patch(
                "streamlit.web.server.starlette.starlette_server._bind_socket",
                return_value=fake_socket,
            ) as bind_socket,
            patch("uvicorn.Server") as uvicorn_server_cls,
        ):
            uvicorn_server_cls.return_value = self._uvicorn_stub()
            self._run_async(server._start_starlette())
            bind_socket.assert_called_once()
            assert bind_socket.call_args[0][0] == "192.168.1.100"
class TestServerLifecycle:
"""Tests for server lifecycle behavior required by bootstrap.
These tests verify that the Starlette server correctly implements
the lifecycle semantics that bootstrap.py depends on:
- start() returns after server is ready (doesn't block forever)
- stop() signals graceful shutdown
- stopped property completes after shutdown
"""
    def setUp(self) -> None:
        """Set up test fixtures."""
        # Reset the Runtime singleton so each test constructs a fresh Server.
        Runtime._instance = None
        # Remember the current port so tearDown can restore it.
        self.original_port = config.get_option("server.port")
        config.set_option("server.port", 8700)
        self.loop = asyncio.new_event_loop()
    def tearDown(self) -> None:
        """Tear down test fixtures."""
        Runtime._instance = None
        # Restore the port saved in setUp and release the private event loop.
        config.set_option("server.port", self.original_port)
        self.loop.close()
    @pytest.fixture(autouse=True)
    def setup_and_teardown(self) -> None:
        """Pytest fixture for setup and teardown."""
        # Generator fixture: setUp runs before each test, tearDown after it.
        self.setUp()
        yield
        self.tearDown()
    def _create_server(self) -> Server:
        """Create a Server instance for testing."""
        server = Server("mock/script/path", is_hello=False)
        # Point the runtime at this class's private event loop.
        server._runtime._eventloop = self.loop
        return server
    def _run_async(self, coro: Coroutine[Any, Any, None]) -> None:
        """Run *coro* to completion on this class's private event loop."""
        self.loop.run_until_complete(coro)
def test_start_returns_after_server_ready(self) -> None:
"""Test that start() returns after server is ready, not after shutdown.
This is critical for bootstrap.py which expects to run _on_server_start()
and set up signal handlers after start() returns.
"""
server = self._create_server()
mock_socket = mock.MagicMock(spec=socket.socket)
start_returned = False
async def verify_start_returns() -> None:
nonlocal start_returned
await server._start_starlette()
# If we get here, start() returned (didn't block forever)
start_returned = True
with (
patch(
"streamlit.web.server.starlette.starlette_server._bind_socket",
return_value=mock_socket,
),
patch("uvicorn.Server") as uvicorn_server_cls,
):
uvicorn_instance = mock.MagicMock()
uvicorn_instance.startup = AsyncMock()
uvicorn_instance.main_loop = AsyncMock()
uvicorn_instance.shutdown = AsyncMock()
uvicorn_instance.should_exit = False
uvicorn_server_cls.return_value = uvicorn_instance
self._run_async(verify_start_returns())
assert start_returned, "start() should return after server is ready"
uvicorn_instance.startup.assert_awaited_once()
def test_stop_signals_server_shutdown(self) -> None:
"""Test that stop() signals the uvicorn server to exit."""
server = self._create_server()
mock_socket = mock.MagicMock(spec=socket.socket)
with (
patch(
"streamlit.web.server.starlette.starlette_server._bind_socket",
return_value=mock_socket,
),
patch("uvicorn.Server") as uvicorn_server_cls,
):
uvicorn_instance = mock.MagicMock()
uvicorn_instance.startup = AsyncMock()
uvicorn_instance.main_loop = AsyncMock()
uvicorn_instance.shutdown = AsyncMock()
uvicorn_instance.should_exit = False
uvicorn_server_cls.return_value = uvicorn_instance
self._run_async(server._start_starlette())
# Verify server is stored and can be stopped
assert server._starlette_server is not None
server.stop()
# Verify should_exit was set to True
assert uvicorn_instance.should_exit is True
def test_stopped_property_returns_awaitable(self) -> None:
"""Test that stopped property returns an awaitable for Starlette mode."""
import inspect
server = self._create_server()
mock_socket = mock.MagicMock(spec=socket.socket)
with (
patch(
"streamlit.web.server.starlette.starlette_server._bind_socket",
return_value=mock_socket,
),
patch("uvicorn.Server") as uvicorn_server_cls,
):
uvicorn_instance = mock.MagicMock()
uvicorn_instance.startup = AsyncMock()
uvicorn_instance.main_loop = AsyncMock()
uvicorn_instance.shutdown = AsyncMock()
uvicorn_instance.should_exit = False
uvicorn_server_cls.return_value = uvicorn_instance
self._run_async(server._start_starlette())
# Verify stopped returns an awaitable
stopped = server.stopped
assert stopped is not None
# Should be a coroutine or awaitable
assert inspect.iscoroutine(stopped) or hasattr(stopped, "__await__")
# Close the coroutine to avoid warning
if inspect.iscoroutine(stopped):
stopped.close()
def test_starlette_server_stored_on_server_instance(self) -> None:
"""Test that StarletteServer is stored on Server instance, not global."""
server1 = self._create_server()
mock_socket = mock.MagicMock(spec=socket.socket)
with (
patch(
"streamlit.web.server.starlette.starlette_server._bind_socket",
return_value=mock_socket,
),
patch("uvicorn.Server") as uvicorn_server_cls,
):
uvicorn_instance = mock.MagicMock()
uvicorn_instance.startup = AsyncMock()
uvicorn_instance.main_loop = AsyncMock()
uvicorn_instance.shutdown = AsyncMock()
uvicorn_instance.should_exit = False
uvicorn_server_cls.return_value = uvicorn_instance
# Before start, _starlette_server should be None
assert server1._starlette_server is None
self._run_async(server1._start_starlette())
# After start, _starlette_server should be set
assert server1._starlette_server is not None
# Verify it's instance-specific (not a module global)
from streamlit.web.server.starlette.starlette_server import UvicornServer
assert isinstance(server1._starlette_server, UvicornServer)
def test_raises_on_startup_failure(self) -> None:
"""Test that RuntimeError is raised when uvicorn startup fails."""
server = self._create_server()
mock_socket = mock.MagicMock(spec=socket.socket)
with (
patch(
"streamlit.web.server.starlette.starlette_server._bind_socket",
return_value=mock_socket,
),
patch("uvicorn.Server") as uvicorn_server_cls,
):
uvicorn_instance = mock.MagicMock()
uvicorn_instance.startup = AsyncMock()
uvicorn_instance.shutdown = AsyncMock()
# Simulate startup failure by setting should_exit to True after startup
uvicorn_instance.should_exit = True
uvicorn_server_cls.return_value = uvicorn_instance
with pytest.raises(RuntimeError, match="Server startup failed"):
self._run_async(server._start_starlette())
def test_stopped_event_set_after_main_loop_completes(self) -> None:
"""Test that stopped event is set after the server main loop completes."""
from streamlit.web.server.starlette.starlette_server import UvicornServer
server = self._create_server()
mock_socket = mock.MagicMock(spec=socket.socket)
with (
patch(
"streamlit.web.server.starlette.starlette_server._bind_socket",
return_value=mock_socket,
),
patch("uvicorn.Server") as uvicorn_server_cls,
):
uvicorn_instance = mock.MagicMock()
uvicorn_instance.startup = AsyncMock()
uvicorn_instance.shutdown = AsyncMock()
uvicorn_instance.should_exit = False
# Make main_loop complete immediately
uvicorn_instance.main_loop = AsyncMock(return_value=None)
uvicorn_server_cls.return_value = uvicorn_instance
self._run_async(server._start_starlette())
starlette_server: UvicornServer = server._starlette_server # type: ignore
assert starlette_server is not None
# Give the background task time to complete
self._run_async(asyncio.sleep(0.1))
# The stopped event should be set after main_loop completes
assert starlette_server.stopped.is_set()
def test_no_deadlock_on_task_cancellation(self) -> None:
"""Test that start() doesn't deadlock if task is cancelled during startup.
This tests a fix for a potential deadlock where CancelledError (which is
a BaseException, not Exception) would bypass the exception handler that
sets startup_complete, causing start() to hang forever on await
startup_complete.wait().
The fix ensures startup_complete is set in the finally block.
"""
from streamlit.web.server.starlette.starlette_server import UvicornServer
server = self._create_server()
mock_socket = mock.MagicMock(spec=socket.socket)
async def test_cancellation() -> None:
with (
patch(
"streamlit.web.server.starlette.starlette_server._bind_socket",
return_value=mock_socket,
),
patch("uvicorn.Server") as uvicorn_server_cls,
):
uvicorn_instance = mock.MagicMock()
uvicorn_instance.shutdown = AsyncMock()
uvicorn_instance.should_exit = False
# Make startup raise CancelledError to simulate task cancellation
uvicorn_instance.startup = AsyncMock(
side_effect=asyncio.CancelledError()
)
uvicorn_server_cls.return_value = uvicorn_instance
starlette_server = UvicornServer(server._runtime)
# This should raise CancelledError, not deadlock
with pytest.raises(asyncio.CancelledError):
await asyncio.wait_for(starlette_server.start(), timeout=2.0)
# The stopped event should still be set (cleanup happened)
assert starlette_server.stopped.is_set()
self._run_async(test_cancellation())
class TestUvicornRunner:
    """Tests for UvicornRunner class (sync blocking runner for st.App mode)."""

    def test_run_calls_uvicorn_with_correct_args(self) -> None:
        """run() forwards the app path, host, and port to uvicorn.run."""
        with (
            patch_config_options({"server.address": "0.0.0.0", "server.port": 8502}),
            patch(
                "streamlit.web.server.starlette.starlette_server._get_uvicorn_config_kwargs",
                return_value={"ssl_certfile": None, "ssl_keyfile": None},
            ),
            patch("uvicorn.run") as uvicorn_run,
        ):
            UvicornRunner("myapp:app").run()

            uvicorn_run.assert_called_once()
            args, kwargs = uvicorn_run.call_args
            assert args[0] == "myapp:app"
            assert kwargs["host"] == "0.0.0.0"
            assert kwargs["port"] == 8502

    def test_run_retries_on_port_in_use(self) -> None:
        """run() retries with another port when the first bind hits EADDRINUSE."""
        attempts = 0

        def fake_run(*args: Any, **kwargs: Any) -> None:
            nonlocal attempts
            attempts += 1
            # First attempt fails with "address in use"; the second succeeds.
            if attempts == 1:
                raise OSError(errno.EADDRINUSE, "Address already in use")

        with (
            patch_config_options({"server.address": "127.0.0.1", "server.port": 8501}),
            patch(
                "streamlit.web.server.starlette.starlette_server._get_uvicorn_config_kwargs",
                return_value={},
            ),
            patch(
                "streamlit.web.server.starlette.starlette_server._is_port_manually_set",
                return_value=False,
            ),
            patch("uvicorn.run", side_effect=fake_run),
        ):
            UvicornRunner("myapp:app").run()

        assert attempts == 2

    def test_run_exits_when_port_manually_set_and_unavailable(self) -> None:
        """run() gives up (SystemExit) when a user-chosen port is taken."""
        with (
            patch_config_options({"server.address": "127.0.0.1", "server.port": 8501}),
            patch(
                "streamlit.web.server.starlette.starlette_server._get_uvicorn_config_kwargs",
                return_value={},
            ),
            patch(
                "streamlit.web.server.starlette.starlette_server._is_port_manually_set",
                return_value=True,
            ),
            patch(
                "uvicorn.run",
                side_effect=OSError(errno.EADDRINUSE, "Address already in use"),
            ),
            pytest.raises(SystemExit),
        ):
            UvicornRunner("myapp:app").run()

    def test_run_rejects_unix_sockets(self) -> None:
        """run() refuses unix:// addresses with a clear error."""
        with (
            patch_config_options({"server.address": "unix://test.sock"}),
            patch(
                "streamlit.web.server.starlette.starlette_server._get_uvicorn_config_kwargs",
                return_value={},
            ),
        ):
            runner = UvicornRunner("myapp:app")
            with pytest.raises(RuntimeError, match="Unix sockets are not supported"):
                runner.run()
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/starlette/starlette_server_test.py",
"license": "Apache License 2.0",
"lines": 696,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_app.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starlette application for serving a Streamlit app."""
from __future__ import annotations
from contextlib import asynccontextmanager
from pathlib import Path
from typing import TYPE_CHECKING, Any, Final
from streamlit import config
from streamlit.web.server.server_util import get_cookie_secret
from streamlit.web.server.starlette.starlette_app_utils import (
generate_random_hex_string,
)
from streamlit.web.server.starlette.starlette_auth_routes import create_auth_routes
from streamlit.web.server.starlette.starlette_routes import (
BASE_ROUTE_COMPONENT,
BASE_ROUTE_CORE,
BASE_ROUTE_MEDIA,
BASE_ROUTE_UPLOAD_FILE,
create_app_static_serving_routes,
create_bidi_component_routes,
create_component_routes,
create_health_routes,
create_host_config_routes,
create_media_routes,
create_metrics_routes,
create_script_health_routes,
create_upload_routes,
)
from streamlit.web.server.starlette.starlette_server_config import (
GZIP_COMPRESSLEVEL,
GZIP_MINIMUM_SIZE,
SESSION_COOKIE_NAME,
)
from streamlit.web.server.starlette.starlette_static_routes import (
create_streamlit_static_assets_routes,
)
from streamlit.web.server.starlette.starlette_websocket import create_websocket_routes
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Callable, Mapping, Sequence
from contextlib import AbstractAsyncContextManager
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.routing import BaseRoute
from starlette.types import ExceptionHandler, Receive, Scope, Send
from streamlit.runtime import Runtime
from streamlit.runtime.media_file_manager import MediaFileManager
from streamlit.runtime.memory_media_file_storage import MemoryMediaFileStorage
from streamlit.runtime.memory_uploaded_file_manager import MemoryUploadedFileManager
# Reserved route prefixes that users cannot override.
# App._validate_routes rejects any user route whose path starts with one of
# these prefixes (or equals the bare prefix without the trailing slash).
_RESERVED_ROUTE_PREFIXES: Final[tuple[str, ...]] = (
    f"/{BASE_ROUTE_CORE}/",
    f"/{BASE_ROUTE_MEDIA}/",
    f"/{BASE_ROUTE_COMPONENT}/",
)
def create_streamlit_routes(runtime: Runtime) -> list[BaseRoute]:
    """Assemble every route Streamlit's core functionality requires.

    Covers WebSocket communication, health checks, metrics, media serving,
    file uploads, component serving, auth, and static file serving.

    Parameters
    ----------
    runtime
        The Streamlit Runtime instance that manages the application state.

    Returns
    -------
    list[BaseRoute]
        A list of Starlette route objects for Streamlit's core functionality.
    """
    # Pull the managers this server needs out of the runtime.
    media_manager: MediaFileManager = runtime.media_file_mgr
    upload_mgr: MemoryUploadedFileManager = runtime.uploaded_file_mgr  # type: ignore
    media_storage: MemoryMediaFileStorage = media_manager._storage  # type: ignore
    component_registry = runtime.component_registry
    bidi_component_manager = runtime.bidi_component_registry

    base_url = config.get_option("server.baseUrlPath")
    dev_mode = bool(config.get_option("global.developmentMode"))

    # Collect route groups in priority order, then flatten at the end.
    route_groups: list[Any] = [
        create_health_routes(runtime, base_url),
        create_metrics_routes(runtime, base_url),
        create_host_config_routes(base_url),
        create_media_routes(media_storage, base_url),
        create_upload_routes(runtime, upload_mgr, base_url),
        create_component_routes(component_registry, base_url),
        create_bidi_component_routes(bidi_component_manager, base_url),
        create_websocket_routes(runtime, base_url),
        create_auth_routes(base_url),
    ]

    if config.get_option("server.enableStaticServing"):
        # TODO(lukasmasuch): Expose main_script_path as property on runtime class
        # or make the runtime config available so that we don't need to access the
        # private attribute.
        main_script_path = getattr(runtime, "_main_script_path", None)
        route_groups.append(create_app_static_serving_routes(main_script_path, base_url))

    if config.get_option("server.scriptHealthCheckEnabled"):
        route_groups.append(create_script_health_routes(runtime, base_url))

    # Static assets mount only in production mode (dev serves them elsewhere).
    if not dev_mode:
        route_groups.append(create_streamlit_static_assets_routes(base_url=base_url))

    return [route for group in route_groups for route in group]
def create_streamlit_middleware() -> list[Middleware]:
    """Build Streamlit's internal middleware stack.

    Order matters: path security runs first, then session management,
    then GZip compression.

    Returns
    -------
    list[Middleware]
        A list of Starlette Middleware objects for Streamlit's core functionality.
    """
    from starlette.middleware import Middleware
    from starlette.middleware.sessions import SessionMiddleware

    from streamlit.web.server.starlette.starlette_gzip_middleware import (
        MediaAwareGZipMiddleware,
    )
    from streamlit.web.server.starlette.starlette_path_security_middleware import (
        PathSecurityMiddleware,
    )

    session_middleware = Middleware(
        SessionMiddleware,  # ty: ignore[invalid-argument-type]
        secret_key=get_cookie_secret() or generate_random_hex_string(),
        same_site="lax",
        https_only=bool(config.get_option("server.sslCertFile")),
        session_cookie=SESSION_COOKIE_NAME,
    )

    # Custom GZip middleware that excludes audio/video content from
    # compression: compressing binary media breaks playback in browsers,
    # especially with range requests. A custom middleware is used instead of
    # Content-Encoding: identity because some browsers (especially WebKit)
    # have issues with explicit identity encoding.
    gzip_middleware = Middleware(
        MediaAwareGZipMiddleware,
        minimum_size=GZIP_MINIMUM_SIZE,
        compresslevel=GZIP_COMPRESSLEVEL,
    )

    # Path security must come FIRST so dangerous paths are blocked before any
    # other processing.
    return [Middleware(PathSecurityMiddleware), session_middleware, gzip_middleware]
def create_starlette_app(runtime: Runtime) -> Starlette:
    """Create a fully configured Starlette application serving Streamlit.

    The returned app provides the complete web-server functionality that
    Streamlit needs:

    - WebSocket endpoint for client-server communication
    - Health check endpoints
    - Media file serving with range request support
    - File upload handling
    - Custom component serving
    - Static file serving with SPA fallback
    - XSRF protection
    - Session middleware
    - GZip compression
    """
    try:
        from starlette.applications import Starlette
    except ModuleNotFoundError as exc:  # pragma: no cover - import guard
        raise RuntimeError(
            "Starlette is not installed. Run `pip install streamlit[starlette]` "
            "or disable `server.useStarlette`."
        ) from exc

    @asynccontextmanager
    async def _lifespan(_app: Starlette) -> AsyncIterator[None]:
        # Bring the runtime up before serving, tear it down on shutdown.
        await runtime.start()
        yield
        runtime.stop()

    return Starlette(
        routes=create_streamlit_routes(runtime),
        middleware=create_streamlit_middleware(),
        lifespan=_lifespan,
    )
class App:
    """ASGI-compatible Streamlit application.

    .. warning::
        This feature is experimental and may change or be removed in future
        versions without warning. Use at your own risk.

    This class provides a way to configure and run Streamlit applications
    with custom routes, middleware, lifespan hooks, and exception handlers.

    Parameters
    ----------
    script_path : str | Path
        Path to the main Streamlit script. Can be absolute or relative. Relative
        paths are resolved based on context: when started via ``streamlit run``,
        they resolve relative to the main script; when started directly via uvicorn
        or another ASGI server, they resolve relative to the current working directory.
    lifespan : Callable[[App], AbstractAsyncContextManager[dict[str, Any] | None]] | None
        Async context manager for startup/shutdown logic. The context manager
        receives the App instance and can yield a dictionary of state that will
        be accessible via ``app.state``.
    routes : Sequence[BaseRoute] | None
        Additional routes to mount alongside Streamlit. User routes are checked
        against reserved Streamlit routes and will raise ValueError if they conflict.
    middleware : Sequence[Middleware] | None
        Middleware stack to apply to all requests. User middleware runs before
        Streamlit's internal middleware.
    exception_handlers : Mapping[Any, ExceptionHandler] | None
        Custom exception handlers for user routes.
    debug : bool
        Enable debug mode for the underlying Starlette application.

    Examples
    --------
    Basic usage:

    >>> from streamlit.web.server.starlette import App
    >>> app = App("main.py")

    With lifespan hooks:

    >>> from contextlib import asynccontextmanager
    >>> from streamlit.web.server.starlette import App
    >>>
    >>> @asynccontextmanager
    ... async def lifespan(app):
    ...     print("Starting up...")
    ...     yield {"model": "loaded"}
    ...     print("Shutting down...")
    >>>
    >>> app = App("main.py", lifespan=lifespan)

    With custom routes:

    >>> from starlette.routing import Route
    >>> from starlette.responses import JSONResponse
    >>> from streamlit.web.server.starlette import App
    >>>
    >>> async def health(request):
    ...     return JSONResponse({"status": "ok"})
    >>>
    >>> app = App("main.py", routes=[Route("/health", health)])
    """

    def __init__(
        self,
        script_path: str | Path,
        *,
        lifespan: (
            Callable[[App], AbstractAsyncContextManager[dict[str, Any] | None]] | None
        ) = None,
        routes: Sequence[BaseRoute] | None = None,
        middleware: Sequence[Middleware] | None = None,
        exception_handlers: Mapping[Any, ExceptionHandler] | None = None,
        debug: bool = False,
    ) -> None:
        self._script_path = Path(script_path)
        self._user_lifespan = lifespan
        # Defensive copies so later mutation by the caller can't affect us.
        self._user_routes = list(routes) if routes else []
        self._user_middleware = list(middleware) if middleware else []
        self._exception_handlers = (
            dict(exception_handlers) if exception_handlers else {}
        )
        self._debug = debug
        # Runtime and app are created lazily (first ASGI call or lifespan()).
        self._runtime: Runtime | None = None
        self._starlette_app: Starlette | None = None
        # Populated from the dict yielded by the user's lifespan, if any.
        self._state: dict[str, Any] = {}
        # True once lifespan() has been handed to a parent ASGI framework.
        self._external_lifespan: bool = False
        # Validate user routes don't conflict with reserved routes
        self._validate_routes()

    def _validate_routes(self) -> None:
        """Validate that user routes don't conflict with reserved Streamlit routes."""
        for route in self._user_routes:
            path = getattr(route, "path", None)
            if path:
                for reserved in _RESERVED_ROUTE_PREFIXES:
                    # Match both "/prefix/..." and the bare "/prefix" path.
                    if path.startswith(reserved) or path == reserved.rstrip("/"):
                        raise ValueError(
                            f"Route '{path}' conflicts with reserved Streamlit route "
                            f"prefix '{reserved}'. Use a different path like '/api/...'."
                        )

    @property
    def script_path(self) -> Path:
        """The entry point script path (as passed in, possibly relative)."""
        return self._script_path

    @property
    def state(self) -> dict[str, Any]:
        """Application state, populated by lifespan context manager."""
        return self._state

    def lifespan(self) -> Callable[[Any], AbstractAsyncContextManager[None]]:
        """Get a lifespan context manager for mounting on external ASGI frameworks.

        Use this when mounting st.App as a sub-application on another framework
        like FastAPI. The Streamlit runtime lifecycle will be managed by the
        parent framework's lifespan instead of st.App's internal lifespan.

        Returns
        -------
        Callable[[Any], AbstractAsyncContextManager[None]]
            A lifespan context manager compatible with Starlette/FastAPI.

        Examples
        --------
        Mount st.App on FastAPI:

        >>> from fastapi import FastAPI
        >>> from streamlit.starlette import App
        >>>
        >>> streamlit_app = App("dashboard.py")
        >>> fastapi_app = FastAPI(lifespan=streamlit_app.lifespan())
        >>> fastapi_app.mount("/dashboard", streamlit_app)
        """
        # Create runtime now (but don't start it - lifespan will do that)
        if self._runtime is None:
            self._runtime = self._create_runtime()
        # Mark that lifespan is externally managed
        self._external_lifespan = True
        return self._combined_lifespan

    def _resolve_script_path(self) -> Path:
        """Resolve the script path to an absolute path.

        Resolution order:
        1. If already absolute, return as-is
        2. If CLI set main_script_path (via `streamlit run`), resolve relative to it
        3. Otherwise, resolve relative to current working directory (e.g. when started via uvicorn)
        """
        if self._script_path.is_absolute():
            return self._script_path
        # Check if CLI set the main script path (streamlit run)
        # This is set in cli.py before config is loaded
        if config._main_script_path:
            return (Path(config._main_script_path).parent / self._script_path).resolve()
        # Fallback: resolve relative to cwd (direct uvicorn usage)
        return self._script_path.resolve()

    def _create_runtime(self) -> Runtime:
        """Create the Streamlit runtime (but don't start it yet)."""
        from streamlit.runtime import Runtime, RuntimeConfig
        from streamlit.runtime.memory_media_file_storage import MemoryMediaFileStorage
        from streamlit.runtime.memory_session_storage import MemorySessionStorage
        from streamlit.runtime.memory_uploaded_file_manager import (
            MemoryUploadedFileManager,
        )
        from streamlit.web.cache_storage_manager_config import (
            create_default_cache_storage_manager,
        )

        script_path = self._resolve_script_path()
        # Validate that the script file exists
        if not script_path.is_file():
            raise FileNotFoundError(
                f"Streamlit script not found: '{script_path}'. "
                f"Please verify that the path '{self._script_path}' is correct."
            )

        media_file_storage = MemoryMediaFileStorage(f"/{BASE_ROUTE_MEDIA}")
        uploaded_file_mgr = MemoryUploadedFileManager(f"/{BASE_ROUTE_UPLOAD_FILE}")
        return Runtime(
            RuntimeConfig(
                script_path=str(script_path),
                media_file_storage=media_file_storage,
                uploaded_file_manager=uploaded_file_mgr,
                cache_storage_manager=create_default_cache_storage_manager(),
                is_hello=False,
                session_storage=MemorySessionStorage(
                    ttl_seconds=config.get_option("server.disconnectedSessionTTL")
                ),
            ),
        )

    @asynccontextmanager
    async def _combined_lifespan(self, _app: Starlette) -> AsyncIterator[None]:
        """Combine Streamlit runtime lifecycle with user's lifespan.

        The runtime must already be created (via _create_runtime) before this
        lifespan runs. This lifespan handles starting and stopping the runtime.
        """
        from streamlit.web.bootstrap import prepare_streamlit_environment

        if self._runtime is None:
            raise RuntimeError(
                "Runtime not initialized. Call _create_runtime before lifespan."
            )
        # Set server mode for metrics tracking.
        # We need to detect if the app is mounted on another framework (FastAPI, etc.)
        # based on the _external_lifespan flag, which is set when lifespan() is called.
        if self._external_lifespan:
            # App is mounted on another framework - this takes precedence over CLI mode
            # because it reflects the actual architectural pattern being used.
            config._server_mode = "asgi-mounted"
        elif config._server_mode is None:
            # Standalone st.App started directly via external ASGI server (not CLI)
            config._server_mode = "asgi-server"
        # If config._server_mode is already "starlette-app" (set by CLI) and
        # _external_lifespan is False, keep it as "starlette-app"

        # Prepare the Streamlit environment (secrets, pydeck, static folder check)
        # Use resolved path to ensure correct directory for static folder check
        prepare_streamlit_environment(str(self._resolve_script_path()))
        # Start runtime (enables full cache support)
        await self._runtime.start()
        try:
            # Run user's lifespan
            if self._user_lifespan:
                async with self._user_lifespan(self) as state:
                    if state:
                        self._state.update(state)
                    yield
            else:
                yield
        finally:
            # Stop runtime
            self._runtime.stop()

    def _build_starlette_app(self) -> Starlette:
        """Build the Starlette application with all routes and middleware."""
        from starlette.applications import Starlette

        from streamlit.runtime import RuntimeState

        # If lifespan() was called, the parent framework manages the lifecycle.
        # Check if the runtime was actually started by the parent framework.
        # If not, the user likely called lifespan() but then used the app standalone,
        # which would result in the runtime never starting.
        if self._external_lifespan:
            runtime_not_started = (
                self._runtime is None or self._runtime.state == RuntimeState.INITIAL
            )
            if runtime_not_started:
                raise RuntimeError(
                    "Cannot use App as standalone ASGI application after calling "
                    "lifespan(). The lifespan() method should only be used when "
                    "mounting this App on another ASGI framework like FastAPI."
                )
        # Create the runtime if not already created
        if self._runtime is None:
            self._runtime = self._create_runtime()
        # Get Streamlit's internal routes
        streamlit_routes = create_streamlit_routes(self._runtime)
        # User routes come first (higher priority), then Streamlit routes
        # This allows users to override non-reserved routes like static files
        all_routes = self._user_routes + streamlit_routes
        # Get Streamlit's internal middleware
        streamlit_middleware = create_streamlit_middleware()
        # User middleware wraps Streamlit middleware (runs first on request,
        # last on response)
        all_middleware = self._user_middleware + streamlit_middleware
        # If external lifespan, the parent manages lifecycle; otherwise use internal
        app_lifespan = None if self._external_lifespan else self._combined_lifespan
        return Starlette(
            debug=self._debug,
            routes=all_routes,
            middleware=all_middleware,
            exception_handlers=self._exception_handlers,
            lifespan=app_lifespan,
        )

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        """ASGI interface; builds the underlying Starlette app on first call."""
        if self._starlette_app is None:
            self._starlette_app = self._build_starlette_app()
        await self._starlette_app(scope, receive, send)
# Public API of this module.
__all__ = ["App", "create_starlette_app"]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_app.py",
"license": "Apache License 2.0",
"lines": 450,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_app_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import asyncio
import json
from contextlib import asynccontextmanager
from http import HTTPStatus
from pathlib import Path
from typing import TYPE_CHECKING, Any
import pytest
from starlette.middleware import Middleware
from starlette.responses import JSONResponse
from starlette.routing import Route
from starlette.testclient import TestClient
from streamlit import file_util
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.runtime.media_file_manager import MediaFileManager, MediaFileMetadata
from streamlit.runtime.media_file_storage import MediaFileKind
from streamlit.runtime.memory_media_file_storage import MemoryMediaFileStorage
from streamlit.runtime.memory_uploaded_file_manager import MemoryUploadedFileManager
from streamlit.runtime.stats import CacheStat, CounterStat, GaugeStat
from streamlit.runtime.uploaded_file_manager import UploadedFileRec
from streamlit.web.server.routes import STATIC_ASSET_CACHE_MAX_AGE_SECONDS
from streamlit.web.server.starlette import starlette_app_utils
from streamlit.web.server.starlette.starlette_app import (
_RESERVED_ROUTE_PREFIXES,
App,
create_starlette_app,
)
from streamlit.web.server.stats_request_handler import StatsRequestHandler
from tests.testutil import patch_config_options
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator
from starlette.requests import Request
class _DummyStatsManager:
def __init__(self) -> None:
self._stats: dict[str, list[CacheStat | CounterStat | GaugeStat]] = {
"cache_memory_bytes": [CacheStat("test_cache", "", 1)],
"session_events_total": [
CounterStat(
family_name="session_events_total",
value=5,
labels={"type": "connect"},
help="Total count of session events by type.",
)
],
"active_sessions": [
GaugeStat(
family_name="active_sessions",
value=3,
help="Current number of active sessions.",
)
],
}
def get_stats(
self, family_names: list[str] | None = None
) -> dict[str, list[CacheStat | CounterStat | GaugeStat]]:
if family_names is None:
return self._stats
return {k: self._stats.get(k, []) for k in family_names}
class _DummyComponentRegistry:
def __init__(self) -> None:
self._paths: dict[str, str] = {}
def register(self, name: str, path: str) -> None:
self._paths[name] = path
def get_component_path(self, name: str) -> str | None:
return self._paths.get(name)
class _DummyBidiComponentRegistry:
def __init__(self) -> None:
self._paths: dict[str, str] = {}
def register(self, name: str, path: str) -> None:
self._paths[name] = path
def get(self, name: str) -> str | None:
return self._paths.get(name)
def get_component_path(self, name: str) -> str | None:
return self._paths.get(name)
class _DummyRuntime:
def __init__(self, component_dir: Path) -> None:
self.media_file_mgr = MediaFileManager(MemoryMediaFileStorage("/media"))
self.uploaded_file_mgr = MemoryUploadedFileManager("/_stcore/upload_file")
self.component_registry = _DummyComponentRegistry()
self.component_registry.register("comp", str(component_dir))
self.bidi_component_registry = _DummyBidiComponentRegistry()
self.bidi_component_registry.register("comp", str(component_dir))
self.stats_mgr = _DummyStatsManager()
self._active_sessions: set[str] = {"session123"}
self.stopped = False
self.last_backmsg = None
self.last_user_info: dict[str, str | bool | None] | None = None
self.last_existing_session_id: str | None = None
self.script_health = (True, "ok")
@property
def is_ready_for_browser_connection(self) -> asyncio.Future[tuple[bool, str]]:
loop = asyncio.get_event_loop()
fut: asyncio.Future[tuple[bool, str]] = loop.create_future()
fut.set_result((True, "ok"))
return fut
def does_script_run_without_error(self) -> asyncio.Future[tuple[bool, str]]:
loop = asyncio.get_event_loop()
fut: asyncio.Future[tuple[bool, str]] = loop.create_future()
fut.set_result(self.script_health)
return fut
def is_active_session(self, session_id: str) -> bool:
return session_id in self._active_sessions
def connect_session(
self,
client: object,
user_info: dict[str, str | bool | None],
existing_session_id: str | None = None,
session_id_override: str | None = None,
) -> str:
session_id = existing_session_id or session_id_override or "session-new"
self._active_sessions.add(session_id)
self.last_user_info = dict(user_info)
self.last_existing_session_id = existing_session_id
return session_id
def disconnect_session(self, session_id: str) -> None:
self._active_sessions.discard(session_id)
def handle_backmsg(self, session_id: str, msg: object) -> None:
self.last_backmsg = (session_id, msg)
def handle_backmsg_deserialization_exception(
self, session_id: str, exc: BaseException
) -> None:
self.last_backmsg = (session_id, exc)
async def start(self) -> None: # pragma: no cover - lifecycle stub
return None
def stop(self) -> None: # pragma: no cover - lifecycle stub
self.stopped = True
@pytest.fixture
def starlette_client(tmp_path: Path) -> Iterator[tuple[TestClient, _DummyRuntime]]:
    """Yield a (TestClient, dummy runtime) pair serving a Streamlit Starlette app.

    Builds a fake static directory and a component directory on disk, patches
    ``file_util.get_static_dir`` to point at the fake static dir, and serves a
    freshly created Starlette app through Starlette's TestClient.
    """
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Starlette's StaticFiles requires index.html to exist when html=True
    (static_dir / "index.html").write_text("<html>test</html>")
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    (component_dir / "bundle.js").write_text("console.log('component');")
    with patch_config_options(
        {
            "server.baseUrlPath": "",
            "global.developmentMode": False,
            # Disable XSRF for basic tests (matches Tornado test behavior)
            "server.enableXsrfProtection": False,
        }
    ):
        monkeypatch = pytest.MonkeyPatch()
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        try:
            runtime = _DummyRuntime(component_dir)
            app = create_starlette_app(runtime)
            with TestClient(app) as client:
                yield client, runtime
        finally:
            # BUGFIX: undo must run even when the test body raises. pytest
            # throws the test's exception into this generator at the yield,
            # so without try/finally the undo was skipped on failure and the
            # patched get_static_dir leaked into subsequent tests.
            monkeypatch.undo()
def test_health_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None:
    """The health endpoint answers 200 with body 'ok'."""
    client, _runtime = starlette_client
    resp = client.get("/_stcore/health")
    assert resp.text == "ok"
    assert resp.status_code == 200
def test_metrics_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None:
    """The metrics endpoint exposes every stat family in text format."""
    client, _runtime = starlette_client
    resp = client.get("/_stcore/metrics")
    assert resp.status_code == 200
    for family in ("cache_memory_bytes", "session_events_total", "active_sessions"):
        assert family in resp.text
def test_metrics_endpoint_filters_single_family(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Filtering by one family omits every other family from the output."""
    client, _runtime = starlette_client
    resp = client.get("/_stcore/metrics?families=session_events_total")
    assert resp.status_code == 200
    body = resp.text
    assert "session_events_total" in body
    assert "cache_memory_bytes" not in body
    assert "# TYPE active_sessions" not in body
def test_metrics_endpoint_filters_multiple_families(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Repeated `families` query params select several stat families at once."""
    client = starlette_client[0]
    res = client.get(
        "/_stcore/metrics?families=session_events_total&families=active_sessions"
    )
    assert res.status_code == 200
    body = res.text
    assert "session_events_total" in body
    assert "active_sessions" in body
    assert "cache_memory_bytes" not in body
def test_metrics_endpoint_unknown_family_returns_eof(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """An unknown family filter yields nothing but the OpenMetrics EOF marker."""
    client = starlette_client[0]
    res = client.get("/_stcore/metrics?families=unknown_family")
    assert res.status_code == 200
    assert res.text.strip() == "# EOF"
def test_metrics_endpoint_protobuf(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Requesting protobuf via the Accept header returns serialized stats."""
    client, runtime = starlette_client
    # Snapshot the stats before the request so the comparison is against the
    # same state the handler saw.
    stats = runtime.stats_mgr.get_stats()
    res = client.get(
        "/_stcore/metrics",
        headers={"Accept": "application/x-protobuf"},
    )
    assert res.status_code == 200
    assert res.headers["content-type"] == "application/x-protobuf"
    # The body must match the Tornado handler's serialization of those stats.
    assert res.content == StatsRequestHandler._stats_to_proto(stats).SerializeToString()
def test_media_endpoint_serves_file(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """A registered media file is served back with its original bytes."""
    client, runtime = starlette_client
    storage = runtime.media_file_mgr._storage
    file_id = storage.load_and_get_id(
        b"data", "text/plain", MediaFileKind.MEDIA, "foo.txt"
    )
    # Register metadata so the endpoint recognizes the file.
    runtime.media_file_mgr._file_metadata[file_id] = MediaFileMetadata(
        MediaFileKind.MEDIA
    )
    res = client.get(storage.get_url(file_id))
    assert res.status_code == 200
    assert res.content == b"data"
def test_media_endpoint_download_headers(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Downloadable files carry a Content-Disposition attachment header."""
    client, runtime = starlette_client
    storage = runtime.media_file_mgr._storage
    file_id = storage.load_and_get_id(
        b"binary",
        "application/octet-stream",
        MediaFileKind.DOWNLOADABLE,
        "fancy name.bin",
    )
    runtime.media_file_mgr._file_metadata[file_id] = MediaFileMetadata(
        MediaFileKind.DOWNLOADABLE
    )
    res = client.get(storage.get_url(file_id))
    assert res.status_code == 200
    expected_disposition = 'attachment; filename="fancy name.bin"'
    assert res.headers["Content-Disposition"] == expected_disposition
def test_media_endpoint_supports_range_requests(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """The media endpoint serves byte ranges for streaming clients."""
    client, runtime = starlette_client
    storage = runtime.media_file_mgr._storage
    file_id = storage.load_and_get_id(
        b"abcdefghij", "video/mp4", MediaFileKind.MEDIA, "clip.mp4"
    )
    runtime.media_file_mgr._file_metadata[file_id] = MediaFileMetadata(
        MediaFileKind.MEDIA
    )
    res = client.get(storage.get_url(file_id), headers={"Range": "bytes=2-5"})
    # Range is inclusive on both ends: bytes 2..5 of "abcdefghij" is "cdef".
    assert res.status_code == HTTPStatus.PARTIAL_CONTENT
    assert res.content == b"cdef"
    assert res.headers["Content-Range"] == "bytes 2-5/10"
    assert res.headers["Accept-Ranges"] == "bytes"
def test_media_endpoint_rejects_invalid_ranges(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Unsatisfiable Range headers get a 416 plus a 'bytes */size' header."""
    client, runtime = starlette_client
    storage = runtime.media_file_mgr._storage
    file_id = storage.load_and_get_id(
        b"abcd", "video/mp4", MediaFileKind.MEDIA, "clip.mp4"
    )
    runtime.media_file_mgr._file_metadata[file_id] = MediaFileMetadata(
        MediaFileKind.MEDIA
    )
    # The file is only 4 bytes long, so bytes=50-60 is out of range.
    res = client.get(storage.get_url(file_id), headers={"Range": "bytes=50-60"})
    assert res.status_code == HTTPStatus.REQUESTED_RANGE_NOT_SATISFIABLE
    assert res.headers["Content-Range"] == "bytes */4"
def test_media_endpoint_supports_head_requests(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """HEAD requests return the full metadata headers but an empty body."""
    client, runtime = starlette_client
    storage = runtime.media_file_mgr._storage
    file_id = storage.load_and_get_id(
        b"abcdefghij", "video/mp4", MediaFileKind.MEDIA, "clip.mp4"
    )
    runtime.media_file_mgr._file_metadata[file_id] = MediaFileMetadata(
        MediaFileKind.MEDIA
    )
    res = client.head(storage.get_url(file_id))
    assert res.status_code == 200
    assert res.headers["Content-Length"] == "10"
    assert res.headers["Accept-Ranges"] == "bytes"
    assert res.headers["Content-Type"] == "video/mp4"
    # A HEAD response must not carry a body.
    assert res.content == b""
def test_media_endpoint_no_content_encoding_for_video(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Video files are served without gzip compression."""
    client, runtime = starlette_client
    storage = runtime.media_file_mgr._storage
    file_id = storage.load_and_get_id(
        b"video-data", "video/mp4", MediaFileKind.MEDIA, "clip.mp4"
    )
    runtime.media_file_mgr._file_metadata[file_id] = MediaFileMetadata(
        MediaFileKind.MEDIA
    )
    res = client.get(storage.get_url(file_id))
    assert res.status_code == 200
    # Media routes either omit Content-Encoding or set it to "identity";
    # both mean no compression was applied.
    assert res.headers.get("Content-Encoding") in {None, "identity"}
def test_media_endpoint_no_content_encoding_for_audio(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Audio files are served without gzip compression."""
    client, runtime = starlette_client
    storage = runtime.media_file_mgr._storage
    file_id = storage.load_and_get_id(
        b"audio-data", "audio/mpeg", MediaFileKind.MEDIA, "sound.mp3"
    )
    runtime.media_file_mgr._file_metadata[file_id] = MediaFileMetadata(
        MediaFileKind.MEDIA
    )
    res = client.get(storage.get_url(file_id))
    assert res.status_code == 200
    # Media routes either omit Content-Encoding or set it to "identity";
    # both mean no compression was applied.
    assert res.headers.get("Content-Encoding") in {None, "identity"}
def test_media_endpoint_no_content_encoding_for_range_requests(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Video range responses are served without gzip compression."""
    client, runtime = starlette_client
    storage = runtime.media_file_mgr._storage
    file_id = storage.load_and_get_id(
        b"video-data-here", "video/mp4", MediaFileKind.MEDIA, "clip.mp4"
    )
    runtime.media_file_mgr._file_metadata[file_id] = MediaFileMetadata(
        MediaFileKind.MEDIA
    )
    res = client.get(storage.get_url(file_id), headers={"Range": "bytes=0-4"})
    assert res.status_code == HTTPStatus.PARTIAL_CONTENT
    # Partial-content media responses carry no (or identity) Content-Encoding.
    assert res.headers.get("Content-Encoding") in {None, "identity"}
def test_upload_put_adds_file(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """A PUT upload lands in the uploaded-file manager's storage."""
    client, runtime = starlette_client
    res = client.put(
        "_stcore/upload_file/session123/fileid",
        files={"file": ("foo.txt", b"payload", "text/plain")},
    )
    assert res.status_code == 204
    record = runtime.uploaded_file_mgr.file_storage["session123"]["fileid"]
    assert record.data == b"payload"
def test_upload_put_enforces_max_size(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Uploads larger than server.maxUploadSize are rejected with 413."""
    client = starlette_client[0]
    oversized = b"x" * (1024 * 1024 + 100)
    # Cap uploads at 1 MB, then send slightly more than that. TestClient sets
    # the Content-Length header automatically, which is what the server checks.
    with patch_config_options({"server.maxUploadSize": 1}):
        res = client.put(
            "_stcore/upload_file/session123/fileid",
            files={"file": ("foo.txt", oversized, "text/plain")},
        )
        assert res.status_code == 413
        assert res.text == "File too large"
def test_component_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None:
    """Custom component assets are served from the component directory."""
    client = starlette_client[0]
    res = client.get("/component/comp/index.html")
    assert res.status_code == 200
    assert res.text == "component"
def test_component_endpoint_sets_content_type(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """JS component assets are served with a JavaScript MIME type."""
    client = starlette_client[0]
    res = client.get("/component/comp/bundle.js")
    assert res.status_code == 200
    content_type = res.headers["content-type"]
    assert content_type is not None
    assert "javascript" in content_type
def test_bidi_component_endpoint(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Bidirectional component assets are served under /_stcore/bidi-components."""
    client = starlette_client[0]
    res = client.get("/_stcore/bidi-components/comp/index.html")
    assert res.status_code == 200
    assert res.text == "component"
def test_script_health_endpoint(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """Exercise the script health check in both healthy and failing states."""
    _, runtime = starlette_client
    with patch_config_options({"server.scriptHealthCheckEnabled": True}):
        # The app must be rebuilt for the config change to take effect.
        app = create_starlette_app(runtime)
        with TestClient(app) as client:
            ok_response = client.get("/_stcore/script-health-check")
            assert ok_response.status_code == 200
            assert ok_response.text == "ok"
            # Flip the dummy runtime into an unhealthy state.
            runtime.script_health = (False, "error")
            bad_response = client.get("/_stcore/script-health-check")
            assert bad_response.status_code == 503
            assert bad_response.text == "error"
def test_websocket_rejects_text_frames(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """The WebSocket endpoint only speaks binary protobuf frames."""
    client = starlette_client[0]
    # Starlette's receive_bytes() raises KeyError when a text frame arrives,
    # because the incoming message dict carries "text" rather than "bytes".
    with pytest.raises(KeyError):
        with client.websocket_connect("/_stcore/stream") as ws:
            ws.send_text("Hello")
            ws.receive_text()
def test_upload_delete_removes_file(
    starlette_client: tuple[TestClient, _DummyRuntime],
) -> None:
    """A DELETE request removes the uploaded file from storage."""
    client, runtime = starlette_client
    record = UploadedFileRec(
        file_id="fileid",
        name="foo.txt",
        type="text/plain",
        data=b"payload",
    )
    session_files = runtime.uploaded_file_mgr.file_storage.setdefault("session123", {})
    session_files["fileid"] = record
    res = client.delete("/_stcore/upload_file/session123/fileid")
    assert res.status_code == 204
    assert "fileid" not in runtime.uploaded_file_mgr.file_storage["session123"]
@patch_config_options(
    {"server.enableXsrfProtection": True, "global.developmentMode": False}
)
def test_upload_rejects_without_xsrf_token(tmp_path: Path) -> None:
    """Test that uploads are rejected without a valid XSRF token when protection is enabled."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case
    # and leak the patched static dir into subsequent tests.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        # PUT without XSRF token should fail
        response = client.put(
            "_stcore/upload_file/session123/fileid",
            files={"file": ("foo.txt", b"payload", "text/plain")},
        )
        assert response.status_code == 403
        assert "XSRF" in response.text
        # DELETE without XSRF token should fail
        response = client.delete("_stcore/upload_file/session123/fileid")
        assert response.status_code == 403
        assert "XSRF" in response.text
@patch_config_options(
    {"server.enableXsrfProtection": True, "global.developmentMode": False}
)
def test_upload_accepts_with_valid_xsrf_token(tmp_path: Path) -> None:
    """Test that uploads succeed with a valid XSRF token when protection is enabled."""
    from streamlit.web.server.starlette import starlette_app_utils
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        # Generate a valid XSRF token; the same token is supplied as both the
        # cookie and the X-Xsrftoken header.
        xsrf_token = starlette_app_utils.generate_xsrf_token_string()
        client.cookies.set("_streamlit_xsrf", xsrf_token)
        # PUT with valid XSRF token should succeed
        response = client.put(
            "_stcore/upload_file/session123/fileid",
            files={"file": ("foo.txt", b"payload", "text/plain")},
            headers={"X-Xsrftoken": xsrf_token},
        )
        assert response.status_code == 204
@patch_config_options({"global.developmentMode": False})
def test_host_config_excludes_localhost_when_not_dev(tmp_path: Path) -> None:
    """Test that localhost is excluded from allowed origins in production mode."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        response = client.get("/_stcore/host-config")
        assert response.status_code == HTTPStatus.OK
        body = response.json()
        assert "http://localhost" not in body["allowedOrigins"]
@patch_config_options({"global.developmentMode": True})
def test_host_config_includes_localhost_in_dev(tmp_path: Path) -> None:
    """Test that localhost is included in allowed origins in development mode."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        response = client.get("/_stcore/host-config")
        assert response.status_code == HTTPStatus.OK
        body = response.json()
        assert "http://localhost" in body["allowedOrigins"]
@patch_config_options(
    {
        "global.developmentMode": False,
        "client.allowedOrigins": [
            "https://custom.example.com",
            "https://another.example.com",
        ],
    }
)
def test_host_config_custom_allowed_origins(tmp_path: Path) -> None:
    """Test that custom client.allowedOrigins values are used."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        response = client.get("/_stcore/host-config")
        assert response.status_code == HTTPStatus.OK
        body = response.json()
        assert body["allowedOrigins"] == [
            "https://custom.example.com",
            "https://another.example.com",
        ]
        # Verify defaults are NOT included when custom values are set
        assert "https://*.streamlit.app" not in body["allowedOrigins"]
@patch_config_options(
    {
        "global.developmentMode": False,
        "client.allowedOrigins": [],
    }
)
def test_host_config_empty_allowed_origins(tmp_path: Path) -> None:
    """Test that empty client.allowedOrigins results in empty list."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        response = client.get("/_stcore/host-config")
        assert response.status_code == HTTPStatus.OK
        body = response.json()
        assert body["allowedOrigins"] == []
@patch_config_options(
    {
        "global.developmentMode": True,
        "client.allowedOrigins": [
            "https://custom.example.com",
            "https://another.example.com",
        ],
    }
)
def test_host_config_custom_origins_with_dev_mode(tmp_path: Path) -> None:
    """Test that localhost is appended to custom origins in dev mode."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        response = client.get("/_stcore/host-config")
        assert response.status_code == HTTPStatus.OK
        body = response.json()
        # Custom origins should be present
        assert "https://custom.example.com" in body["allowedOrigins"]
        assert "https://another.example.com" in body["allowedOrigins"]
        # localhost should be appended in dev mode
        assert "http://localhost" in body["allowedOrigins"]
@patch_config_options({"global.developmentMode": True})
def test_static_files_skipped_in_dev_mode(tmp_path: Path) -> None:
    """In development mode no static mount exists, so '/' returns 404."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    app = create_starlette_app(_DummyRuntime(component_dir))
    res = TestClient(app).get("/")
    assert res.status_code == HTTPStatus.NOT_FOUND
@patch_config_options(
    {
        "server.enableXsrfProtection": True,
        "global.developmentMode": False,
        "server.cookieSecret": "test-signing-secret",
    }
)
def test_websocket_auth_cookie_yields_user_info(tmp_path: Path) -> None:
    """Test that auth cookies are properly parsed when valid XSRF token is provided."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        # Create auth cookie payload
        cookie_payload = json.dumps(
            {
                "origin": "http://testserver",
                "is_logged_in": True,
                "email": "user@example.com",
            }
        )
        cookie_value = starlette_app_utils.create_signed_value(
            "test-signing-secret",
            "_streamlit_user",
            cookie_payload,
        )
        # Generate a valid XSRF token (same token for cookie and subprotocol)
        xsrf_token = starlette_app_utils.generate_xsrf_token_string()
        # Set both cookies
        client.cookies.set("_streamlit_user", cookie_value.decode("utf-8"))
        client.cookies.set("_streamlit_xsrf", xsrf_token)
        # Connect with XSRF token in subprotocol (second position)
        with client.websocket_connect(
            "/_stcore/stream",
            headers={"Origin": "http://testserver"},
            subprotocols=["streamlit", xsrf_token],
        ) as websocket:
            websocket.close(code=1000)
        assert runtime.last_user_info is not None
        assert runtime.last_user_info.get("is_logged_in") is True
        assert runtime.last_user_info.get("email") == "user@example.com"
@patch_config_options({"server.enableXsrfProtection": False})
def test_websocket_accepts_existing_session(tmp_path: Path) -> None:
    """Reconnecting with a known session id in the subprotocol resumes it."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    runtime = _DummyRuntime(component_dir)
    runtime._active_sessions.add("existing-456")
    client = TestClient(create_starlette_app(runtime))
    # The third subprotocol slot carries the existing session id.
    with client.websocket_connect(
        "_stcore/stream", subprotocols=["streamlit", "unused", "existing-456"]
    ) as ws:
        ws.close(code=1000)
    assert runtime.last_existing_session_id == "existing-456"
@patch_config_options({"global.developmentMode": False})
def test_static_files_fall_back_to_index(tmp_path: Path) -> None:
    """Ensure unknown paths return index.html so multipage routes work."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    (static_dir / "index.html").write_text("<html>home</html>")
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        with TestClient(app) as client:
            response = client.get("/page/does/not/exist")
            assert response.status_code == HTTPStatus.OK
            assert response.text == "<html>home</html>"
            # The SPA fallback must never be cached by the browser.
            assert response.headers["cache-control"] == "no-cache"
@patch_config_options({"global.developmentMode": False})
def test_static_files_apply_cache_headers(tmp_path: Path) -> None:
    """Ensure hashed static assets receive long-lived cache headers."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    (static_dir / "index.html").write_text("<html>home</html>")
    # Filename contains a content hash, so it may be cached immutably.
    (static_dir / "app.123456.js").write_text("console.log('test')")
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        with TestClient(app) as client:
            response = client.get("/app.123456.js")
            assert response.status_code == HTTPStatus.OK
            assert (
                response.headers["cache-control"]
                == f"public, immutable, max-age={STATIC_ASSET_CACHE_MAX_AGE_SECONDS}"
            )
@patch_config_options(
    {
        "server.enableXsrfProtection": True,
        "global.developmentMode": False,
        "server.cookieSecret": "test-signing-secret",
    }
)
def test_websocket_rejects_auth_cookie_without_valid_xsrf(tmp_path: Path) -> None:
    """Test that auth cookies are not parsed without valid XSRF token."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        # Create a valid auth cookie using Starlette's signing (itsdangerous-based)
        cookie_payload = json.dumps(
            {
                "origin": "http://testserver",
                "is_logged_in": True,
                "email": "user@example.com",
            }
        )
        cookie_value = starlette_app_utils.create_signed_value(
            "test-signing-secret",
            "_streamlit_user",
            cookie_payload,
        )
        # Set auth cookie but no XSRF cookie
        client.cookies.set("_streamlit_user", cookie_value.decode("utf-8"))
        # Connect without providing XSRF token in subprotocol
        with client.websocket_connect(
            "/_stcore/stream",
            headers={"Origin": "http://testserver"},
            subprotocols=["streamlit"],  # No XSRF token in second position
        ) as websocket:
            websocket.close(code=1000)
        # User info should NOT include auth data because XSRF validation failed
        assert runtime.last_user_info is not None
        assert runtime.last_user_info.get("is_logged_in") is not True
        assert runtime.last_user_info.get("email") is None
@patch_config_options(
    {
        "global.developmentMode": False,
        "global.e2eTest": False,
        "server.enableXsrfProtection": False,
    }
)
def test_websocket_ignores_debug_disconnect_in_production(tmp_path: Path) -> None:
    """Test that debug_disconnect_websocket is ignored in production mode."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        with client.websocket_connect("/_stcore/stream") as websocket:
            # Send a debug_disconnect_websocket message
            back_msg = BackMsg()
            back_msg.debug_disconnect_websocket = True
            websocket.send_bytes(back_msg.SerializeToString())
            # Send a valid rerun message to verify connection is still alive
            back_msg2 = BackMsg()
            back_msg2.rerun_script.query_string = ""
            websocket.send_bytes(back_msg2.SerializeToString())
            # Close gracefully
            websocket.close(code=1000)
        # The runtime should have received the rerun message (connection wasn't closed)
        assert runtime.last_backmsg is not None
        _session_id, msg = runtime.last_backmsg
        assert msg.WhichOneof("type") == "rerun_script"
@patch_config_options(
    {
        "global.developmentMode": False,
        "global.e2eTest": False,
        "server.enableXsrfProtection": False,
    }
)
def test_websocket_ignores_debug_shutdown_in_production(tmp_path: Path) -> None:
    """Test that debug_shutdown_runtime is ignored in production mode."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    # Use MonkeyPatch.context() so the file_util patch is undone even if an
    # assertion below fails; a trailing undo() would be skipped in that case.
    with pytest.MonkeyPatch.context() as monkeypatch:
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        runtime = _DummyRuntime(component_dir)
        app = create_starlette_app(runtime)
        client = TestClient(app)
        with client.websocket_connect("/_stcore/stream") as websocket:
            # Send a debug_shutdown_runtime message
            back_msg = BackMsg()
            back_msg.debug_shutdown_runtime = True
            websocket.send_bytes(back_msg.SerializeToString())
            # Send a valid rerun message to verify connection is still alive
            back_msg2 = BackMsg()
            back_msg2.rerun_script.query_string = ""
            websocket.send_bytes(back_msg2.SerializeToString())
            # Close gracefully
            websocket.close(code=1000)
        # Runtime should NOT be stopped
        assert runtime.stopped is False
@patch_config_options(
    {
        "global.developmentMode": True,
        "global.e2eTest": False,
        "server.enableXsrfProtection": False,
    }
)
def test_websocket_allows_debug_shutdown_in_dev_mode(tmp_path: Path) -> None:
    """In development mode, debug_shutdown_runtime stops the runtime."""
    component_dir = tmp_path / "component"
    component_dir.mkdir()
    (component_dir / "index.html").write_text("component")
    runtime = _DummyRuntime(component_dir)
    client = TestClient(create_starlette_app(runtime))
    with client.websocket_connect("/_stcore/stream") as ws:
        shutdown_msg = BackMsg()
        shutdown_msg.debug_shutdown_runtime = True
        ws.send_bytes(shutdown_msg.SerializeToString())
    # The shutdown message should have stopped the runtime.
    assert runtime.stopped is True
# ---------------------------------------------------------------------------
# Tests for the App class (st.App ASGI entry point)
# ---------------------------------------------------------------------------
class TestAppInit:
    """Tests for App initialization."""

    def test_app_accepts_string_path(self) -> None:
        """A plain string script path is normalized to a Path."""
        from pathlib import Path
        assert App("main.py").script_path == Path("main.py")

    def test_app_accepts_path_object(self) -> None:
        """A Path object is accepted as the script path."""
        from pathlib import Path
        assert App(Path("main.py")).script_path == Path("main.py")

    def test_app_state_is_empty_initially(self) -> None:
        """A freshly constructed App carries no state."""
        assert App("main.py").state == {}

    def test_app_stores_user_routes(self) -> None:
        """User-supplied routes are retained on the App instance."""
        async def handler(request: Any) -> None:
            pass
        app = App("main.py", routes=[Route("/api/health", handler)])
        assert len(app._user_routes) == 1

    def test_app_stores_user_middleware(self) -> None:
        """User-supplied middleware is retained on the App instance."""
        from starlette.middleware.cors import CORSMiddleware
        cors = Middleware(CORSMiddleware, allow_origins=["*"])
        app = App("main.py", middleware=[cors])
        assert len(app._user_middleware) == 1

    def test_app_stores_exception_handlers(self) -> None:
        """User-supplied exception handlers are retained on the App instance."""
        async def handler(request: Any, exc: Exception) -> None:
            pass
        app = App("main.py", exception_handlers={ValueError: handler})
        assert ValueError in app._exception_handlers

    def test_app_stores_debug_flag(self) -> None:
        """The debug flag is stored on the App instance."""
        assert App("main.py", debug=True)._debug is True
class TestAppRouteValidation:
    """Tests for route validation in App."""

    @pytest.mark.parametrize("reserved_prefix", _RESERVED_ROUTE_PREFIXES)
    def test_app_rejects_reserved_route_prefix(self, reserved_prefix: str) -> None:
        """Routes under any reserved prefix are rejected at construction."""
        async def handler(request: Any) -> None:
            pass
        with pytest.raises(ValueError, match="conflicts with reserved Streamlit route"):
            App("main.py", routes=[Route(f"{reserved_prefix}custom", handler)])

    def test_app_rejects_reserved_route_without_trailing_slash(self) -> None:
        """A reserved path with no trailing slash is rejected too."""
        async def handler(request: Any) -> None:
            pass
        with pytest.raises(ValueError, match="conflicts with reserved Streamlit route"):
            App("main.py", routes=[Route("/_stcore", handler)])

    def test_app_accepts_non_reserved_routes(self) -> None:
        """Routes outside the reserved namespace are stored untouched."""
        async def handler(request: Any) -> None:
            pass
        custom_routes = [
            Route("/api/health", handler),
            Route("/webhook", handler),
            Route("/custom/route", handler),
        ]
        assert len(App("main.py", routes=custom_routes)._user_routes) == 3
class TestAppLifespan:
"""Tests for App lifespan handling."""
def test_app_stores_user_lifespan(self) -> None:
"""Test that App stores the user-provided lifespan context manager."""
@asynccontextmanager
async def lifespan(app: App) -> AsyncIterator[dict[str, Any]]:
yield {"key": "value"}
app = App("main.py", lifespan=lifespan)
assert app._user_lifespan is not None
def test_lifespan_method_creates_runtime(
self, tmp_path: Path, reset_runtime: None
) -> None:
"""Test that lifespan() creates the runtime if not already created."""
script = tmp_path / "app.py"
script.write_text("import streamlit as st\nst.write('hello')")
app = App(script)
assert app._runtime is None
app.lifespan()
assert app._runtime is not None
# Runtime should be created but not started yet (lifespan will start it)
from streamlit.runtime import RuntimeState
assert app._runtime.state == RuntimeState.INITIAL
def test_lifespan_method_sets_external_lifespan_flag(
self, tmp_path: Path, reset_runtime: None
) -> None:
"""Test that lifespan() sets _external_lifespan to True."""
script = tmp_path / "app.py"
script.write_text("import streamlit as st\nst.write('hello')")
app = App(script)
assert app._external_lifespan is False
app.lifespan()
assert app._external_lifespan is True
def test_lifespan_method_returns_combined_lifespan(
self, tmp_path: Path, reset_runtime: None
) -> None:
"""Test that lifespan() returns the _combined_lifespan method."""
script = tmp_path / "app.py"
script.write_text("import streamlit as st\nst.write('hello')")
app = App(script)
result = app.lifespan()
# Should return the bound method _combined_lifespan
assert result == app._combined_lifespan
assert callable(result)
def test_lifespan_method_is_idempotent(
self, tmp_path: Path, reset_runtime: None
) -> None:
"""Test that calling lifespan() multiple times returns the same result."""
script = tmp_path / "app.py"
script.write_text("import streamlit as st\nst.write('hello')")
app = App(script)
# Call lifespan() multiple times
result1 = app.lifespan()
result2 = app.lifespan()
# Should return the same bound method
assert result1 == result2
# Runtime should only be created once
assert app._runtime is not None
def test_external_lifespan_flag_defaults_to_false(self) -> None:
    """A freshly constructed App must start with _external_lifespan disabled."""
    streamlit_app = App("main.py")
    assert streamlit_app._external_lifespan is False
def test_standalone_use_after_lifespan_raises_error(
    self, tmp_path: Path, reset_runtime: None
) -> None:
    """Standalone usage after lifespan() must fail with a RuntimeError."""
    script_path = tmp_path / "app.py"
    script_path.write_text("import streamlit as st\nst.write('hello')")
    streamlit_app = App(script_path)
    # lifespan() marks the app as externally managed (_external_lifespan=True).
    streamlit_app.lifespan()
    # Invoking the app directly afterwards builds the starlette app via
    # __call__, which must be rejected.
    with pytest.raises(RuntimeError, match="Cannot use App as standalone"):
        asyncio.run(streamlit_app({"type": "http"}, None, None))
class TestAppServerModeTracking:
    """Tests for server mode tracking in App.

    These tests drive the private ``config._server_mode`` state through the
    combinations of CLI-started vs. externally-served and standalone vs.
    mounted apps, and assert the resulting mode string.
    """

    @pytest.fixture(autouse=True)
    def reset_server_mode(self) -> Iterator[None]:
        """Reset the server mode before and after each test."""
        from streamlit import config

        # Save/restore so a test cannot leak its mode into the next one.
        original_mode = config._server_mode
        config._server_mode = None
        yield
        config._server_mode = original_mode

    def test_standalone_app_via_cli_sets_starlette_app_mode(
        self, tmp_path: Path, reset_runtime: None
    ) -> None:
        """Test that standalone st.App via CLI keeps 'starlette-app' mode."""
        from streamlit import config

        script = tmp_path / "app.py"
        script.write_text("import streamlit as st\nst.write('hello')")

        # Simulate CLI setting the mode (bootstrap.run_asgi_app does this)
        config._server_mode = "starlette-app"

        app = App(script)

        with TestClient(app) as client:
            # _combined_lifespan runs and should NOT change mode
            # since _external_lifespan is False
            response = client.get("/_stcore/health")
            assert response.status_code == HTTPStatus.OK

        # Mode should remain starlette-app
        assert config._server_mode == "starlette-app"

    def test_mounted_app_via_cli_sets_asgi_mounted_mode(
        self, tmp_path: Path, reset_runtime: None
    ) -> None:
        """Test that mounted st.App via CLI changes to 'asgi-mounted' mode."""
        from streamlit import config

        script = tmp_path / "app.py"
        script.write_text("import streamlit as st\nst.write('hello')")

        # Simulate CLI setting the mode (bootstrap.run_asgi_app does this)
        config._server_mode = "starlette-app"

        app = App(script)

        # Simulate mounting: calling lifespan() sets _external_lifespan = True
        app.lifespan()

        # Create a wrapper app that uses the lifespan
        from starlette.applications import Starlette

        wrapper = Starlette(lifespan=app.lifespan())
        wrapper.mount("/streamlit", app)

        with TestClient(wrapper) as client:
            # The combined lifespan runs and should change mode to asgi-mounted
            response = client.get("/streamlit/_stcore/health")
            assert response.status_code == HTTPStatus.OK

        # Mode should be changed to asgi-mounted
        assert config._server_mode == "asgi-mounted"

    def test_standalone_app_via_external_asgi_sets_asgi_server_mode(
        self, tmp_path: Path, reset_runtime: None
    ) -> None:
        """Test that standalone st.App via external ASGI sets 'asgi-server' mode."""
        from streamlit import config

        script = tmp_path / "app.py"
        script.write_text("import streamlit as st\nst.write('hello')")

        # No CLI, so server_mode is None (simulating direct uvicorn usage)
        assert config._server_mode is None

        app = App(script)

        with TestClient(app) as client:
            # _combined_lifespan runs and should set mode to asgi-server
            response = client.get("/_stcore/health")
            assert response.status_code == HTTPStatus.OK

        # Mode should be asgi-server
        assert config._server_mode == "asgi-server"

    def test_mounted_app_via_external_asgi_sets_asgi_mounted_mode(
        self, tmp_path: Path, reset_runtime: None
    ) -> None:
        """Test that mounted st.App via external ASGI sets 'asgi-mounted' mode."""
        from streamlit import config

        script = tmp_path / "app.py"
        script.write_text("import streamlit as st\nst.write('hello')")

        # No CLI, so server_mode is None (simulating direct uvicorn usage)
        assert config._server_mode is None

        app = App(script)

        # Simulate mounting: calling lifespan() sets _external_lifespan = True
        lifespan_cm = app.lifespan()

        # Create a wrapper app that uses the lifespan
        from starlette.applications import Starlette

        wrapper = Starlette(lifespan=lifespan_cm)
        wrapper.mount("/streamlit", app)

        with TestClient(wrapper) as client:
            # The combined lifespan runs and should set mode to asgi-mounted
            response = client.get("/streamlit/_stcore/health")
            assert response.status_code == HTTPStatus.OK

        # Mode should be asgi-mounted
        assert config._server_mode == "asgi-mounted"
class TestAppScriptPathResolution:
    """Tests for script path resolution in App.

    Covers the three resolution cases: absolute paths (unchanged), relative
    paths against cwd, and relative paths against ``config._main_script_path``
    when the CLI has set it; plus the missing-file error message.
    """

    def test_absolute_path_is_returned_unchanged(self, tmp_path: Path) -> None:
        """Test that absolute script paths are returned unchanged."""
        script_path = tmp_path / "main.py"
        script_path.touch()
        app = App(script_path)
        resolved = app._resolve_script_path()
        assert resolved == script_path

    def test_relative_path_is_resolved_to_cwd(self) -> None:
        """Test that relative script paths are resolved relative to cwd."""
        app = App("main.py")
        # The relative path should be resolved to an absolute path
        resolved = app._resolve_script_path()
        assert resolved.is_absolute()
        assert resolved.name == "main.py"
        # Without config._main_script_path set, should resolve relative to cwd
        assert resolved == (Path.cwd() / "main.py").resolve()

    def test_relative_path_uses_main_script_path_when_set(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that relative paths resolve relative to main_script_path when set by CLI."""
        from streamlit import config

        # Simulate CLI setting the main script path
        main_script = tmp_path / "app" / "server.py"
        main_script.parent.mkdir(parents=True, exist_ok=True)
        main_script.touch()
        monkeypatch.setattr(config, "_main_script_path", str(main_script))
        app = App("pages/dashboard.py")
        resolved = app._resolve_script_path()
        # Should resolve relative to main_script_path's parent directory
        expected = (tmp_path / "app" / "pages" / "dashboard.py").resolve()
        assert resolved == expected
        # Should NOT resolve relative to cwd
        assert resolved != (Path.cwd() / "pages" / "dashboard.py").resolve()

    def test_nonexistent_script_raises_file_not_found(
        self, tmp_path: Path, reset_runtime: None
    ) -> None:
        """Test that a descriptive FileNotFoundError is raised for non-existent scripts."""
        nonexistent_script = tmp_path / "does_not_exist.py"
        app = App(nonexistent_script)
        with pytest.raises(FileNotFoundError) as exc_info:
            app._create_runtime()
        # Error message should include the path and be descriptive
        assert "does_not_exist.py" in str(exc_info.value)
        assert "not found" in str(exc_info.value).lower()
class TestAppExports:
    """Tests covering the public export surface of the App class."""

    def test_app_is_exported_from_starlette_package(self) -> None:
        """App must be importable from streamlit.web.server.starlette."""
        from streamlit.web.server.starlette import App as ExportedApp

        assert ExportedApp is App

    def test_app_is_exported_from_streamlit_starlette(self) -> None:
        """App must be importable via the streamlit.starlette shortcut."""
        from streamlit.starlette import App as ShortcutApp

        assert ShortcutApp is App

    def test_reserved_route_prefixes_constant(self) -> None:
        """The reserved-prefix constant must include Streamlit's core paths."""
        for prefix in ("/_stcore/", "/media/", "/component/"):
            assert prefix in _RESERVED_ROUTE_PREFIXES
# --- Integration Tests for App class ---
@pytest.fixture
def simple_script(tmp_path: Path) -> Path:
    """Provide a minimal Streamlit script on disk for integration tests."""
    script_path = tmp_path / "main.py"
    script_path.write_text('import streamlit as st\nst.write("Hello")\n')
    return script_path
@pytest.fixture
def reset_runtime() -> Iterator[None]:
    """Reset the Runtime singleton before and after each test.

    Clearing ``Runtime._instance`` on both sides of the yield ensures each
    test constructs its own runtime and cannot observe one left over from a
    previous test.
    """
    from streamlit.runtime import Runtime

    Runtime._instance = None
    yield
    Runtime._instance = None
class TestAppAsgi:
    """Integration tests for App as an ASGI application.

    Each test spins up the app inside a Starlette ``TestClient``, which runs
    the app's lifespan on enter/exit, and exercises routing, middleware, and
    lifespan-state behavior over real HTTP requests.
    """

    @pytest.fixture(autouse=True)
    def _reset_runtime(self, reset_runtime: None) -> None:
        """Auto-use the reset_runtime fixture for all tests in this class."""

    @pytest.fixture(autouse=True)
    def _mock_static_dir(self, tmp_path: Path) -> Iterator[None]:
        """Mock the static directory for all tests in this class."""
        static_dir = tmp_path / "static"
        static_dir.mkdir()
        # Starlette's StaticFiles requires index.html to exist when html=True
        (static_dir / "index.html").write_text("<html>test</html>")
        monkeypatch = pytest.MonkeyPatch()
        monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
        yield
        monkeypatch.undo()

    @patch_config_options(
        {
            "server.baseUrlPath": "",
            "global.developmentMode": False,
            "server.enableXsrfProtection": False,
        }
    )
    def test_app_serves_health_endpoint(self, simple_script: Path) -> None:
        """Test that App serves Streamlit's health endpoint."""
        app = App(simple_script)
        with TestClient(app) as client:
            response = client.get("/_stcore/health")
            assert response.status_code == 200
            assert response.text == "ok"

    @patch_config_options(
        {
            "server.baseUrlPath": "",
            "global.developmentMode": False,
            "server.enableXsrfProtection": False,
        }
    )
    def test_app_serves_custom_routes(self, simple_script: Path) -> None:
        """Test that App serves user-provided custom routes."""

        async def api_health(request: Request) -> JSONResponse:
            return JSONResponse({"status": "healthy"})

        routes = [Route("/api/health", api_health)]
        app = App(simple_script, routes=routes)
        with TestClient(app) as client:
            response = client.get("/api/health")
            assert response.status_code == 200
            assert response.json() == {"status": "healthy"}

    @patch_config_options(
        {
            "server.baseUrlPath": "",
            "global.developmentMode": False,
            "server.enableXsrfProtection": False,
        }
    )
    def test_app_lifespan_populates_state(self, simple_script: Path) -> None:
        """Test that user lifespan can populate app state."""
        startup_count = 0
        shutdown_count = 0

        @asynccontextmanager
        async def lifespan(app: App) -> AsyncIterator[dict[str, Any]]:
            nonlocal startup_count, shutdown_count
            startup_count += 1
            # The yielded mapping becomes app.state for the app's lifetime.
            yield {"model": "loaded", "version": "1.0"}
            shutdown_count += 1

        app = App(simple_script, lifespan=lifespan)
        with TestClient(app) as client:
            assert startup_count == 1
            assert app.state == {"model": "loaded", "version": "1.0"}
            # State should not contain unexpected keys
            assert len(app.state) == 2
            client.get("/_stcore/health")  # Just verify it works
        # TestClient exit runs the shutdown half of the lifespan exactly once.
        assert shutdown_count == 1

    @patch_config_options(
        {
            "server.baseUrlPath": "",
            "global.developmentMode": False,
            "server.enableXsrfProtection": False,
        }
    )
    def test_app_applies_custom_middleware(self, simple_script: Path) -> None:
        """Test that user-provided middleware is applied."""
        middleware_call_count = 0

        class TestMiddleware:
            def __init__(self, app: Any) -> None:
                self.app = app

            async def __call__(
                self, scope: dict[str, Any], receive: Any, send: Any
            ) -> None:
                nonlocal middleware_call_count
                # Count only HTTP requests, not lifespan/websocket scopes.
                if scope["type"] == "http":
                    middleware_call_count += 1
                await self.app(scope, receive, send)

        middleware = [Middleware(TestMiddleware)]
        app = App(simple_script, middleware=middleware)
        with TestClient(app) as client:
            client.get("/_stcore/health")
        # Middleware should be called exactly once for this request
        assert middleware_call_count == 1

    @patch_config_options(
        {
            "server.baseUrlPath": "",
            "global.developmentMode": False,
            "server.enableXsrfProtection": False,
        }
    )
    def test_app_custom_routes_have_priority_over_fallback(
        self, simple_script: Path
    ) -> None:
        """Test that custom routes take priority over Streamlit's fallback routes."""

        async def custom_root(request: Request) -> JSONResponse:
            return JSONResponse({"custom": True})

        routes = [Route("/", custom_root)]
        app = App(simple_script, routes=routes)
        with TestClient(app) as client:
            response = client.get("/")
            assert response.status_code == 200
            assert response.json() == {"custom": True}

    @patch_config_options(
        {
            "server.baseUrlPath": "",
            "global.developmentMode": False,
            "server.enableXsrfProtection": False,
        }
    )
    def test_app_lifespan_without_yield_state(self, simple_script: Path) -> None:
        """Test that lifespan works even when yielding None."""
        startup_called = False

        @asynccontextmanager
        async def lifespan(app: App) -> AsyncIterator[None]:
            nonlocal startup_called
            startup_called = True
            # Yielding nothing must leave app.state as an empty mapping.
            yield

        app = App(simple_script, lifespan=lifespan)
        with TestClient(app) as client:
            assert startup_called
            assert app.state == {}
            client.get("/_stcore/health")
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/starlette/starlette_app_test.py",
"license": "Apache License 2.0",
"lines": 1335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_auth_routes.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa: RUF029 # Async route handlers are idiomatic even without await
"""Starlette app authentication routes."""
from __future__ import annotations
import json
import time
from typing import TYPE_CHECKING, Any, Final, cast
from streamlit.auth_util import (
build_logout_url,
clear_cookie_and_chunks,
decode_provider_token,
generate_default_provider_section,
get_cookie_with_chunks,
get_origin_from_redirect_uri,
get_redirect_uri,
get_secrets_auth_section,
get_validated_redirect_uri,
set_cookie_with_chunks,
)
from streamlit.errors import StreamlitAuthError
from streamlit.logger import get_logger
from streamlit.url_util import make_url_path
from streamlit.web.server.server_util import get_cookie_secret
from streamlit.web.server.starlette.starlette_app_utils import (
create_signed_value,
decode_signed_value,
)
from streamlit.web.server.starlette.starlette_server_config import (
TOKENS_COOKIE_NAME,
USER_COOKIE_NAME,
)
if TYPE_CHECKING:
from starlette.requests import Request
from starlette.responses import RedirectResponse, Response
from starlette.routing import Route
_LOGGER: Final = get_logger(__name__)

# Auth route path constants (without base URL prefix); joined with the
# configured base URL via make_url_path() in create_auth_routes().
_ROUTE_AUTH_LOGIN: Final = "auth/login"
_ROUTE_AUTH_LOGOUT: Final = "auth/logout"
_ROUTE_OAUTH_CALLBACK: Final = "oauth2callback"
class _AsyncAuthCache:
"""Async cache for Authlib's Starlette integration.
Authlib's Starlette OAuth client expects an async cache interface.
This implementation tracks per-item expiration times to automatically
expire OAuth state entries, preventing unbounded memory growth from
abandoned auth flows.
Cache size is expected to be very small: one entry is created per login
attempt (not per user/session) and exists only during the OAuth flow—from
clicking "Login" until the OAuth callback completes (typically seconds).
Each entry is a few hundred bytes. Entries expire after 1 hour (Authlib's
default) or are consumed upon successful callback.
"""
# Fallback TTL if authlib doesn't provide an expiration time.
# This is the same TTL used internally in Authlib (1 hour).
_DEFAULT_TTL_SECONDS: Final = 3600
def __init__(self) -> None:
# Cache structure: {key: (value, expiration_timestamp)}
# where key is Authlib's state key (e.g., "_state_google_abc123"),
# value is the OAuth state data, and expiration_timestamp is a Unix timestamp.
self._cache: dict[str, tuple[Any, float]] = {}
def _evict_expired(self) -> None:
"""Evict expired items from the cache."""
now = time.time()
expired_keys = [k for k, (_, exp) in self._cache.items() if exp <= now]
for key in expired_keys:
del self._cache[key]
async def get(self, key: str) -> Any:
"""Get an item from the cache."""
self._evict_expired()
entry = self._cache.get(key)
return entry[0] if entry else None
async def set(self, key: str, value: Any, expires_in: int | None = None) -> None:
"""Set an item in the cache."""
self._evict_expired()
ttl = expires_in if expires_in is not None else self._DEFAULT_TTL_SECONDS
self._cache[key] = (value, time.time() + ttl)
async def delete(self, key: str) -> None:
"""Delete an item from the cache."""
self._cache.pop(key, None)
def get_dict(self) -> dict[str, Any]:
"""Get a dictionary of all items in the cache."""
self._evict_expired()
return {k: v for k, (v, _) in self._cache.items()}
# TODO(lukasmasuch): Reevaluate whether we can remove _AsyncAuthCache and rely on Authlib's
# built-in session storage via SessionMiddleware instead. This would simplify
# the code but would expose OAuth state data in signed cookies rather than
# keeping it server-side. See: https://docs.authlib.org/en/latest/client/starlette.html
#
# Note: For true multi-tenant support (multiple Streamlit apps in one process),
# this cache would need to be made per-runtime rather than module-level.

# Process-wide cache instance shared by all auth routes in this module.
_STARLETTE_AUTH_CACHE: Final = _AsyncAuthCache()
def _normalize_nested_config(value: Any) -> Any:
"""Normalize nested configuration data for Authlib."""
if isinstance(value, dict):
return {k: _normalize_nested_config(v) for k, v in value.items()}
if isinstance(value, list):
return [_normalize_nested_config(item) for item in value]
return value
def _looks_like_provider_section(value: dict[str, Any]) -> bool:
"""Check if a dictionary looks like a provider section for Authlib."""
provider_keys = {
"client_id",
"client_secret",
"server_metadata_url",
"authorize_url",
"api_base_url",
"request_token_url",
}
return any(key in value for key in provider_keys)
class _AuthlibConfig(dict[str, Any]):  # noqa: FURB189
    """Config adapter exposing nested provider data via Authlib's flat lookup.

    Authlib reads flat configuration keys such as "GOOGLE_CLIENT_ID", while
    Streamlit's secrets.toml nests providers (e.g. [auth.google] client_id=...).
    This adapter answers flat lookups by consulting the matching nested
    provider section.
    """

    def __init__(self, data: dict[str, Any]) -> None:
        normalized = {
            name: _normalize_nested_config(section) for name, section in data.items()
        }
        super().__init__(normalized)
        # Lowercased provider name -> its nested configuration section.
        self._provider_sections: dict[str, dict[str, Any]] = {}
        for name, section in normalized.items():
            if isinstance(section, dict) and _looks_like_provider_section(section):
                self._provider_sections[name.lower()] = section

    def get(self, key: Any, default: Any = None) -> Any:
        """Look up *key* directly, then fall back to provider-section lookup."""
        if key in self:
            return super().get(key, default)
        if not isinstance(key, str):
            return default
        # Split e.g. "GOOGLE_CLIENT_ID" into provider "GOOGLE" and param
        # "CLIENT_ID"; keys without an underscore cannot be provider lookups.
        provider_key, separator, param = key.partition("_")
        if not separator:
            return default
        section = self._provider_sections.get(provider_key.lower())
        return default if section is None else section.get(param.lower(), default)
async def _redirect_to_base(base_url: str) -> RedirectResponse:
    """Build a 302 redirect response pointing at the app's base URL."""
    from starlette.responses import RedirectResponse

    target = make_url_path(base_url, "/")
    return RedirectResponse(target, status_code=302)
def _get_cookie_path() -> str:
    """Return the cookie path derived from server.baseUrlPath.

    Yields "/" when no base path is configured; otherwise the base path is
    normalized to a leading slash with no trailing slash.
    """
    from streamlit import config

    base_path: str | None = config.get_option("server.baseUrlPath")
    if not base_path:
        return "/"
    return "/" + base_path.strip("/")
async def _set_auth_cookie(
    response: Response, user_info: dict[str, Any], tokens: dict[str, Any]
) -> None:
    """Attach the signed user-info and token cookies to *response*.

    Note: This cookie uses itsdangerous signing which is NOT compatible with
    Tornado's secure cookie format. Switching between backends will invalidate
    existing auth cookies, requiring users to re-authenticate. This is expected
    behavior when switching between Tornado and Starlette backends.

    Cookies may be split into multiple chunks if they exceed browser limits.
    """

    def write_cookie(cookie_name: str, value: str) -> None:
        _set_single_cookie(response, cookie_name, value)

    # Both cookies go through the same chunk-aware writer.
    for cookie_name, payload in (
        (USER_COOKIE_NAME, user_info),
        (TOKENS_COOKIE_NAME, tokens),
    ):
        set_cookie_with_chunks(
            write_cookie,
            _create_signed_value_wrapper,
            cookie_name,
            payload,
        )
def _set_single_cookie(
    response: Response, cookie_name: str, serialized_value: str
) -> None:
    """Sign *serialized_value* and set it as one cookie on *response*.

    Cookie flags are set explicitly for clarity and parity with Tornado:
    - httponly=True: Prevents JavaScript access (security)
    - samesite="lax": Allows cookie on same-site requests and top-level navigations
    - secure is NOT set: Tornado deliberately avoids this due to Safari cookie bugs;
      the OIDC flow only works in secure contexts anyway (localhost or HTTPS)
    - path: Matches server.baseUrlPath for proper scoping
    """
    signed_value = create_signed_value(
        get_cookie_secret(), cookie_name, serialized_value
    )
    response.set_cookie(
        cookie_name,
        signed_value.decode("utf-8"),
        httponly=True,
        samesite="lax",
        path=_get_cookie_path(),
    )
def _create_signed_value_wrapper(cookie_name: str, value: str) -> bytes:
    """Sign *value* for *cookie_name* with the configured cookie secret."""
    return create_signed_value(get_cookie_secret(), cookie_name, value)
def _get_signed_cookie_from_request(request: Request, cookie_name: str) -> bytes | None:
    """Read and verify one signed cookie from *request*, or None if absent.

    Used during logout to determine whether chunked cookies need cleanup.
    """
    raw_value = request.cookies.get(cookie_name)
    if raw_value is None:
        return None
    return decode_signed_value(
        get_cookie_secret(), cookie_name, raw_value.encode("latin-1")
    )
def _clear_auth_cookie(response: Response, request: Request) -> None:
    """Delete both auth cookies (and any chunk cookies) from the browser.

    The deletion path must match the path used when setting the cookie,
    otherwise the browser won't delete it.
    """
    cookie_path = _get_cookie_path()

    def read_cookie(cookie_name: str) -> bytes | None:
        return _get_signed_cookie_from_request(request, cookie_name)

    def drop_cookie(cookie_name: str) -> None:
        response.delete_cookie(cookie_name, path=cookie_path)

    for cookie_name in (USER_COOKIE_NAME, TOKENS_COOKIE_NAME):
        clear_cookie_and_chunks(read_cookie, drop_cookie, cookie_name)
def _create_oauth_client(provider: str) -> tuple[Any, str]:
    """Create an OAuth client for *provider* based on secrets.toml configuration.

    Returns the Authlib client together with the configured redirect URI
    ("/" when no usable [auth] section exists).

    Raises:
        StreamlitAuthError: If the optional Authlib dependency is missing.
    """
    try:
        from authlib.integrations import starlette_client
    except ModuleNotFoundError:  # pragma: no cover - optional dependency
        # "from None" suppresses the uninteresting import traceback so the
        # user sees only the actionable install hint (PEP 409 / Ruff B904).
        raise StreamlitAuthError(
            "Authentication requires Authlib>=1.3.2. "
            "Install it via `pip install streamlit[auth]`."
        ) from None

    auth_section = get_secrets_auth_section()
    if auth_section:
        redirect_uri = get_redirect_uri(auth_section) or "/"
        config = auth_section.to_dict()
    else:
        config = {}
        redirect_uri = "/"

    provider_section = config.setdefault(provider, {})
    # Guard against auth_section being None when secrets.toml exists but lacks [auth].
    # Normal flows validate config first, but this protects against edge cases.
    if not provider_section and provider == "default" and auth_section:
        provider_section = generate_default_provider_section(auth_section)
        config["default"] = provider_section

    # Fill in sensible OIDC defaults only when the user did not set them.
    provider_client_kwargs = provider_section.setdefault("client_kwargs", {})
    if "scope" not in provider_client_kwargs:
        provider_client_kwargs["scope"] = "openid email profile"
    if "prompt" not in provider_client_kwargs:
        provider_client_kwargs["prompt"] = "select_account"

    oauth = starlette_client.OAuth(
        config=_AuthlibConfig(config), cache=_STARLETTE_AUTH_CACHE
    )
    oauth.register(provider)
    return oauth.create_client(provider), redirect_uri  # type: ignore[no-untyped-call]
def _parse_provider_token(provider_token: str | None) -> str | None:
    """Decode the provider name from a provider token, or None on failure."""
    if provider_token is None:
        return None
    try:
        return decode_provider_token(provider_token)["provider"]
    except StreamlitAuthError:
        # An invalid/expired token is treated the same as no token at all.
        return None
def _get_provider_by_state(state_code_from_url: str | None) -> str | None:
    """Map an OAuth state code back to the provider that initiated the flow."""
    if state_code_from_url is None:
        return None
    # Authlib's Starlette integration stores OAuth state in the cache using
    # keys of the form "_state_{provider}_{state_code}".
    # Example: "_state_google_abc123" breaks down as:
    #   - "_state"  fixed prefix used by Authlib
    #   - "google"  provider name
    #   - "abc123"  state code (random token)
    #
    # This format is an implementation detail of Authlib and not a guaranteed
    # API, so malformed keys are skipped gracefully. Unit tests exist that
    # will fail should an authlib update change the format.
    #
    # Splitting on "_" assumes neither provider names nor state codes contain
    # underscores. That holds because (1) provider names with underscores are
    # explicitly blocked in validate_auth_credentials() in auth_util.py, and
    # (2) Authlib's generate_token() emits only alphanumeric characters
    # (a-zA-Z0-9) for state codes.
    state_provider_mapping: dict[str, str] = {}
    for cache_key in _STARLETTE_AUTH_CACHE.get_dict():
        parts = cache_key.split("_")
        if len(parts) != 4:
            # Not the expected 4-part "_state_provider_code" shape; skip it.
            continue
        _, _, recorded_provider, state_code = parts
        state_provider_mapping[state_code] = recorded_provider
    return state_provider_mapping.get(state_code_from_url)
def _get_origin_from_secrets() -> str | None:
    """Extract the origin from the redirect URI in the secrets.

    Thin delegate to auth_util.get_origin_from_redirect_uri(); presumably
    returns None when no valid redirect URI is configured — confirm in
    auth_util if relying on that.
    """
    return get_origin_from_redirect_uri()
def _get_cookie_value_from_request(request: Request, cookie_name: str) -> bytes | None:
    """Fetch a signed cookie value from *request*, reassembling any chunks."""

    def read_single(name: str) -> bytes | None:
        return _get_signed_cookie_from_request(request, name)

    return get_cookie_with_chunks(read_single, cookie_name)
def _get_provider_logout_url(request: Request) -> str | None:
    """Get the OAuth provider's logout URL from OIDC metadata.

    Returns the end_session_endpoint URL with proper parameters for OIDC logout,
    or None if the provider doesn't support it or required data is unavailable.

    This function returns None (rather than raising exceptions) to allow graceful
    fallback to a simple base URL redirect when OIDC logout isn't possible.
    """
    # Without a (valid) user cookie there is no provider to log out from.
    cookie_value = _get_cookie_value_from_request(request, USER_COOKIE_NAME)
    if not cookie_value:
        return None
    try:
        user_info = json.loads(cookie_value)
        provider = user_info.get("provider")
        if not provider:
            return None
        client, _ = _create_oauth_client(provider)
        # Load OIDC metadata - Authlib's Starlette client uses async methods
        # but load_server_metadata is synchronous in both implementations
        metadata = client.load_server_metadata()
        end_session_endpoint = metadata.get("end_session_endpoint")
        if not end_session_endpoint:
            _LOGGER.info("No end_session_endpoint found for provider %s", provider)
            return None
        # Use redirect_uri (i.e. /oauth2callback) for post_logout_redirect_uri
        # This is safer than redirecting to root as some providers seem to
        # require URL to be in a whitelist - /oauth2callback should be whitelisted
        redirect_uri = get_validated_redirect_uri()
        if redirect_uri is None:
            _LOGGER.info("Redirect url could not be determined")
            return None
        # Get id_token_hint from tokens cookie if available
        id_token: str | None = None
        tokens_cookie_value = _get_cookie_value_from_request(
            request, TOKENS_COOKIE_NAME
        )
        if tokens_cookie_value:
            try:
                tokens = json.loads(tokens_cookie_value)
                id_token = tokens.get("id_token")
            except (json.JSONDecodeError, TypeError):
                # A corrupt tokens cookie aborts OIDC logout entirely; the
                # caller falls back to a plain base-URL redirect.
                _LOGGER.exception("Error, invalid tokens cookie value.")
                return None
        return build_logout_url(
            end_session_endpoint=end_session_endpoint,
            client_id=client.client_id,
            post_logout_redirect_uri=redirect_uri,
            id_token=id_token,
        )
    except Exception as e:
        # Deliberately broad: any failure in metadata/client handling must
        # degrade to the base-URL redirect rather than break logout.
        _LOGGER.warning("Failed to get provider logout URL: %s", e)
        return None
async def _auth_login(request: Request, base_url: str) -> Response:
    """Kick off the OAuth authorization flow for the requested provider."""
    provider = _parse_provider_token(request.query_params.get("provider"))
    if provider is None:
        # No (valid) provider token: silently bounce back to the app.
        return await _redirect_to_base(base_url)
    client, redirect_uri = _create_oauth_client(provider)
    try:
        redirect = await client.authorize_redirect(request, redirect_uri)
        return cast("Response", redirect)
    except Exception:  # pragma: no cover - error path
        from starlette.responses import Response

        # Return a generic message to avoid exposing internal error details to clients.
        _LOGGER.warning("Error during OAuth authorization redirect.", exc_info=True)
        return Response("Authentication error", status_code=400)
async def _auth_logout(request: Request, base_url: str) -> Response:
    """Clear the auth cookies and redirect the user out of the app.

    When the provider advertises an end_session_endpoint, the user is sent
    there for a proper OIDC logout; otherwise we fall back to the base URL.
    """
    from starlette.responses import RedirectResponse

    provider_logout_url = _get_provider_logout_url(request)
    if not provider_logout_url:
        response = await _redirect_to_base(base_url)
    else:
        response = RedirectResponse(provider_logout_url, status_code=302)
    _clear_auth_cookie(response, request)
    return response
async def _auth_callback(request: Request, base_url: str) -> Response:
    """Handle the OAuth callback from the authentication provider.

    Validation order matters: origin misconfiguration, then provider-reported
    errors, then a missing/stale state, and only then the token exchange.
    Every failure path redirects back to the app's base URL.
    """
    state = request.query_params.get("state")
    provider = _get_provider_by_state(state)
    origin = _get_origin_from_secrets()
    if origin is None:
        _LOGGER.error(
            "Error, misconfigured origin for `redirect_uri` in secrets.",
        )
        return await _redirect_to_base(base_url)
    error = request.query_params.get("error")
    if error:
        error_description = request.query_params.get("error_description")
        # Strip CR/LF before logging to prevent log injection via query params.
        sanitized_error = error.replace("\n", "").replace("\r", "")
        sanitized_error_description = (
            error_description.replace("\n", "").replace("\r", "")
            if error_description
            else None
        )
        _LOGGER.error(
            "Error during authentication: %s. Error description: %s",
            sanitized_error,
            sanitized_error_description,
        )
        return await _redirect_to_base(base_url)
    if provider is None:
        # See https://github.com/streamlit/streamlit/issues/13101
        _LOGGER.warning(
            "Missing provider for OAuth callback; this often indicates a stale "
            "or replayed callback (for example, from browser back/forward "
            "navigation).",
        )
        return await _redirect_to_base(base_url)
    client, _ = _create_oauth_client(provider)
    token = await client.authorize_access_token(request)
    user = token.get("userinfo") or {}
    response = await _redirect_to_base(base_url)
    cookie_value = dict(user, origin=origin, is_logged_in=True, provider=provider)
    # Persist only the tokens we actually use later (logout id_token_hint).
    tokens = {k: token[k] for k in ["id_token", "access_token"] if k in token}
    if user:
        await _set_auth_cookie(response, cookie_value, tokens)
    else:  # pragma: no cover - error path
        # No user info means no cookies are set; the user lands back on the
        # app unauthenticated.
        _LOGGER.error(
            "OAuth provider '%s' did not return user information during callback.",
            provider,
        )
    return response
def create_auth_routes(base_url: str) -> list[Route]:
    """Build the login, logout, and OAuth-callback routes for the Starlette app."""
    from starlette.routing import Route

    async def login(request: Request) -> Response:
        return await _auth_login(request, base_url)

    async def logout(request: Request) -> Response:
        return await _auth_logout(request, base_url)

    async def callback(request: Request) -> Response:
        return await _auth_callback(request, base_url)

    # Each path constant is joined with the configured base URL; all auth
    # endpoints are GET-only.
    route_specs = (
        (_ROUTE_AUTH_LOGIN, login),
        (_ROUTE_AUTH_LOGOUT, logout),
        (_ROUTE_OAUTH_CALLBACK, callback),
    )
    return [
        Route(make_url_path(base_url, path), handler, methods=["GET"])
        for path, handler in route_specs
    ]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_auth_routes.py",
"license": "Apache License 2.0",
"lines": 461,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_auth_routes_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from http.cookies import SimpleCookie
from typing import TYPE_CHECKING, Any
from starlette.applications import Starlette
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import PlainTextResponse, RedirectResponse
from starlette.routing import Route
from starlette.testclient import TestClient
from streamlit.web.server.starlette import starlette_app_utils, starlette_auth_routes
from streamlit.web.server.starlette.starlette_auth_routes import (
_STARLETTE_AUTH_CACHE,
_get_cookie_path,
_get_origin_from_secrets,
_get_provider_by_state,
_parse_provider_token,
create_auth_routes,
)
from streamlit.web.server.starlette.starlette_server_config import (
TOKENS_COOKIE_NAME,
USER_COOKIE_NAME,
)
from tests.testutil import patch_config_options
if TYPE_CHECKING:
import pytest
def _build_app() -> Starlette:
    """Assemble a minimal Starlette app: the auth routes plus a plain-text root."""

    async def root(_: Any) -> PlainTextResponse:
        return PlainTextResponse("ok")

    routes: list[Any] = list(create_auth_routes(""))
    routes.append(Route("/", root, methods=["GET"]))
    application = Starlette(routes=routes)
    # Session middleware is required for the OAuth login flow's state storage.
    application.add_middleware(SessionMiddleware, secret_key="test-secret")
    return application
def test_redirect_without_provider(monkeypatch: pytest.MonkeyPatch) -> None:
    """Login without a configured provider should end up at the root page."""
    monkeypatch.setenv("STREAMLIT_OAUTH_PROVIDER", "")
    with TestClient(_build_app()) as client:
        # TestClient follows the redirect, so we land on the "ok" root handler.
        result = client.get("/auth/login")
        assert result.status_code == 200
        assert result.text == "ok"
def test_logout_clears_cookie() -> None:
    """Logging out should emit a Set-Cookie header and redirect back to root."""
    with TestClient(_build_app()) as client:
        client.cookies.set("_streamlit_user", "value")
        logout_response = client.get("/auth/logout", follow_redirects=False)
        assert logout_response.status_code == 302
        assert logout_response.headers.get("set-cookie")
        # Follow the redirect manually and confirm we reach the root page.
        landing = client.get(logout_response.headers["location"])
        assert landing.status_code == 200
def test_callback_handles_error_query(monkeypatch: pytest.MonkeyPatch) -> None:
    """An OAuth error callback should redirect to root instead of failing."""
    monkeypatch.setattr(
        starlette_auth_routes, "_get_origin_from_secrets", lambda: "http://testserver"
    )
    monkeypatch.setattr(
        starlette_auth_routes, "_get_provider_by_state", lambda state: "default"
    )
    with TestClient(Starlette(routes=create_auth_routes(""))) as client:
        response = client.get(
            "/oauth2callback?state=abc&error=access_denied&error_description=nope",
            follow_redirects=False,
        )
        assert response.status_code == 302
        assert response.headers["location"].endswith("/")
def test_callback_missing_provider_redirects(monkeypatch: pytest.MonkeyPatch) -> None:
    """If the state cannot be mapped to a provider, the callback goes to root."""
    monkeypatch.setattr(
        starlette_auth_routes, "_get_origin_from_secrets", lambda: "http://testserver"
    )
    # No provider can be resolved from the state parameter.
    monkeypatch.setattr(
        starlette_auth_routes, "_get_provider_by_state", lambda state: None
    )
    with TestClient(Starlette(routes=create_auth_routes(""))) as client:
        response = client.get("/oauth2callback?state=abc", follow_redirects=False)
        assert response.status_code == 302
        assert response.headers["location"].endswith("/")
@patch_config_options({"server.cookieSecret": "test-secret"})
def test_auth_callback_sets_signed_cookie(monkeypatch: pytest.MonkeyPatch) -> None:
    """Test that successful OAuth callback sets a signed auth cookie."""

    async def _dummy_authorize_access_token(self, request: Any) -> dict[str, Any]:
        # Simulate a provider token exchange returning minimal user info.
        return {"userinfo": {"email": "user@example.com"}}

    class _DummyClient:
        async def authorize_access_token(self, request: Any) -> dict[str, Any]:
            return await _dummy_authorize_access_token(self, request)

    monkeypatch.setattr(
        starlette_auth_routes,
        "_create_oauth_client",
        lambda provider: (_DummyClient(), "/redirect"),
    )
    monkeypatch.setattr(
        starlette_auth_routes,
        "_get_provider_by_state",
        lambda state: "default",
    )
    monkeypatch.setattr(
        starlette_auth_routes,
        "_get_origin_from_secrets",
        lambda: "http://testserver",
    )
    app = Starlette(routes=create_auth_routes(""))
    with TestClient(app) as client:
        response = client.get("/oauth2callback?state=abc", follow_redirects=False)
        assert response.status_code == 302
        assert response.headers["location"].endswith("/")
        # Verify the cookie value round-trips through the same signer used by
        # the server (itsdangerous via decode_signed_value).
        cookies = SimpleCookie()
        cookies.load(response.headers["set-cookie"])
        signed_value = cookies["_streamlit_user"].value
        decoded = starlette_app_utils.decode_signed_value(
            "test-secret", "_streamlit_user", signed_value
        )
        assert decoded is not None
        payload = decoded.decode("utf-8")
        assert "user@example.com" in payload
        # JSON serializes booleans lowercase; lower() makes the check robust.
        assert '"is_logged_in": true' in payload.lower()
def test_login_initializes_session(monkeypatch: pytest.MonkeyPatch) -> None:
    """Login should create a session before redirecting to the provider."""
    seen_session: dict[str, Any] | None = None

    class _RecordingClient:
        async def authorize_redirect(
            self, request: Any, redirect_uri: str
        ) -> RedirectResponse:
            # Capture the session state at redirect time for later inspection.
            nonlocal seen_session
            seen_session = dict(request.session)
            return RedirectResponse(redirect_uri)

    monkeypatch.setattr(
        starlette_auth_routes, "_parse_provider_token", lambda token: "default"
    )
    monkeypatch.setattr(
        starlette_auth_routes,
        "_create_oauth_client",
        lambda provider: (_RecordingClient(), "/redirect"),
    )
    with TestClient(_build_app()) as client:
        response = client.get("/auth/login?provider=dummy", follow_redirects=False)
        assert response.status_code == 307
        assert response.headers["location"] == "/redirect"
        assert seen_session is not None
def test_callback_missing_origin_redirects(monkeypatch: pytest.MonkeyPatch) -> None:
    """If no origin can be derived from secrets, the callback falls back to root."""
    # Simulate a missing redirect_uri in secrets.
    monkeypatch.setattr(
        starlette_auth_routes, "_get_origin_from_secrets", lambda: None
    )
    monkeypatch.setattr(
        starlette_auth_routes, "_get_provider_by_state", lambda state: "default"
    )
    with TestClient(Starlette(routes=create_auth_routes(""))) as client:
        response = client.get("/oauth2callback?state=abc", follow_redirects=False)
        assert response.status_code == 302
        assert response.headers["location"].endswith("/")
class TestCookiePath:
    """Tests for _get_cookie_path normalization of server.baseUrlPath.

    Every case asserts the returned cookie path has exactly one leading
    slash and no trailing slash, regardless of how the option is written.
    """

    @patch_config_options({"server.baseUrlPath": ""})
    def test_returns_root_when_no_base_path(self) -> None:
        """Test that root path is returned when no base URL is configured."""
        assert _get_cookie_path() == "/"

    @patch_config_options({"server.baseUrlPath": "myapp"})
    def test_returns_base_path_with_leading_slash(self) -> None:
        """Test that base path is returned with leading slash."""
        assert _get_cookie_path() == "/myapp"

    @patch_config_options({"server.baseUrlPath": "/myapp"})
    def test_handles_leading_slash_in_config(self) -> None:
        """Test that leading slash in config is handled correctly."""
        assert _get_cookie_path() == "/myapp"

    @patch_config_options({"server.baseUrlPath": "myapp/"})
    def test_removes_trailing_slash(self) -> None:
        """Test that trailing slash is removed from path."""
        assert _get_cookie_path() == "/myapp"

    @patch_config_options({"server.baseUrlPath": "/myapp/"})
    def test_handles_both_leading_and_trailing_slashes(self) -> None:
        """Test that both leading and trailing slashes are handled."""
        assert _get_cookie_path() == "/myapp"
class TestAuthCookieFlags:
    """Tests for auth cookie flags (httponly, samesite, path)."""

    @patch_config_options(
        {"server.cookieSecret": "test-secret", "server.baseUrlPath": ""}
    )
    def test_auth_cookie_has_correct_flags(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that auth cookie is set with correct security flags."""

        async def _dummy_authorize_access_token(self, request: Any) -> dict[str, Any]:
            # Simulate a successful token exchange with minimal user info.
            return {"userinfo": {"email": "user@example.com"}}

        class _DummyClient:
            async def authorize_access_token(self, request: Any) -> dict[str, Any]:
                return await _dummy_authorize_access_token(self, request)

        monkeypatch.setattr(
            starlette_auth_routes,
            "_create_oauth_client",
            lambda provider: (_DummyClient(), "/redirect"),
        )
        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_provider_by_state",
            lambda state: "default",
        )
        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_origin_from_secrets",
            lambda: "http://testserver",
        )
        app = Starlette(routes=create_auth_routes(""))
        with TestClient(app) as client:
            response = client.get("/oauth2callback?state=abc", follow_redirects=False)
            assert response.status_code == 302
            # Several Set-Cookie headers may be present (e.g. the tokens
            # cookie); locate the user cookie specifically.
            set_cookie_headers = response.headers.get_list("set-cookie")
            user_cookie_header = next(
                (h for h in set_cookie_headers if h.startswith("_streamlit_user=")),
                None,
            )
            assert user_cookie_header is not None, "User cookie not found"
            cookies = SimpleCookie()
            cookies.load(user_cookie_header)
            cookie = cookies["_streamlit_user"]
            # Check httponly flag (SimpleCookie stores boolean attrs as True)
            assert cookie["httponly"] is True
            # Check samesite flag
            assert cookie["samesite"].lower() == "lax"
            # Check path flag (should be "/" when no baseUrlPath)
            assert cookie["path"] == "/"

    @patch_config_options(
        {"server.cookieSecret": "test-secret", "server.baseUrlPath": "myapp"}
    )
    def test_auth_cookie_path_matches_base_url(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that auth cookie path matches the configured baseUrlPath."""

        async def _dummy_authorize_access_token(self, request: Any) -> dict[str, Any]:
            return {"userinfo": {"email": "user@example.com"}}

        class _DummyClient:
            async def authorize_access_token(self, request: Any) -> dict[str, Any]:
                return await _dummy_authorize_access_token(self, request)

        monkeypatch.setattr(
            starlette_auth_routes,
            "_create_oauth_client",
            lambda provider: (_DummyClient(), "/redirect"),
        )
        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_provider_by_state",
            lambda state: "default",
        )
        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_origin_from_secrets",
            lambda: "http://testserver",
        )
        # Routes are mounted under the base path, matching the config above.
        app = Starlette(routes=create_auth_routes("/myapp"))
        with TestClient(app) as client:
            response = client.get(
                "/myapp/oauth2callback?state=abc", follow_redirects=False
            )
            assert response.status_code == 302
            set_cookie_headers = response.headers.get_list("set-cookie")
            user_cookie_header = next(
                (h for h in set_cookie_headers if h.startswith("_streamlit_user=")),
                None,
            )
            assert user_cookie_header is not None, "User cookie not found"
            cookies = SimpleCookie()
            cookies.load(user_cookie_header)
            cookie = cookies["_streamlit_user"]
            # Check path matches baseUrlPath
            assert cookie["path"] == "/myapp"

    @patch_config_options({"server.baseUrlPath": "myapp"})
    def test_logout_clears_cookie_with_correct_path(self) -> None:
        """Test that logout clears the cookie with the same path it was set with."""

        async def root(_: Any) -> PlainTextResponse:
            return PlainTextResponse("ok")

        app = Starlette(
            routes=[
                *create_auth_routes("/myapp"),
                Route("/myapp/", root, methods=["GET"]),
            ]
        )
        with TestClient(app) as client:
            client.cookies.set("_streamlit_user", "value", path="/myapp")
            response = client.get("/myapp/auth/logout", follow_redirects=False)
            assert response.status_code == 302
            # The Set-Cookie header should include the path
            set_cookie_header = response.headers.get("set-cookie", "")
            assert "Path=/myapp" in set_cookie_header
class TestParseProviderToken:
    """Unit tests for _parse_provider_token."""

    def test_returns_none_for_none_input(self) -> None:
        """A missing token yields None."""
        assert _parse_provider_token(None) is None

    def test_returns_none_for_invalid_token(self) -> None:
        """Malformed or empty tokens yield None."""
        for bad_token in ("invalid-token", ""):
            assert _parse_provider_token(bad_token) is None

    def test_extracts_provider_from_valid_token(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """A decodable token yields its 'provider' field."""
        # Patch decode_provider_token where it is looked up — in the
        # starlette_auth_routes module, not at its definition site.
        monkeypatch.setattr(
            starlette_auth_routes,
            "decode_provider_token",
            lambda token: {"provider": "google"},
        )
        assert _parse_provider_token("valid-token") == "google"
class TestGetProviderByState:
    """Tests for _get_provider_by_state function.

    Fix: cache cleanup is now wrapped in try/finally. Previously, a failing
    assertion left stale entries in the module-level _STARLETTE_AUTH_CACHE,
    leaking state into subsequently-run tests.
    """

    def test_returns_none_for_none_state(self) -> None:
        """Test that None state returns None."""
        assert _get_provider_by_state(None) is None

    def test_returns_none_for_unknown_state(self) -> None:
        """Test that an unknown state code returns None."""
        # Clear the cache first so no earlier entry can match.
        _STARLETTE_AUTH_CACHE._cache.clear()
        assert _get_provider_by_state("unknown_state") is None

    def test_extracts_provider_from_cache(self) -> None:
        """Test that provider is extracted from a matching cache entry."""
        import time

        # Populate the cache with a known entry; values are
        # (value, absolute_expiration_timestamp) tuples.
        _STARLETTE_AUTH_CACHE._cache.clear()
        try:
            _STARLETTE_AUTH_CACHE._cache["_state_google_abc123"] = (
                {"some": "data"},
                time.time() + 3600,
            )
            assert _get_provider_by_state("abc123") == "google"
        finally:
            # Guaranteed cleanup even if the assertion fails.
            _STARLETTE_AUTH_CACHE._cache.clear()

    def test_handles_malformed_cache_keys(self) -> None:
        """Test that malformed cache keys are skipped gracefully."""
        import time

        _STARLETTE_AUTH_CACHE._cache.clear()
        try:
            future_exp = time.time() + 3600
            # Add a malformed key (not 4 parts).
            _STARLETTE_AUTH_CACHE._cache["malformed_key"] = (
                {"some": "data"},
                future_exp,
            )
            # Add a valid key with state code "validstate123".
            _STARLETTE_AUTH_CACHE._cache["_state_github_validstate123"] = (
                {"some": "data"},
                future_exp,
            )
            # Should find the valid key when querying with the state code.
            assert _get_provider_by_state("validstate123") == "github"
            # Should return None for a state code not present in the cache.
            assert _get_provider_by_state("nonexistentstate") is None
        finally:
            # Guaranteed cleanup even if an assertion fails.
            _STARLETTE_AUTH_CACHE._cache.clear()
class TestAsyncAuthCacheExpiration:
    """Tests for _AsyncAuthCache expiration behavior.

    NOTE(review): these tests monkeypatch ``time`` on
    ``starlette_auth_routes.time`` — that attribute is the global ``time``
    module object, so the patch is process-wide while the test runs.
    monkeypatch restores the original function after each test.
    """

    def test_expired_items_are_evicted_on_get(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that expired items are removed when accessing the cache."""
        _STARLETTE_AUTH_CACHE._cache.clear()
        current_time = 1000.0
        monkeypatch.setattr(starlette_auth_routes.time, "time", lambda: current_time)
        # Entries are (value, absolute_expiration_timestamp) tuples.
        _STARLETTE_AUTH_CACHE._cache["key1"] = ("value1", 1500.0)
        _STARLETTE_AUTH_CACHE._cache["key2"] = ("value2", 900.0)
        # Advance the fake clock past key2's expiration only.
        current_time = 1001.0
        monkeypatch.setattr(starlette_auth_routes.time, "time", lambda: current_time)
        assert _STARLETTE_AUTH_CACHE.get_dict() == {"key1": "value1"}
        _STARLETTE_AUTH_CACHE._cache.clear()

    def test_set_uses_expires_in_parameter(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that set() uses the provided expires_in value."""
        import asyncio

        _STARLETTE_AUTH_CACHE._cache.clear()
        current_time = 1000.0
        monkeypatch.setattr(starlette_auth_routes.time, "time", lambda: current_time)
        # Expected expiration: 1000.0 (fake now) + 60 (expires_in) == 1060.0.
        asyncio.run(_STARLETTE_AUTH_CACHE.set("key1", "value1", expires_in=60))
        assert _STARLETTE_AUTH_CACHE._cache["key1"] == ("value1", 1060.0)
        _STARLETTE_AUTH_CACHE._cache.clear()

    def test_set_uses_default_ttl_when_expires_in_is_none(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that set() uses default TTL when expires_in is not provided."""
        import asyncio

        _STARLETTE_AUTH_CACHE._cache.clear()
        current_time = 1000.0
        monkeypatch.setattr(starlette_auth_routes.time, "time", lambda: current_time)
        # 1000.0 + default TTL == 4600.0, i.e. the default appears to be
        # 3600 seconds — confirm against _AsyncAuthCache if it changes.
        asyncio.run(_STARLETTE_AUTH_CACHE.set("key1", "value1"))
        assert _STARLETTE_AUTH_CACHE._cache["key1"] == ("value1", 4600.0)
        _STARLETTE_AUTH_CACHE._cache.clear()
class TestGetOriginFromSecrets:
    """Unit tests for the _get_origin_from_secrets wrapper.

    _get_origin_from_secrets delegates to get_origin_from_redirect_uri, so
    each test patches that shared function and checks pass-through behavior.
    """

    @staticmethod
    def _patch_redirect_origin(
        monkeypatch: pytest.MonkeyPatch, value: str | None
    ) -> None:
        # Patch the shared helper at its lookup site in starlette_auth_routes.
        monkeypatch.setattr(
            starlette_auth_routes,
            "get_origin_from_redirect_uri",
            lambda: value,
        )

    def test_returns_none_when_no_origin(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """None from get_origin_from_redirect_uri is passed through."""
        self._patch_redirect_origin(monkeypatch, None)
        assert _get_origin_from_secrets() is None

    def test_extracts_origin_from_redirect_uri(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """A regular https origin is passed through unchanged."""
        self._patch_redirect_origin(monkeypatch, "https://example.com")
        assert _get_origin_from_secrets() == "https://example.com"

    def test_handles_localhost_uri(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """A localhost origin with an explicit port is passed through unchanged."""
        self._patch_redirect_origin(monkeypatch, "http://localhost:8501")
        assert _get_origin_from_secrets() == "http://localhost:8501"
class TestGetProviderLogoutUrl:
    """Tests for _get_provider_logout_url function."""

    def test_returns_none_when_no_user_cookie(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that None is returned when no user cookie exists."""
        from unittest.mock import MagicMock

        from streamlit.web.server.starlette.starlette_auth_routes import (
            _get_provider_logout_url,
        )

        mock_request = MagicMock()
        # An empty cookie jar means the provider cannot be determined.
        mock_request.cookies = {}
        assert _get_provider_logout_url(mock_request) is None

    def test_returns_none_when_no_provider_in_cookie(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that None is returned when cookie doesn't contain provider."""
        from unittest.mock import MagicMock

        from streamlit.web.server.starlette.starlette_auth_routes import (
            _get_provider_logout_url,
        )

        # Mock cookie without provider field
        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_cookie_value_from_request",
            lambda request, name: b'{"email": "test@example.com"}',
        )
        mock_request = MagicMock()
        assert _get_provider_logout_url(mock_request) is None

    def test_returns_none_when_no_end_session_endpoint(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that None is returned when provider has no end_session_endpoint."""
        from unittest.mock import MagicMock

        from streamlit.web.server.starlette.starlette_auth_routes import (
            _get_provider_logout_url,
        )

        # Mock cookie with provider
        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_cookie_value_from_request",
            lambda request, name: b'{"provider": "testprovider"}',
        )

        # Mock OAuth client that returns metadata without end_session_endpoint
        class MockClient:
            client_id = "test-client-id"

            def load_server_metadata(self) -> dict[str, Any]:
                return {"issuer": "https://example.com"}

        monkeypatch.setattr(
            starlette_auth_routes,
            "_create_oauth_client",
            lambda provider: (MockClient(), "/redirect"),
        )
        mock_request = MagicMock()
        assert _get_provider_logout_url(mock_request) is None

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_returns_logout_url_with_end_session_endpoint(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that logout URL is returned when provider has end_session_endpoint."""
        from unittest.mock import MagicMock

        from streamlit.web.server.starlette.starlette_auth_routes import (
            _get_provider_logout_url,
        )

        # Mock cookies - must differentiate between USER and TOKENS cookies
        def mock_get_cookie(request: Any, name: str) -> bytes | None:
            if name == USER_COOKIE_NAME:
                return b'{"provider": "testprovider"}'
            if name == TOKENS_COOKIE_NAME:
                return b'{"id_token": "test-id-token", "access_token": "test-access"}'
            return None

        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_cookie_value_from_request",
            mock_get_cookie,
        )

        # Mock OAuth client with end_session_endpoint
        class MockClient:
            client_id = "test-client-id"

            def load_server_metadata(self) -> dict[str, Any]:
                return {
                    "issuer": "https://example.com",
                    "end_session_endpoint": "https://example.com/logout",
                }

        monkeypatch.setattr(
            starlette_auth_routes,
            "_create_oauth_client",
            lambda provider: (MockClient(), "/redirect"),
        )
        # Mock get_validated_redirect_uri (now shared in auth_util)
        monkeypatch.setattr(
            starlette_auth_routes,
            "get_validated_redirect_uri",
            lambda: "http://localhost:8501/oauth2callback",
        )
        mock_request = MagicMock()
        result = _get_provider_logout_url(mock_request)
        assert result is not None
        assert "https://example.com/logout" in result
        assert "client_id=test-client-id" in result
        assert "post_logout_redirect_uri" in result
        # Verify that the validated redirect_uri is included in the logout URL
        assert "localhost" in result
        # Verify id_token_hint is included when tokens cookie has id_token
        assert "id_token_hint=test-id-token" in result

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_returns_none_when_redirect_uri_invalid(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Test that None is returned when redirect_uri doesn't end with /oauth2callback."""
        from unittest.mock import MagicMock

        from streamlit.web.server.starlette.starlette_auth_routes import (
            _get_provider_logout_url,
        )

        # Mock user cookie with provider
        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_cookie_value_from_request",
            lambda request, name: b'{"provider": "testprovider"}',
        )

        # Mock OAuth client with end_session_endpoint
        class MockClient:
            client_id = "test-client-id"

            def load_server_metadata(self) -> dict[str, Any]:
                return {
                    "issuer": "https://example.com",
                    "end_session_endpoint": "https://example.com/logout",
                }

        monkeypatch.setattr(
            starlette_auth_routes,
            "_create_oauth_client",
            lambda provider: (MockClient(), "/redirect"),
        )
        # Mock get_validated_redirect_uri to return None (invalid redirect_uri)
        monkeypatch.setattr(
            starlette_auth_routes,
            "get_validated_redirect_uri",
            lambda: None,
        )
        mock_request = MagicMock()
        result = _get_provider_logout_url(mock_request)
        # Should return None when redirect_uri is invalid
        assert result is None
class TestLogoutWithProviderRedirect:
    """Logout behavior when the provider advertises an end_session_endpoint."""

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_logout_redirects_to_provider_when_end_session_available(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """When a provider logout URL exists, logout must redirect to it."""
        provider_logout_url = (
            "https://provider.com/logout?post_logout_redirect_uri="
            "http%3A%2F%2Flocalhost%3A8501%2Foauth2callback"
        )
        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_provider_logout_url",
            lambda request: provider_logout_url,
        )
        with TestClient(Starlette(routes=create_auth_routes(""))) as client:
            response = client.get("/auth/logout", follow_redirects=False)
            assert response.status_code == 302
            assert "provider.com/logout" in response.headers["location"]

    def test_logout_redirects_to_base_when_no_end_session(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Without a provider logout URL, logout falls back to the base URL."""
        monkeypatch.setattr(
            starlette_auth_routes,
            "_get_provider_logout_url",
            lambda request: None,
        )
        with TestClient(Starlette(routes=create_auth_routes(""))) as client:
            response = client.get("/auth/logout", follow_redirects=False)
            assert response.status_code == 302
            assert response.headers["location"].endswith("/")
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/starlette/starlette_auth_routes_test.py",
"license": "Apache License 2.0",
"lines": 613,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_websocket.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WebSocket handling for the Starlette server."""
from __future__ import annotations
import asyncio
import json
from contextlib import suppress
from typing import TYPE_CHECKING, Any, Final
from urllib.parse import urlparse
from streamlit import config
from streamlit.auth_util import get_cookie_with_chunks, get_expose_tokens_config
from streamlit.logger import get_logger
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.runtime.runtime_util import serialize_forward_msg
from streamlit.runtime.session_manager import (
ClientContext,
SessionClient,
SessionClientDisconnectedError,
)
from streamlit.web.server.server_util import (
get_cookie_secret,
is_url_from_allowed_origins,
is_xsrf_enabled,
)
from streamlit.web.server.starlette import starlette_app_utils
from streamlit.web.server.starlette.starlette_server_config import (
TOKENS_COOKIE_NAME,
USER_COOKIE_NAME,
WEBSOCKET_MAX_SEND_QUEUE_SIZE,
XSRF_COOKIE_NAME,
)
if TYPE_CHECKING:
from collections.abc import Iterable, Mapping
from starlette.datastructures import Headers
from starlette.routing import BaseRoute
from starlette.websockets import WebSocket
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime import Runtime
_LOGGER: Final = get_logger(__name__)
# WebSocket stream route path (without base URL prefix).
_ROUTE_WEBSOCKET_STREAM: Final = "_stcore/stream"
def _parse_subprotocols(
headers: Headers,
) -> tuple[str | None, str | None, str | None]:
"""Parse the Sec-WebSocket-Protocol header.
Returns a tuple of (selected_subprotocol, xsrf_token, existing_session_id).
The subprotocol header is repurposed to pass tokens from client to server:
- First entry: subprotocol to select (e.g., "streamlit")
- Second entry: XSRF token for authentication validation
- Third entry: existing session ID for reconnection
Positional semantics are preserved: empty/whitespace entries are treated as
None rather than being filtered out (which would shift positions).
"""
raw = headers.get("sec-websocket-protocol")
if not raw:
return None, None, None
# Split and strip, preserving positions (empty strings become None)
entries = [value.strip() for value in raw.split(",")]
selected = entries[0] if entries and entries[0] else None
xsrf_token = entries[1] if len(entries) >= 2 and entries[1] else None
existing_session = entries[2] if len(entries) >= 3 and entries[2] else None
return selected, xsrf_token, existing_session
def _gather_user_info(headers: Headers) -> dict[str, str | bool | None]:
    """Collect user-info fields from the headers named in server.trustedUserHeaders.

    The config option maps header names to user-info keys; for each mapping,
    the first matching header value is used (None if the header is absent).
    Returns an empty dict if the option is missing or not a dict.
    """
    header_map = config.get_option("server.trustedUserHeaders")
    if not isinstance(header_map, dict):
        return {}
    info: dict[str, str | bool | None] = {}
    for header_name, user_key in header_map.items():
        matches = headers.getlist(header_name)
        # Only the first occurrence of a repeated header is trusted.
        info[user_key] = matches[0] if matches else None
    return info
def _is_origin_allowed(origin: str | None, host: str | None) -> bool:
"""Check if the WebSocket Origin header is allowed.
This mirrors Tornado's WebSocketHandler.check_origin behavior, which allows
same-origin connections by default and delegates to is_url_from_allowed_origins
for cross-origin requests.
Parameters
----------
origin: str | None
The origin of the WebSocket connection.
host: str | None
The host of the WebSocket connection.
Returns
-------
bool
True if:
- The origin is None (browser didn't send Origin header, allowed per spec)
- The origin matches the host (same-origin request)
- The origin is in the allowed origins list (is_url_from_allowed_origins)
"""
# If no Origin header is present, allow the connection.
# Per the WebSocket spec, browsers should always send Origin, but non-browser
# clients may not. Tornado allows connections without Origin by default.
if origin is None:
return True
# Check same-origin: compare origin host with request host
parsed_origin = urlparse(origin)
origin_host = parsed_origin.netloc
# If origin host matches request host, it's same-origin
if origin_host == host:
return True
# Delegate to the standard allowed origins check
return is_url_from_allowed_origins(origin)
def _parse_user_cookie_signed(cookie_value: str | bytes, origin: str) -> dict[str, Any]:
    """Validate an itsdangerous-signed user cookie and return its payload.

    Only cookies signed by the Starlette backend (itsdangerous) can be
    decoded here. Cookies produced by Tornado's set_secure_cookie fail
    validation and yield an empty dict, so users must re-authenticate after
    switching backends — this is expected.
    """
    if isinstance(cookie_value, str):
        # HTTP cookies use ISO-8859-1 (latin-1) per the HTTP spec; encoding
        # back with latin-1 (not UTF-8) recovers the original signed bytes.
        raw_value: bytes = cookie_value.encode("latin-1")
    else:
        raw_value = cookie_value
    decoded = starlette_app_utils.decode_signed_value(
        get_cookie_secret(), USER_COOKIE_NAME, raw_value
    )
    if decoded is None:
        return {}
    return _parse_decoded_user_cookie(decoded, origin)
def _parse_decoded_user_cookie(decoded_cookie: bytes, origin: str) -> dict[str, Any]:
"""Parse an already-decoded user cookie and validate the origin.
This is used when the cookie has already been decoded (e.g., from chunked cookie
retrieval). Validates the origin against the request origin for security.
"""
try:
payload = json.loads(decoded_cookie.decode("utf-8"))
except (UnicodeDecodeError, json.JSONDecodeError):
_LOGGER.exception("Error decoding auth cookie payload")
return {}
parsed_origin = urlparse(origin)
if not parsed_origin.scheme or not parsed_origin.netloc:
return {}
expected_origin = f"{parsed_origin.scheme}://{parsed_origin.netloc}"
cookie_origin = payload.get("origin")
if cookie_origin != expected_origin:
_LOGGER.error(
"Origin mismatch, the origin of websocket request is not the "
"same origin of redirect_uri in secrets.toml",
)
return {}
user_info: dict[str, Any] = {"is_logged_in": payload.get("is_logged_in", False)}
payload.pop("origin", None)
payload.pop("is_logged_in", None)
user_info.update(payload)
return user_info
def _get_signed_cookie_with_chunks(
    cookies: dict[str, str], cookie_name: str
) -> bytes | None:
    """Decode a signed cookie, reassembling chunked cookies transparently.

    Browsers cap cookie sizes, so large cookies may be split into chunks
    (e.g. ``_streamlit_user``, ``_streamlit_user__1``, ...). The shared
    get_cookie_with_chunks helper drives the per-chunk decoding.

    Parameters
    ----------
    cookies
        Cookie names mapped to their raw string values.
    cookie_name
        Base name of the cookie to retrieve.

    Returns
    -------
    bytes | None
        The unsigned cookie value, or None when missing or when the
        signature is invalid.

    Notes
    -----
    Uses itsdangerous signing, which is NOT compatible with Tornado's
    format; Tornado-signed cookies fail to decode.
    """
    secret = get_cookie_secret()

    def _decode_one(name: str) -> bytes | None:
        raw_value = cookies.get(name)
        if raw_value is None:
            return None
        # HTTP cookies are latin-1 encoded per the HTTP spec.
        return starlette_app_utils.decode_signed_value(
            secret, name, raw_value.encode("latin-1")
        )

    return get_cookie_with_chunks(_decode_one, cookie_name)
class StarletteClientContext(ClientContext):
    """ClientContext backed by a Starlette WebSocket handshake.

    Headers, cookies, and the peer address are snapshotted at construction
    time: they describe the initial handshake request and must not change
    for the lifetime of the connection.
    """

    def __init__(self, websocket: WebSocket) -> None:
        # Snapshot (copy) everything now rather than holding live views.
        self._headers: list[tuple[str, str]] = [*websocket.headers.items()]
        self._cookies: dict[str, str] = {**websocket.cookies}
        peer = websocket.client
        self._remote_ip: str | None = peer.host if peer else None

    @property
    def headers(self) -> Iterable[tuple[str, str]]:
        """Handshake headers as (name, value) pairs."""
        return self._headers

    @property
    def cookies(self) -> Mapping[str, str]:
        """Handshake cookies keyed by name."""
        return self._cookies

    @property
    def remote_ip(self) -> str | None:
        """The peer's IP address, or None if unknown."""
        return self._remote_ip
class StarletteSessionClient(SessionClient):
    """SessionClient implementation on top of a Starlette WebSocket.

    The Streamlit runtime calls ``write_forward_msg`` synchronously, while
    sending over the WebSocket is asynchronous. To bridge the two, outgoing
    messages are parked on a bounded queue that a dedicated background task
    drains onto the socket, so the calling thread never blocks on network I/O.

    Parameters
    ----------
    websocket
        The Starlette WebSocket connection to send messages through.
    """

    def __init__(self, websocket: WebSocket) -> None:
        self._websocket = websocket
        self._client_context = StarletteClientContext(websocket)
        # Bounded queue: a client that stops reading eventually fills it, at
        # which point we treat the client as disconnected (via
        # SessionClientDisconnectedError) instead of buffering without limit.
        self._send_queue: asyncio.Queue[bytes] = asyncio.Queue(
            maxsize=WEBSOCKET_MAX_SEND_QUEUE_SIZE
        )
        self._closed = asyncio.Event()
        self._sender_task = asyncio.create_task(
            self._sender(), name="starlette-ws-send"
        )

    async def _sender(self) -> None:
        """Drain the send queue onto the WebSocket until the connection dies.

        Runs for the lifetime of the connection, decoupling synchronous
        message production from async network I/O. Any disconnect or send
        failure terminates the loop; the closed event is then set so future
        ``write_forward_msg`` calls fail fast.
        """
        from starlette.websockets import WebSocketDisconnect

        try:
            while True:
                data = await self._send_queue.get()
                await self._websocket.send_bytes(data)
        except WebSocketDisconnect:
            # Normal client disconnect: nothing to report.
            pass
        except Exception:
            _LOGGER.exception("Error sending websocket payload")
        finally:
            self._closed.set()

    def write_forward_msg(self, msg: ForwardMsg) -> None:
        """Queue a ForwardMsg for delivery to the browser.

        Called synchronously by the Streamlit runtime; the serialized message
        is handed to the background sender task rather than written directly.

        Parameters
        ----------
        msg
            The ForwardMsg protobuf destined for this client.

        Raises
        ------
        SessionClientDisconnectedError
            If the connection has already closed, or the client has fallen so
            far behind that the outgoing queue is full.
        """
        if self._closed.is_set():
            raise SessionClientDisconnectedError
        serialized = serialize_forward_msg(msg)
        try:
            self._send_queue.put_nowait(serialized)
        except asyncio.QueueFull as exc:  # pragma: no cover - defensive
            self._closed.set()
            raise SessionClientDisconnectedError from exc

    @property
    def client_context(self) -> ClientContext:
        """Connection context captured at handshake time."""
        return self._client_context

    async def aclose(self) -> None:
        """Tear down the client: block further sends and reap the sender task.

        Sets the closed flag, cancels the background sender, and awaits its
        completion so no task is left dangling.
        """
        self._closed.set()
        self._sender_task.cancel()
        with suppress(asyncio.CancelledError):
            await self._sender_task
def create_websocket_handler(runtime: Runtime) -> Any:
    """Create the WebSocket endpoint handler for client-server communication.

    This factory function creates a Starlette WebSocket handler that manages the
    bidirectional communication between the browser and the Streamlit runtime.

    The handler performs:
    - Origin validation (CORS/XSRF protection)
    - Subprotocol negotiation
    - Session management (connect/disconnect)
    - User authentication via cookies and trusted headers
    - BackMsg processing from the client
    - ForwardMsg sending to the client (via StarletteSessionClient)

    Parameters
    ----------
    runtime
        The Streamlit runtime instance that manages sessions and script execution.

    Returns
    -------
    Callable
        An async function that handles WebSocket connections.
    """
    from starlette.websockets import WebSocketDisconnect

    # Read once at handler-creation time; the set of token types exposed to
    # user code does not vary per connection.
    expose_tokens = get_expose_tokens_config()

    async def _websocket_endpoint(websocket: WebSocket) -> None:
        # Validate origin before accepting the connection to prevent
        # cross-site WebSocket hijacking (mirrors Tornado's check_origin).
        origin = websocket.headers.get("Origin")
        host = websocket.headers.get("Host")
        if not _is_origin_allowed(origin, host):
            _LOGGER.warning(
                "Rejecting WebSocket connection from disallowed origin: %s", origin
            )
            await websocket.close(code=1008)  # 1008 = Policy Violation
            return
        # The browser smuggles the XSRF token and (on reconnect) the prior
        # session id through the Sec-WebSocket-Protocol header.
        subprotocol, xsrf_token, existing_session_id = _parse_subprotocols(
            websocket.headers
        )
        await websocket.accept(subprotocol=subprotocol)
        client = StarletteSessionClient(websocket)
        # session_id stays None until connect_session succeeds; the finally
        # block uses that to decide whether a disconnect is needed.
        session_id: str | None = None
        try:
            user_info: dict[str, Any] = {}
            if is_xsrf_enabled():
                xsrf_cookie = websocket.cookies.get(XSRF_COOKIE_NAME)
                origin_header = websocket.headers.get("Origin")
                # Validate XSRF token before parsing auth cookie:
                if origin_header and starlette_app_utils.validate_xsrf_token(
                    xsrf_token, xsrf_cookie
                ):
                    try:
                        raw_auth_cookie = _get_signed_cookie_with_chunks(
                            websocket.cookies, USER_COOKIE_NAME
                        )
                        if raw_auth_cookie:
                            user_info.update(
                                _parse_decoded_user_cookie(
                                    raw_auth_cookie, origin_header
                                )
                            )
                        raw_token_cookie = _get_signed_cookie_with_chunks(
                            websocket.cookies, TOKENS_COOKIE_NAME
                        )
                        if raw_token_cookie:
                            # Only surface the token types configured for
                            # exposure; the cookie stores keys as
                            # "<type>_token".
                            all_tokens = json.loads(raw_token_cookie)
                            filtered_tokens: dict[str, str] = {}
                            for token_type in expose_tokens:
                                token_key = f"{token_type}_token"
                                if token_key in all_tokens:
                                    filtered_tokens[token_type] = all_tokens[
                                        token_key
                                    ]
                            user_info["tokens"] = filtered_tokens
                    except Exception:  # pragma: no cover - defensive
                        _LOGGER.exception("Error parsing auth cookie for websocket")
            # Map in any user-configured headers. Note that these override anything
            # coming from the auth cookie.
            user_info.update(_gather_user_info(websocket.headers))
            session_id = runtime.connect_session(
                client=client,
                user_info=user_info,
                existing_session_id=existing_session_id,
            )
            # Receive loop: each binary frame is one serialized BackMsg.
            while True:
                try:
                    data = await websocket.receive_bytes()
                except WebSocketDisconnect:
                    break
                except RuntimeError:
                    # Starlette raises RuntimeError when a text frame is received
                    # by receive_bytes. Streamlit strictly uses binary protobufs
                    # for communication. We reject text frames to enforce the
                    # protocol and prevent ambiguity.
                    await websocket.close()
                    raise TypeError(
                        "WebSocket text frames are not supported; connection closed. "
                        "Expected binary protobufs."
                    )
                back_msg = BackMsg()
                try:
                    back_msg.ParseFromString(data)
                except Exception as exc:
                    # A malformed frame is reported to the session (so the UI
                    # can surface it) but does not kill the connection.
                    _LOGGER.exception("Error deserializing back message")
                    if session_id is not None:
                        runtime.handle_backmsg_deserialization_exception(
                            session_id, exc
                        )
                    continue
                msg_type = back_msg.WhichOneof("type")
                # "debug_disconnect_websocket" and "debug_shutdown_runtime" are
                # special developmentMode-only messages used in e2e tests to test
                # reconnect handling and disabling widgets.
                if msg_type == "debug_disconnect_websocket":
                    if config.get_option("global.developmentMode") or config.get_option(
                        "global.e2eTest"
                    ):
                        await websocket.close()
                        break
                    _LOGGER.warning(
                        "Client tried to disconnect websocket when not in "
                        "development mode or e2e testing."
                    )
                    continue
                if msg_type == "debug_shutdown_runtime":
                    if config.get_option("global.developmentMode") or config.get_option(
                        "global.e2eTest"
                    ):
                        runtime.stop()
                        break
                    _LOGGER.warning(
                        "Client tried to shut down runtime when not in "
                        "development mode or e2e testing."
                    )
                    continue
                runtime.handle_backmsg(session_id, back_msg)
        except WebSocketDisconnect:
            # The websocket was closed by the client,
            # we are handling it in the finally block.
            pass
        finally:
            try:
                if session_id is not None:
                    runtime.disconnect_session(session_id)
            finally:
                # Ensure client cleanup happens even if disconnect_session raises.
                await client.aclose()

    return _websocket_endpoint
def create_websocket_routes(runtime: Runtime, base_url: str | None) -> list[BaseRoute]:
    """Build the single WebSocket route used for client-server communication.

    The route lives at ``/_stcore/stream``, optionally prefixed with the
    configured base URL (e.g. base_url="myapp" yields
    "/myapp/_stcore/stream"), and is served by the handler produced by
    ``create_websocket_handler``.

    Parameters
    ----------
    runtime
        The Streamlit runtime that owns sessions and script execution.
    base_url
        Optional path prefix for the route, or None for no prefix.

    Returns
    -------
    list[BaseRoute]
        A one-element list holding the stream WebSocketRoute.
    """
    from starlette.routing import WebSocketRoute
    from streamlit.url_util import make_url_path

    stream_path = make_url_path(base_url or "", _ROUTE_WEBSOCKET_STREAM)
    endpoint = create_websocket_handler(runtime)
    return [WebSocketRoute(stream_path, endpoint)]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_websocket.py",
"license": "Apache License 2.0",
"lines": 462,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_websocket_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for starlette_websocket module."""
from __future__ import annotations
import asyncio
import json
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from streamlit.web.server.starlette import starlette_app_utils
from streamlit.web.server.starlette.starlette_websocket import (
StarletteClientContext,
StarletteSessionClient,
_gather_user_info,
_get_signed_cookie_with_chunks,
_is_origin_allowed,
_parse_decoded_user_cookie,
_parse_subprotocols,
_parse_user_cookie_signed,
create_websocket_handler,
create_websocket_routes,
)
from tests.testutil import patch_config_options
class TestParseSubprotocols:
    """Tests for the _parse_subprotocols helper."""

    @staticmethod
    def _parse(header_value):
        """Run _parse_subprotocols against a mocked header value."""
        mock_headers = MagicMock()
        mock_headers.get.return_value = header_value
        selected, xsrf, session = _parse_subprotocols(mock_headers)
        return selected, xsrf, session

    def test_returns_none_when_header_missing(self) -> None:
        """A missing header yields (None, None, None)."""
        assert self._parse(None) == (None, None, None)

    def test_returns_none_when_header_empty(self) -> None:
        """An empty header yields (None, None, None)."""
        assert self._parse("") == (None, None, None)

    def test_parses_single_subprotocol(self) -> None:
        """A single value becomes the selected subprotocol."""
        assert self._parse("streamlit") == ("streamlit", None, None)

    def test_parses_two_subprotocols(self) -> None:
        """A second value is interpreted as the XSRF token."""
        assert self._parse("streamlit, xsrf-token-value") == (
            "streamlit",
            "xsrf-token-value",
            None,
        )

    def test_parses_three_subprotocols(self) -> None:
        """A third value is interpreted as the existing session ID."""
        assert self._parse("streamlit, xsrf-token, session-123") == (
            "streamlit",
            "xsrf-token",
            "session-123",
        )

    def test_strips_whitespace(self) -> None:
        """Surrounding whitespace is trimmed from every entry."""
        assert self._parse("  streamlit  ,  xsrf  ,  session  ") == (
            "streamlit",
            "xsrf",
            "session",
        )

    def test_empty_entries_preserve_positions(self) -> None:
        """Blank entries become None without shifting later positions."""
        selected, xsrf, session = self._parse("streamlit, , , session")
        assert selected == "streamlit"
        # Positions 1 and 2 are blank; "session" sits at position 3, so it is
        # ignored rather than shifted into the xsrf/session slots.
        assert xsrf is None
        assert session is None
class TestGatherUserInfo:
    """Tests for the _gather_user_info helper."""

    @patch_config_options({"server.trustedUserHeaders": {}})
    def test_returns_empty_dict_when_no_mapping(self) -> None:
        """An empty header mapping produces an empty result."""
        assert _gather_user_info(MagicMock()) == {}

    @patch_config_options({"server.trustedUserHeaders": None})
    def test_returns_empty_dict_when_mapping_not_dict(self) -> None:
        """A non-dict mapping value produces an empty result."""
        assert _gather_user_info(MagicMock()) == {}

    @patch_config_options({"server.trustedUserHeaders": {"X-User-Email": "email"}})
    def test_extracts_header_value(self) -> None:
        """The configured header's value lands under the mapped key."""
        mock_headers = MagicMock()
        mock_headers.getlist.return_value = ["user@example.com"]
        assert _gather_user_info(mock_headers) == {"email": "user@example.com"}
        mock_headers.getlist.assert_called_with("X-User-Email")

    @patch_config_options({"server.trustedUserHeaders": {"X-User-Email": "email"}})
    def test_returns_none_for_missing_header(self) -> None:
        """An absent header maps to None rather than being dropped."""
        mock_headers = MagicMock()
        mock_headers.getlist.return_value = []
        assert _gather_user_info(mock_headers) == {"email": None}

    @patch_config_options(
        {
            "server.trustedUserHeaders": {
                "X-User-Email": "email",
                "X-User-Name": "name",
            }
        }
    )
    def test_extracts_multiple_headers(self) -> None:
        """Every configured header is extracted into the result."""
        header_values = {
            "X-User-Email": ["user@example.com"],
            "X-User-Name": ["John Doe"],
        }
        mock_headers = MagicMock()
        mock_headers.getlist.side_effect = lambda name: header_values.get(name, [])
        assert _gather_user_info(mock_headers) == {
            "email": "user@example.com",
            "name": "John Doe",
        }

    @patch_config_options({"server.trustedUserHeaders": {"X-User-Email": "email"}})
    def test_uses_first_value_when_multiple(self) -> None:
        """Only the first of several header values is kept."""
        mock_headers = MagicMock()
        mock_headers.getlist.return_value = ["first@example.com", "second@example.com"]
        assert _gather_user_info(mock_headers) == {"email": "first@example.com"}
class TestParseDecodedUserCookie:
    """Tests for the _parse_decoded_user_cookie helper."""

    @staticmethod
    def _encode(payload: dict) -> bytes:
        """JSON-encode a cookie payload the way the auth flow stores it."""
        return json.dumps(payload).encode()

    def test_returns_empty_dict_for_invalid_json(self) -> None:
        """Malformed JSON is rejected with an empty dict."""
        assert _parse_decoded_user_cookie(b"not-valid-json", "http://localhost") == {}

    def test_returns_empty_dict_for_invalid_utf8(self) -> None:
        """Bytes that are not valid UTF-8 are rejected with an empty dict."""
        assert _parse_decoded_user_cookie(b"\xff\xfe", "http://localhost") == {}

    def test_returns_empty_dict_for_missing_scheme(self) -> None:
        """An origin argument without a scheme is rejected."""
        payload = self._encode({"origin": "http://localhost", "is_logged_in": True})
        assert _parse_decoded_user_cookie(payload, "localhost") == {}

    def test_returns_empty_dict_for_origin_mismatch(self) -> None:
        """A cookie minted for a different origin is rejected."""
        payload = self._encode({"origin": "http://localhost", "is_logged_in": True})
        assert _parse_decoded_user_cookie(payload, "http://other.com") == {}

    def test_parses_valid_cookie(self) -> None:
        """A well-formed cookie is parsed and its origin key stripped."""
        payload = self._encode(
            {
                "origin": "http://localhost",
                "is_logged_in": True,
                "email": "user@test.com",
                "name": "Test User",
            }
        )
        parsed = _parse_decoded_user_cookie(payload, "http://localhost")
        assert parsed["is_logged_in"] is True
        assert parsed["email"] == "user@test.com"
        assert parsed["name"] == "Test User"
        assert "origin" not in parsed

    def test_handles_port_in_origin(self) -> None:
        """An origin carrying an explicit port still matches."""
        payload = self._encode(
            {"origin": "http://localhost:8501", "is_logged_in": True}
        )
        parsed = _parse_decoded_user_cookie(
            payload, "http://localhost:8501/some/path"
        )
        assert parsed["is_logged_in"] is True
class TestParseUserCookieSigned:
    """Tests for the _parse_user_cookie_signed helper."""

    @staticmethod
    def _sign(payload: dict) -> bytes:
        """Produce a cookie value signed with the test secret."""
        return starlette_app_utils.create_signed_value(
            "test-secret", "_streamlit_user", json.dumps(payload)
        )

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_returns_empty_dict_for_invalid_signature(self) -> None:
        """An unsigned/garbage cookie yields an empty dict."""
        assert _parse_user_cookie_signed("invalid-cookie", "http://localhost") == {}

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_returns_empty_dict_for_invalid_origin(self) -> None:
        """An origin missing its scheme is rejected even when signed."""
        signed = self._sign(
            {
                "origin": "http://localhost",
                "is_logged_in": True,
                "email": "test@test.com",
            }
        )
        assert _parse_user_cookie_signed(signed, "localhost") == {}

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_returns_empty_dict_for_origin_mismatch(self) -> None:
        """A signed cookie minted for another origin is rejected."""
        signed = self._sign(
            {
                "origin": "http://localhost",
                "is_logged_in": True,
                "email": "test@test.com",
            }
        )
        assert _parse_user_cookie_signed(signed, "http://example.com") == {}

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_parses_valid_cookie(self) -> None:
        """A correctly signed, origin-matched cookie parses cleanly."""
        signed = self._sign(
            {
                "origin": "http://localhost",
                "is_logged_in": True,
                "email": "test@test.com",
            }
        )
        parsed = _parse_user_cookie_signed(signed, "http://localhost")
        assert parsed["is_logged_in"] is True
        assert parsed["email"] == "test@test.com"
        # The origin key is consumed during validation and stripped.
        assert "origin" not in parsed

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_handles_bytes_cookie(self) -> None:
        """A bytes cookie value is accepted."""
        signed = self._sign({"origin": "http://localhost", "is_logged_in": True})
        parsed = _parse_user_cookie_signed(signed, "http://localhost")
        assert parsed["is_logged_in"] is True

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_handles_string_cookie(self) -> None:
        """A str cookie value is accepted too."""
        signed = self._sign({"origin": "http://localhost", "is_logged_in": True})
        parsed = _parse_user_cookie_signed(
            signed.decode("utf-8"), "http://localhost"
        )
        assert parsed["is_logged_in"] is True
class TestIsOriginAllowed:
    """Tests for _is_origin_allowed function (Origin validation for WebSocket)."""

    @pytest.mark.parametrize(
        ("origin", "host", "expected"),
        [
            # None origin allowed (non-browser clients)
            (None, "localhost:8501", True),
            # Same-origin requests allowed
            ("http://localhost:8501", "localhost:8501", True),
            # Localhost origins allowed by default
            ("http://localhost:3000", "somehost:8501", True),
            # 127.0.0.1 origins allowed by default
            ("http://127.0.0.1:3000", "somehost:8501", True),
            # Disallowed cross-origin requests rejected
            ("http://evil.com", "localhost:8501", False),
            # Different host origins rejected when not in allowlist
            ("http://attacker.example.com", "myapp.com:8501", False),
        ],
        ids=[
            "none_origin",
            "same_origin",
            "localhost",
            "127.0.0.1",
            "disallowed_origin",
            "different_host",
        ],
    )
    @patch_config_options({"server.enableCORS": True})
    def test_origin_validation_with_cors_enabled(
        self, origin: str | None, host: str, expected: bool
    ) -> None:
        """Test origin validation when CORS is enabled."""
        assert _is_origin_allowed(origin, host) is expected

    @patch_config_options({"server.enableCORS": False})
    def test_allows_all_origins_when_cors_disabled(self) -> None:
        """Test that all origins are allowed when CORS is disabled."""
        # With CORS disabled, origin checking is bypassed entirely — even a
        # clearly foreign origin is accepted.
        assert _is_origin_allowed("http://evil.com", "localhost:8501") is True

    @pytest.mark.parametrize(
        ("origin", "expected"),
        [
            ("http://trusted.com", True),
            ("http://untrusted.com", False),
        ],
        ids=["allowed_origins", "not_in_allowlist"],
    )
    @patch_config_options(
        {"server.enableCORS": True, "server.corsAllowedOrigins": ["http://trusted.com"]}
    )
    def test_origin_validation_with_allowlist(
        self, origin: str, expected: bool
    ) -> None:
        """Test origin validation against explicit allowlist."""
        # When corsAllowedOrigins is configured, only the listed origins pass.
        assert _is_origin_allowed(origin, "localhost:8501") is expected
class TestWebsocketHandlerUserInfoPrecedence:
    """Tests for user_info precedence in websocket handler."""

    @patch_config_options(
        {
            "server.enableXsrfProtection": True,
            "server.cookieSecret": "test-secret",
            "server.trustedUserHeaders": {"X-User-Email": "email"},
            "server.enableCORS": False,
        }
    )
    def test_headers_override_cookie_values(self) -> None:
        """Test that trusted headers override auth cookie values.

        When both an auth cookie and trusted headers provide the same user info
        key (e.g., 'email'), the header value should take precedence. This matches
        Tornado's behavior where headers override auth cookie values.
        """
        from starlette.websockets import WebSocketDisconnect

        # Create a valid signed cookie with email from auth provider
        cookie_payload = json.dumps(
            {
                "origin": "http://localhost",
                "is_logged_in": True,
                "email": "cookie@example.com",
            }
        )
        signed_cookie = starlette_app_utils.create_signed_value(
            "test-secret", "_streamlit_user", cookie_payload
        )
        xsrf_token = starlette_app_utils.generate_xsrf_token_string()
        # Mock websocket with both cookie and header providing different emails
        mock_websocket = MagicMock()
        mock_websocket.headers = MagicMock()
        mock_websocket.headers.get.side_effect = lambda key: {
            "Origin": "http://localhost",
            "Host": "localhost:8501",
            "sec-websocket-protocol": f"streamlit, {xsrf_token}",
        }.get(key)
        # getlist backs the trusted-header lookup for X-User-Email.
        mock_websocket.headers.getlist.return_value = ["header@example.com"]
        mock_websocket.cookies = {
            "_streamlit_user": signed_cookie.decode("utf-8"),
            "_streamlit_xsrf": xsrf_token,
        }
        mock_websocket.accept = AsyncMock()
        mock_websocket.close = AsyncMock()
        # Simulate immediate disconnect after connect_session
        mock_websocket.receive_bytes = AsyncMock(side_effect=WebSocketDisconnect())
        # Mock runtime
        mock_runtime = MagicMock()
        mock_runtime.connect_session = MagicMock(return_value="test-session-id")
        mock_runtime.disconnect_session = MagicMock()
        # Create handler and patch the client class
        handler = create_websocket_handler(mock_runtime)
        with patch(
            "streamlit.web.server.starlette.starlette_websocket.StarletteSessionClient"
        ) as mock_client_class:
            mock_client = MagicMock()
            mock_client.aclose = AsyncMock()
            mock_client_class.return_value = mock_client
            # Also patch validate_xsrf_token to ensure cookie parsing succeeds
            with patch(
                "streamlit.web.server.starlette.starlette_app_utils.validate_xsrf_token",
                return_value=True,
            ):
                # Drive the async endpoint to completion on a fresh event loop.
                asyncio.run(handler(mock_websocket))
        # Verify connect_session was called
        mock_runtime.connect_session.assert_called_once()
        # Get the user_info that was passed to connect_session
        call_kwargs = mock_runtime.connect_session.call_args
        user_info = call_kwargs.kwargs.get("user_info") or call_kwargs[1].get(
            "user_info"
        )
        # Headers should override cookie values - this is the key assertion
        assert user_info["email"] == "header@example.com"
        # Cookie values that aren't overridden should still be present
        assert user_info["is_logged_in"] is True
class TestGetSignedCookieWithChunks:
    """Tests for the _get_signed_cookie_with_chunks helper."""

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_returns_none_for_missing_cookie(self) -> None:
        """An absent cookie decodes to None."""
        empty_jar: dict[str, str] = {}
        assert _get_signed_cookie_with_chunks(empty_jar, "_streamlit_user") is None

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_returns_decoded_value_for_valid_cookie(self) -> None:
        """A properly signed cookie round-trips back to its payload bytes."""
        original_payload = "test-payload"
        signed = starlette_app_utils.create_signed_value(
            "test-secret", "_streamlit_user", original_payload
        )
        jar = {"_streamlit_user": signed.decode("utf-8")}
        decoded = _get_signed_cookie_with_chunks(jar, "_streamlit_user")
        assert decoded == original_payload.encode("utf-8")

    @patch_config_options({"server.cookieSecret": "test-secret"})
    def test_returns_none_for_invalid_signature(self) -> None:
        """A cookie carrying a bad signature decodes to None."""
        jar = {"_streamlit_user": "invalid-signed-value"}
        assert _get_signed_cookie_with_chunks(jar, "_streamlit_user") is None
class TestStarletteSessionClient:
    """Tests for the StarletteSessionClient send path and teardown."""

    @pytest.mark.anyio
    async def test_write_forward_msg_raises_when_closed(self) -> None:
        """A closed client refuses to accept further messages."""
        from streamlit.runtime.session_manager import SessionClientDisconnectedError

        client = StarletteSessionClient(MagicMock())
        # Simulate a dead connection.
        client._closed.set()
        with pytest.raises(SessionClientDisconnectedError):
            client.write_forward_msg(MagicMock())
        # Cleanup
        await client.aclose()

    @pytest.mark.anyio
    async def test_write_forward_msg_queues_message(self) -> None:
        """Messages are serialized and parked on the send queue."""
        client = StarletteSessionClient(MagicMock())
        with patch(
            "streamlit.web.server.starlette.starlette_websocket.serialize_forward_msg"
        ) as mock_serialize:
            mock_serialize.return_value = b"serialized"
            client.write_forward_msg(MagicMock())
        assert client._send_queue.qsize() == 1
        # Cleanup
        await client.aclose()

    @pytest.mark.anyio
    async def test_aclose_sets_closed_and_cancels_task(self) -> None:
        """aclose flips the closed flag and cancels the sender task."""
        client = StarletteSessionClient(MagicMock())
        await client.aclose()
        assert client._closed.is_set()
        assert client._sender_task.cancelled()
class TestCreateWebsocketRoutes:
    """Tests for the create_websocket_routes factory."""

    @staticmethod
    def _route_paths(base_url):
        """Build the routes for a base URL and return their paths."""
        routes = create_websocket_routes(MagicMock(), base_url=base_url)
        return [route.path for route in routes]

    def test_creates_websocket_route(self) -> None:
        """Without a base URL the single route sits at /_stcore/stream."""
        assert self._route_paths(None) == ["/_stcore/stream"]

    def test_creates_route_with_base_url(self) -> None:
        """A configured base URL prefixes the stream path."""
        assert self._route_paths("myapp") == ["/myapp/_stcore/stream"]

    def test_creates_route_with_slashed_base_url(self) -> None:
        """Leading/trailing slashes in the base URL are normalized away."""
        assert self._route_paths("/myapp/") == ["/myapp/_stcore/stream"]
class TestStarletteClientContext:
    """Tests for the StarletteClientContext handshake snapshot."""

    def test_headers_returns_all_headers(self) -> None:
        """Duplicate header names are preserved, not collapsed."""
        ws = MagicMock()
        expected_pairs = [
            ("Content-Type", "text/html"),
            ("Accept", "application/json"),
            ("Accept", "text/plain"),
        ]
        ws.headers.items.return_value = expected_pairs
        captured = list(StarletteClientContext(ws).headers)
        assert len(captured) == 3
        for pair in expected_pairs:
            assert pair in captured

    def test_cookies_returns_all_cookies(self) -> None:
        """Every handshake cookie is exposed via the cookies property."""
        ws = MagicMock()
        ws.cookies = {"session": "abc123", "user": "test"}
        assert StarletteClientContext(ws).cookies == {
            "session": "abc123",
            "user": "test",
        }

    def test_remote_ip_returns_client_host(self) -> None:
        """The peer's host address is surfaced as remote_ip."""
        ws = MagicMock()
        ws.client = MagicMock()
        ws.client.host = "192.168.1.100"
        assert StarletteClientContext(ws).remote_ip == "192.168.1.100"

    def test_remote_ip_returns_none_when_no_client(self) -> None:
        """remote_ip is None when Starlette reports no client."""
        ws = MagicMock()
        ws.client = None
        assert StarletteClientContext(ws).remote_ip is None
class TestStarletteSessionClientClientContext:
    """Tests for the client_context property on StarletteSessionClient."""

    @pytest.mark.anyio
    async def test_client_context_returns_starlette_context(self) -> None:
        """client_context exposes a StarletteClientContext for this socket."""
        ws = MagicMock()
        ws.headers.items.return_value = [("Host", "localhost")]
        ws.cookies = {"test": "cookie"}
        client = StarletteSessionClient(ws)
        context = client.client_context
        assert isinstance(context, StarletteClientContext)
        # The context must reflect this websocket's handshake data.
        assert list(context.headers) == [("Host", "localhost")]
        assert context.cookies == {"test": "cookie"}
        # Cleanup
        await client.aclose()
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/starlette/starlette_websocket_test.py",
"license": "Apache License 2.0",
"lines": 532,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_static_routes.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Static file handling for the Starlette server.
This is for serving the core Streamlit static assets (HTML/JS/CSS)
not related to the app static file serving feature.
"""
from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any, Final
from streamlit import file_util
from streamlit.path_security import is_unsafe_path_pattern
from streamlit.url_util import make_url_path
from streamlit.web.server.routes import (
NO_CACHE_PATTERN,
STATIC_ASSET_CACHE_MAX_AGE_SECONDS,
)
if TYPE_CHECKING:
from collections.abc import MutableMapping
from starlette.routing import BaseRoute
from starlette.staticfiles import StaticFiles
from starlette.types import Receive, Scope, Send
# Reserved endpoint paths that should return 404 instead of the SPA
# index.html fallback (the fallback would otherwise mask a missing endpoint).
# NOTE(review): the consumer of this constant is not visible in this chunk —
# presumably the static handler below; verify before relying on it.
_RESERVED_STATIC_PATH_SUFFIXES: Final = ("_stcore/health", "_stcore/host-config")
def create_streamlit_static_handler(
    directory: str, base_url: str | None
) -> StaticFiles:
    """Create a static file handler used for serving Streamlit's static assets.

    This also handles:
    - SPA fallback (serving index.html on 404s for client-side routing)
    - Long-term caching of hashed assets
    - No-cache for HTML/manifest files
    - Trailing slash redirect (301)
    - Double-slash protection (400 for protocol-relative URL security)

    Parameters
    ----------
    directory
        Filesystem directory holding the built static assets.
    base_url
        Optional base URL path the app is served under (may be None/empty).
    """
    from starlette.exceptions import HTTPException
    from starlette.responses import FileResponse, RedirectResponse, Response
    from starlette.staticfiles import StaticFiles

    class _StreamlitStaticFiles(StaticFiles):
        def __init__(self, directory: str, base_url: str | None) -> None:
            super().__init__(directory=directory, html=True)
            self._base_url = (base_url or "").strip("/")
            self._index_path = os.path.join(directory, "index.html")

        async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
            """Handle incoming requests with security checks and redirects."""
            if scope["type"] != "http":
                await super().__call__(scope, receive, send)
                return
            path = scope.get("path", "")
            # Security check: Block paths starting with double slash (protocol-relative
            # URL protection). A path like //example.com could be misinterpreted as a
            # protocol-relative URL if redirected, which is a security risk.
            if path.startswith("//"):
                response = Response(content="Bad Request", status_code=400)
                await response(scope, receive, send)
                return
            # Security check: Block UNC paths, absolute paths, drive-qualified paths,
            # and path traversal patterns BEFORE any filesystem operations.
            # See is_unsafe_path_pattern() docstring for details.
            # Strip the leading slash since paths come in as "/filename" but we check
            # the relative portion.
            relative_path = path.lstrip("/")
            if relative_path and is_unsafe_path_pattern(relative_path):
                response = Response(content="Bad Request", status_code=400)
                await response(scope, receive, send)
                return
            # Handle trailing slash redirect: Returns 301 for paths with trailing
            # slashes (except root "/" or mount root).
            # We replicate this for consistent URL handling and to avoid duplicate
            # content issues. When mounted (e.g., at "/app"), scope["path"] is the
            # full path "/app/" and scope["root_path"] is "/app", so we must not
            # redirect the mount root to avoid infinite redirect loops.
            root_path = scope.get("root_path", "")
            if len(path) > 1 and path.endswith("/"):
                redirect_path = path.rstrip("/")
                # Don't redirect if we're at the mount root (path without slash
                # equals root_path).
                if redirect_path == root_path:
                    await super().__call__(scope, receive, send)
                    return
                # Build redirect URL without trailing slash
                query_string = scope.get("query_string", b"")
                if query_string:
                    redirect_path += "?" + query_string.decode("latin-1")
                response = RedirectResponse(
                    url=redirect_path,
                    status_code=301,
                    headers={"Cache-Control": "no-cache"},
                )
                await response(scope, receive, send)
                return
            await super().__call__(scope, receive, send)

        async def get_response(
            self, path: str, scope: MutableMapping[str, Any]
        ) -> Response:
            """Resolve *path* to a response, falling back to index.html on 404.

            Reserved endpoint paths (see _is_reserved) propagate their 404
            instead of being masked by the SPA fallback.
            """
            served_path = path
            try:
                response = await super().get_response(path, scope)
            except HTTPException as exc:
                if exc.status_code != 404 or self._is_reserved(scope["path"]):
                    raise
                # Serve index.html for 404s (existing behavior):
                response = FileResponse(self._index_path)
                served_path = "index.html"
            self._apply_cache_headers(response, served_path)
            return response

        def _is_reserved(self, request_path: str) -> bool:
            """Check if the request path is reserved and should not fallback."""
            # Match Tornado's behavior: simple endswith check on the URL path.
            # TODO: Consider making this path-segment-aware in the future to avoid
            # false positives like "/my_stcore/health" matching "_stcore/health".
            url_path = request_path.split("?", 1)[0]
            return any(url_path.endswith(x) for x in _RESERVED_STATIC_PATH_SUFFIXES)

        def _apply_cache_headers(self, response: Response, served_path: str) -> None:
            """Apply cache headers matching Tornado's behavior."""
            if response.status_code in {301, 302, 303, 304, 307, 308}:
                return
            normalized = served_path.replace("\\", "/").lstrip("/")
            # Remove a single "./" prefix. (Don't use str.lstrip("./") here:
            # it strips any run of leading '.' and '/' characters, so a path
            # like ".env" would be misread as "env" before the cache-pattern
            # check below.)
            if normalized.startswith("./"):
                normalized = normalized[2:]
            # Tornado marks HTML/manifest assets as no-cache but lets hashed bundles
            # live in cache. Keep that contract to avoid churning snapshots or CDNs.
            cache_value = (
                "no-cache"
                if not normalized or NO_CACHE_PATTERN.search(normalized)
                else f"public, immutable, max-age={STATIC_ASSET_CACHE_MAX_AGE_SECONDS}"
            )
            response.headers["Cache-Control"] = cache_value

    return _StreamlitStaticFiles(directory=directory, base_url=base_url)
def create_streamlit_static_assets_routes(base_url: str | None) -> list[BaseRoute]:
    """Build the mount that serves Streamlit's core static assets."""
    from starlette.routing import Mount

    handler = create_streamlit_static_handler(
        directory=file_util.get_static_dir(), base_url=base_url
    )
    # Starlette's Mount with a trailing slash (e.g. "/myapp/") does not match
    # "/myapp", while a mount without one matches both forms (it redirects
    # "/myapp" to "/myapp/"). So drop any trailing slash, falling back to "/"
    # when the base URL resolves to the root.
    raw_path = make_url_path(base_url or "", "")
    mount_path = raw_path.rstrip("/") or "/"
    return [Mount(mount_path, app=handler, name="static-assets")]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_static_routes.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_static_routes_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for starlette_static module."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from starlette.applications import Starlette
from starlette.routing import Mount
from starlette.testclient import TestClient
from streamlit.web.server.routes import STATIC_ASSET_CACHE_MAX_AGE_SECONDS
from streamlit.web.server.starlette.starlette_static_routes import (
_RESERVED_STATIC_PATH_SUFFIXES,
create_streamlit_static_handler,
)
if TYPE_CHECKING:
from collections.abc import Iterator
from pathlib import Path
@pytest.fixture
def static_app(tmp_path: Path) -> Iterator[TestClient]:
    """Yield a test client whose root serves a small static file tree."""
    static_dir = tmp_path / "static"
    static_dir.mkdir()
    fixtures = {
        "index.html": "<html>Home</html>",
        "app.abc123.js": "console.log('app')",
        "manifest.json": "{}",
        "style.css": "body {}",
    }
    for filename, contents in fixtures.items():
        (static_dir / filename).write_text(contents)
    # One file in a subdirectory, to exercise nested paths.
    subdir = static_dir / "subdir"
    subdir.mkdir()
    (subdir / "page.html").write_text("<html>Page</html>")
    handler = create_streamlit_static_handler(
        directory=str(static_dir), base_url=None
    )
    app = Starlette(routes=[Mount("/", app=handler)])
    with TestClient(app) as client:
        yield client
class TestStreamlitStaticFiles:
    """Behavior of the Streamlit static files handler."""

    def test_serves_index_html(self, static_app: TestClient) -> None:
        """index.html is served directly."""
        resp = static_app.get("/index.html")
        assert resp.status_code == 200
        assert resp.text == "<html>Home</html>"

    def test_serves_root_as_index(self, static_app: TestClient) -> None:
        """The root path serves index.html."""
        resp = static_app.get("/")
        assert resp.status_code == 200
        assert resp.text == "<html>Home</html>"

    def test_serves_js_files(self, static_app: TestClient) -> None:
        """JS bundles are served with their contents intact."""
        resp = static_app.get("/app.abc123.js")
        assert resp.status_code == 200
        assert resp.text == "console.log('app')"

    def test_serves_css_files(self, static_app: TestClient) -> None:
        """CSS files are served with their contents intact."""
        resp = static_app.get("/style.css")
        assert resp.status_code == 200
        assert resp.text == "body {}"

    def test_spa_fallback_returns_index(self, static_app: TestClient) -> None:
        """Unknown paths fall back to index.html (SPA routing)."""
        resp = static_app.get("/unknown/path")
        assert resp.status_code == 200
        assert resp.text == "<html>Home</html>"

    def test_cache_control_for_index(self, static_app: TestClient) -> None:
        """index.html is marked no-cache."""
        assert static_app.get("/index.html").headers["Cache-Control"] == "no-cache"

    def test_cache_control_for_manifest(self, static_app: TestClient) -> None:
        """manifest.json is marked no-cache."""
        header = static_app.get("/manifest.json").headers["Cache-Control"]
        assert header == "no-cache"

    def test_cache_control_for_hashed_assets(self, static_app: TestClient) -> None:
        """Hashed assets get a long-lived, immutable cache header."""
        header = static_app.get("/app.abc123.js").headers["Cache-Control"]
        assert header == (
            f"public, immutable, max-age={STATIC_ASSET_CACHE_MAX_AGE_SECONDS}"
        )

    def test_cache_control_for_css(self, static_app: TestClient) -> None:
        """CSS files get a long-lived, immutable cache header."""
        header = static_app.get("/style.css").headers["Cache-Control"]
        assert header == (
            f"public, immutable, max-age={STATIC_ASSET_CACHE_MAX_AGE_SECONDS}"
        )

    def test_spa_fallback_has_no_cache(self, static_app: TestClient) -> None:
        """The SPA fallback response is marked no-cache."""
        header = static_app.get("/some/spa/route").headers["Cache-Control"]
        assert header == "no-cache"
class TestReservedPaths:
    """Behavior of reserved (non-fallback) paths."""

    def test_reserved_paths_constant(self) -> None:
        """Both health and host-config suffixes are registered as reserved."""
        for suffix in ("_stcore/health", "_stcore/host-config"):
            assert suffix in _RESERVED_STATIC_PATH_SUFFIXES

    def test_reserved_path_returns_404(self, static_app: TestClient) -> None:
        """Reserved paths return 404 instead of the SPA fallback."""
        assert static_app.get("/_stcore/health").status_code == 404

    def test_reserved_path_host_config_returns_404(
        self, static_app: TestClient
    ) -> None:
        """The reserved host-config path returns 404."""
        assert static_app.get("/_stcore/host-config").status_code == 404

    def test_user_path_ending_with_reserved_suffix_returns_404(
        self, static_app: TestClient
    ) -> None:
        """Paths that merely end with a reserved suffix also return 404.

        This mirrors Tornado, which uses an endswith() check for reserved
        path matching, so /my_stcore/health counts as reserved because it
        ends with '_stcore/health'.
        TODO: Consider making this path-segment-aware in the future to avoid
        false positives.
        """
        assert static_app.get("/my_stcore/health").status_code == 404

    def test_user_path_custom_stcore_returns_404(self, static_app: TestClient) -> None:
        """/custom_stcore/host-config returns 404 (matches Tornado)."""
        assert static_app.get("/custom_stcore/host-config").status_code == 404

    def test_nested_reserved_path_returns_404(self, static_app: TestClient) -> None:
        """Nested reserved paths like /foo/_stcore/health return 404."""
        assert static_app.get("/foo/_stcore/health").status_code == 404
class TestWithBaseUrl:
    """Static file serving behind a base URL prefix."""

    def test_serves_files_with_base_url(self, tmp_path: Path) -> None:
        """Files resolve correctly when mounted under a base URL."""
        root = tmp_path / "static"
        root.mkdir()
        (root / "index.html").write_text("<html>Base</html>")
        handler = create_streamlit_static_handler(
            directory=str(root), base_url="myapp"
        )
        app = Starlette(routes=[Mount("/myapp", app=handler)])
        with TestClient(app) as client:
            resp = client.get("/myapp/index.html")
        assert resp.status_code == 200
        assert resp.text == "<html>Base</html>"

    def test_no_redirect_loop_when_mounted(self, tmp_path: Path) -> None:
        """The mount root with a trailing slash does not enter a redirect loop.

        When mounted at /app, a request for /app/ must serve index.html
        rather than redirect back to /app (which would redirect to /app/
        again, forever).
        """
        root = tmp_path / "static"
        root.mkdir()
        (root / "index.html").write_text("<html>Mounted</html>")
        handler = create_streamlit_static_handler(directory=str(root), base_url="")
        app = Starlette(routes=[Mount("/app", app=handler)])
        with TestClient(app, follow_redirects=False) as client:
            # Starlette's Mount redirects /app to /app/.
            redirect = client.get("/app")
            assert redirect.status_code == 307
            assert redirect.headers["location"] == "http://testserver/app/"
            # /app/ serves content instead of bouncing back to /app.
            served = client.get("/app/")
            assert served.status_code == 200
            assert served.text == "<html>Mounted</html>"

    def test_nested_mount_no_redirect_loop(self, tmp_path: Path) -> None:
        """Nested mounts (e.g. FastAPI mounting Streamlit) serve correctly."""
        root = tmp_path / "static"
        root.mkdir()
        (root / "index.html").write_text("<html>Nested</html>")
        handler = create_streamlit_static_handler(directory=str(root), base_url="")
        # Inner app hosts the static files at its root, like Streamlit does;
        # the outer app mounts it at /app, like FastAPI would.
        inner_app = Starlette(routes=[Mount("/", app=handler)])
        outer_app = Starlette(routes=[Mount("/app", app=inner_app)])
        with TestClient(outer_app, follow_redirects=False) as client:
            resp = client.get("/app/")
            assert resp.status_code == 200
            assert resp.text == "<html>Nested</html>"
        # Also reachable via the redirect chain from /app.
        with TestClient(outer_app, follow_redirects=True) as client:
            resp = client.get("/app")
            assert resp.status_code == 200
            assert resp.text == "<html>Nested</html>"
class TestDoubleSlashProtection:
    """Tests for double-slash (protocol-relative URL) security protection.

    Note: We need to test these with raw ASGI scope because HTTP clients
    interpret //evil.com as a protocol-relative URL, not a path.
    """

    @staticmethod
    async def _get_raw(tmp_path: Path, path: str) -> tuple[int, bytes]:
        """Drive the handler with a hand-made ASGI GET and return (status, body).

        Bypasses any HTTP client so *path* reaches the app verbatim, which is
        the only way to deliver a path starting with "//".
        """
        static_dir = tmp_path / "static"
        static_dir.mkdir()
        (static_dir / "index.html").write_text("<html>Home</html>")
        static_files = create_streamlit_static_handler(
            directory=str(static_dir), base_url=None
        )
        scope = {
            "type": "http",
            "method": "GET",
            "path": path,
            "query_string": b"",
            "root_path": "",
            "headers": [],
        }
        status = 0
        body = b""

        async def receive() -> dict[str, object]:
            return {"type": "http.request", "body": b""}

        async def send(message: dict[str, object]) -> None:
            nonlocal status, body
            if message["type"] == "http.response.start":
                status = message["status"]
            elif message["type"] == "http.response.body":
                body += message.get("body", b"")

        await static_files(scope, receive, send)
        return status, body

    @pytest.mark.anyio
    async def test_double_slash_returns_400(self, tmp_path: Path) -> None:
        """Paths starting with // return 400 Bad Request.

        Double-slash paths like //example.com could be misinterpreted as
        protocol-relative URLs if redirected, which is a security risk.
        """
        status, body = await self._get_raw(tmp_path, "//evil.com")
        assert status == 400
        assert body == b"Bad Request"

    @pytest.mark.anyio
    async def test_double_slash_with_path_returns_400(self, tmp_path: Path) -> None:
        """Paths like //evil.com/path return 400."""
        status, body = await self._get_raw(tmp_path, "//evil.com/some/path")
        assert status == 400
        assert body == b"Bad Request"

    @pytest.mark.anyio
    async def test_double_slash_at_root_returns_400(self, tmp_path: Path) -> None:
        """A bare // path returns 400."""
        status, body = await self._get_raw(tmp_path, "//")
        assert status == 400
        assert body == b"Bad Request"

    def test_single_slash_path_works(self, static_app: TestClient) -> None:
        """Normal single-slash paths still work correctly."""
        response = static_app.get("/index.html")
        assert response.status_code == 200
        assert response.text == "<html>Home</html>"

    def test_path_with_double_slash_in_middle_works(
        self, static_app: TestClient
    ) -> None:
        """A double slash not at the start doesn't trigger protection.

        Only paths starting with // are blocked. Paths like /foo//bar
        are handled normally by the SPA fallback.
        """
        response = static_app.get("/foo//bar")
        # This falls through to SPA fallback, not blocked
        assert response.status_code == 200
        assert response.text == "<html>Home</html>"
class TestTrailingSlashRedirect:
    """Tests for trailing slash redirect behavior (301 redirects)."""

    @staticmethod
    def _build_client(tmp_path: Path) -> TestClient:
        """Return a non-redirect-following client over a minimal static dir."""
        static_dir = tmp_path / "static"
        static_dir.mkdir()
        (static_dir / "index.html").write_text("<html>Home</html>")
        static_files = create_streamlit_static_handler(
            directory=str(static_dir), base_url=None
        )
        app = Starlette(routes=[Mount("/", app=static_files)])
        return TestClient(app, follow_redirects=False)

    def test_trailing_slash_redirects_to_without(self, tmp_path: Path) -> None:
        """Paths with a trailing slash 301-redirect to the path without it."""
        with self._build_client(tmp_path) as client:
            response = client.get("/somepath/")
        assert response.status_code == 301
        assert response.headers["location"] == "/somepath"
        assert response.headers["Cache-Control"] == "no-cache"

    def test_nested_trailing_slash_redirects(self, tmp_path: Path) -> None:
        """Nested paths with a trailing slash redirect correctly."""
        with self._build_client(tmp_path) as client:
            response = client.get("/deep/nested/path/")
        assert response.status_code == 301
        assert response.headers["location"] == "/deep/nested/path"

    def test_trailing_slash_redirect_preserves_query_string(
        self, tmp_path: Path
    ) -> None:
        """The query string is preserved in the trailing slash redirect."""
        with self._build_client(tmp_path) as client:
            response = client.get("/somepath/?foo=bar&baz=qux")
        assert response.status_code == 301
        assert response.headers["location"] == "/somepath?foo=bar&baz=qux"

    def test_root_slash_does_not_redirect(self, tmp_path: Path) -> None:
        """The root path '/' serves content instead of redirecting."""
        with self._build_client(tmp_path) as client:
            response = client.get("/")
        assert response.status_code == 200
        assert response.text == "<html>Home</html>"
class TestCacheHeadersOnRedirects:
    """Cache-Control behavior on redirect vs. regular responses."""

    def test_redirect_responses_keep_their_cache_headers(self, tmp_path: Path) -> None:
        """301 responses keep their redirect-specific no-cache header.

        _apply_cache_headers must skip redirect status codes so the
        redirect's own Cache-Control header is never overwritten by the
        static-asset caching header.
        """
        root = tmp_path / "static"
        root.mkdir()
        (root / "index.html").write_text("<html>Home</html>")
        handler = create_streamlit_static_handler(directory=str(root), base_url=None)
        app = Starlette(routes=[Mount("/", app=handler)])
        with TestClient(app, follow_redirects=False) as client:
            resp = client.get("/somepath/")
        cache_header = resp.headers["Cache-Control"]
        assert resp.status_code == 301
        assert cache_header == "no-cache"
        assert "immutable" not in cache_header

    def test_regular_html_has_no_cache(self, static_app: TestClient) -> None:
        """Plain HTML responses (not redirects) are marked no-cache."""
        resp = static_app.get("/index.html")
        assert resp.status_code == 200
        assert resp.headers["Cache-Control"] == "no-cache"

    def test_hashed_js_has_long_cache(self, static_app: TestClient) -> None:
        """Hashed bundles (not redirects) get the long-lived cache header."""
        resp = static_app.get("/app.abc123.js")
        assert resp.status_code == 200
        assert resp.headers["Cache-Control"] == (
            f"public, immutable, max-age={STATIC_ASSET_CACHE_MAX_AGE_SECONDS}"
        )
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/starlette/starlette_static_routes_test.py",
"license": "Apache License 2.0",
"lines": 401,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_routes.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa: RUF029 # Async route handlers are idiomatic even without await
"""Route handlers for the Starlette server."""
from __future__ import annotations
import os
from pathlib import Path
from typing import TYPE_CHECKING, Final
from urllib.parse import quote
from streamlit import config, file_util
from streamlit.logger import get_logger
from streamlit.runtime.media_file_storage import MediaFileKind, MediaFileStorageError
from streamlit.runtime.memory_media_file_storage import get_extension_for_mimetype
from streamlit.runtime.uploaded_file_manager import UploadedFileRec
from streamlit.web.server.app_static_file_handler import (
MAX_APP_STATIC_FILE_SIZE,
SAFE_APP_STATIC_FILE_EXTENSIONS,
)
from streamlit.web.server.component_file_utils import (
build_safe_abspath,
guess_content_type,
)
from streamlit.web.server.routes import (
allow_all_cross_origin_requests,
is_allowed_origin,
)
from streamlit.web.server.server_util import get_url, is_xsrf_enabled
from streamlit.web.server.starlette import starlette_app_utils
from streamlit.web.server.starlette.starlette_app_utils import validate_xsrf_token
from streamlit.web.server.starlette.starlette_server_config import XSRF_COOKIE_NAME
from streamlit.web.server.stats_request_handler import StatsRequestHandler
if TYPE_CHECKING:
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import BaseRoute
from streamlit.components.types.base_component_registry import BaseComponentRegistry
from streamlit.components.v2.component_manager import BidiComponentManager
from streamlit.runtime import Runtime
from streamlit.runtime.memory_media_file_storage import MemoryMediaFileStorage
from streamlit.runtime.memory_uploaded_file_manager import MemoryUploadedFileManager
_LOGGER: Final = get_logger(__name__)
# Route path constants (without base URL prefix)
# These define the canonical paths for all Starlette server endpoints.
# IMPORTANT: Keep these in sync with:
# - frontend/app/vite.config.ts (dev server proxy configuration)
# - frontend/connection/src/DefaultStreamlitEndpoints.ts
BASE_ROUTE_CORE: Final = "_stcore"
BASE_ROUTE_MEDIA: Final = "media"
BASE_ROUTE_UPLOAD_FILE: Final = f"{BASE_ROUTE_CORE}/upload_file"
BASE_ROUTE_COMPONENT: Final = "component"
# Health check routes
_ROUTE_HEALTH: Final = f"{BASE_ROUTE_CORE}/health"
_ROUTE_SCRIPT_HEALTH: Final = f"{BASE_ROUTE_CORE}/script-health-check"
# Metrics routes
_ROUTE_METRICS: Final = f"{BASE_ROUTE_CORE}/metrics"
# Host configuration
_ROUTE_HOST_CONFIG: Final = f"{BASE_ROUTE_CORE}/host-config"
# Media and file routes. Starlette's "{...:path}" converter lets the
# captured value span slashes, so media file ids may contain "/".
_ROUTE_MEDIA: Final = f"{BASE_ROUTE_MEDIA}/{{file_id:path}}"
_ROUTE_UPLOAD_FILE: Final = f"{BASE_ROUTE_UPLOAD_FILE}/{{session_id}}/{{file_id}}"
# Component routes (v1 custom components and v2 bidi components)
_ROUTE_COMPONENTS_V1: Final = f"{BASE_ROUTE_COMPONENT}/{{path:path}}"
_ROUTE_COMPONENTS_V2: Final = f"{BASE_ROUTE_CORE}/bidi-components/{{path:path}}"
# App static files
_ROUTE_APP_STATIC: Final = "app/static/{path:path}"
def _with_base(path: str, base_url: str | None = None) -> str:
    """Prepend the configured (or explicitly given) base URL path to *path*.

    Parameters
    ----------
    path
        The route path to prefix (e.g., "_stcore/health").
    base_url
        Explicit base URL to use. When None, the configured
        server.baseUrlPath option is used instead; an empty string means
        no prefix at all.

    Returns
    -------
    str
        The full route path, e.g. "/myapp/_stcore/health".
    """
    from streamlit.url_util import make_url_path

    if base_url is not None:
        base = base_url
    else:
        base = config.get_option("server.baseUrlPath")
    return make_url_path(base or "", path)
async def _set_cors_headers(request: Request, response: Response) -> None:
    """Apply the Access-Control-Allow-Origin header per CORS configuration.

    When CORS is disabled or the server runs in development mode, every
    origin is allowed ("*"). Otherwise the request's Origin header is only
    echoed back when it matches the configured allowlist.

    Parameters
    ----------
    request
        Incoming request; its Origin header is inspected.
    response
        Outgoing response the CORS header is written to.
    """
    if allow_all_cross_origin_requests():
        response.headers["Access-Control-Allow-Origin"] = "*"
        return
    origin = request.headers.get("Origin")
    if not origin:
        return
    if is_allowed_origin(origin):
        response.headers["Access-Control-Allow-Origin"] = origin
def _ensure_xsrf_cookie(request: Request, response: Response) -> None:
    """Attach an XSRF cookie to *response* when XSRF protection is enabled.

    Mirrors Tornado's behavior: if the request already carries a decodable
    XSRF cookie, its token bytes and timestamp are reused so the token stays
    stable across requests; otherwise a fresh token is generated. The cookie
    gets the Secure flag only when an SSL certificate is configured.

    Note: The cookie deliberately omits HttpOnly. The double-submit cookie
    pattern requires JavaScript to read the cookie value and echo it in the
    X-Xsrftoken request header, which the server compares against the cookie.

    Parameters
    ----------
    request
        Incoming request, inspected for an existing XSRF cookie.
    response
        Outgoing response the cookie is written to.
    """
    if not is_xsrf_enabled():
        return

    # Reuse the existing token (if any) so it survives across requests.
    token_bytes: bytes | None = None
    timestamp: int | None = None
    existing_cookie = request.cookies.get(XSRF_COOKIE_NAME)
    if existing_cookie:
        token_bytes, timestamp = starlette_app_utils.decode_xsrf_token_string(
            existing_cookie
        )

    _set_unquoted_cookie(
        response,
        XSRF_COOKIE_NAME,
        starlette_app_utils.generate_xsrf_token_string(token_bytes, timestamp),
        secure=bool(config.get_option("server.sslCertFile")),
    )
def _set_unquoted_cookie(
response: Response,
cookie_name: str,
cookie_value: str,
*,
secure: bool,
) -> None:
"""Set a cookie without URL-encoding or quoting the value.
Starlette's standard set_cookie() method URL-encodes special characters
(like `|`) in cookie values. This function bypasses that encoding to
maintain compatibility with Tornado's cookie format, which is required
for XSRF tokens that use the format "2|mask|token|timestamp".
If a cookie with the same name already exists, it is replaced.
Cookie flags set:
- Path=/: Available to all paths
- SameSite=Lax: Protects against CSRF while allowing top-level navigations
- Secure (conditional): Added when SSL is configured
HttpOnly is intentionally NOT set for XSRF cookies because JavaScript must
read the cookie value to include it in request headers (double-submit pattern).
This matches Tornado's behavior.
Parameters
----------
response
The Starlette response to set the cookie on.
cookie_name
The name of the cookie.
cookie_value
The raw cookie value (will not be URL-encoded or quoted).
secure
Whether to add the Secure flag (should be True when using HTTPS).
"""
# Build the Set-Cookie header value manually to avoid encoding
header_value = "; ".join(
[
f"{cookie_name}={cookie_value}",
"Path=/",
"SameSite=Lax",
*(["Secure"] if secure else []),
]
)
# Remove any existing cookie with the same name before adding the new one
key_prefix = f"{cookie_name}=".encode("latin-1")
filtered_headers: list[tuple[bytes, bytes]] = [
(name, value)
for name, value in response.raw_headers
if not (
name.lower() == b"set-cookie"
and value.lower().startswith(key_prefix.lower())
)
]
filtered_headers.append((b"set-cookie", header_value.encode("latin-1")))
response.raw_headers = filtered_headers
def create_health_routes(runtime: Runtime, base_url: str | None) -> list[BaseRoute]:
    """Create health check route handlers for /_stcore/health.

    Responds 200 OK when the runtime is ready to accept browser connections
    and 503 Service Unavailable otherwise, so load balancers and
    orchestration systems can gauge readiness.

    Parameters
    ----------
    runtime
        The Streamlit runtime whose readiness is reported.
    base_url
        Optional base URL path prefix for the routes.

    Returns
    -------
    list[BaseRoute]
        Routes covering GET, HEAD, and OPTIONS for the health endpoint.
    """
    from starlette.responses import PlainTextResponse, Response
    from starlette.routing import Route

    async def _health_endpoint(request: Request) -> PlainTextResponse:
        ok, message = await runtime.is_ready_for_browser_connection
        response = PlainTextResponse(message, status_code=200 if ok else 503)
        response.headers["Cache-Control"] = "no-cache"
        await _set_cors_headers(request, response)
        _ensure_xsrf_cookie(request, response)
        return response

    async def _health_options(request: Request) -> Response:
        response = Response(status_code=204)
        response.headers["Cache-Control"] = "no-cache"
        await _set_cors_headers(request, response)
        return response

    health_path = _with_base(_ROUTE_HEALTH, base_url)
    return [
        Route(health_path, _health_endpoint, methods=["GET", "HEAD"]),
        Route(health_path, _health_options, methods=["OPTIONS"]),
    ]
def create_script_health_routes(
    runtime: Runtime, base_url: str | None
) -> list[BaseRoute]:
    """Build the script-health routes.

    The GET/HEAD handler reports whether the app script completes without
    raising (200 on success, 503 on failure); OPTIONS answers preflights.
    """
    from starlette.responses import PlainTextResponse, Response
    from starlette.routing import Route

    async def _handle_get(request: Request) -> PlainTextResponse:
        ran_ok, message = await runtime.does_script_run_without_error()
        response = PlainTextResponse(message, status_code=200 if ran_ok else 503)
        response.headers["Cache-Control"] = "no-cache"
        await _set_cors_headers(request, response)
        _ensure_xsrf_cookie(request, response)
        return response

    async def _handle_options(request: Request) -> Response:
        response = Response(status_code=204)
        response.headers["Cache-Control"] = "no-cache"
        await _set_cors_headers(request, response)
        return response

    path = _with_base(_ROUTE_SCRIPT_HEALTH, base_url)
    return [
        Route(path, _handle_get, methods=["GET", "HEAD"]),
        Route(path, _handle_options, methods=["OPTIONS"]),
    ]
def create_metrics_routes(runtime: Runtime, base_url: str | None) -> list[BaseRoute]:
    """Build the metrics routes.

    GET serves runtime stats either as protobuf (when the client's Accept
    header requests ``application/x-protobuf``) or as OpenMetrics text;
    OPTIONS answers CORS preflight requests.
    """
    from starlette.responses import PlainTextResponse, Response
    from starlette.routing import Route

    async def _serve_metrics(request: Request) -> Response:
        families = request.query_params.getlist("families")
        stats = runtime.stats_mgr.get_stats(
            family_names=families if families else None
        )
        if "application/x-protobuf" in request.headers.get("Accept", ""):
            payload = StatsRequestHandler._stats_to_proto(stats).SerializeToString()
            response = Response(payload, media_type="application/x-protobuf")
        else:
            response = PlainTextResponse(
                StatsRequestHandler._stats_to_text(stats),
                media_type="application/openmetrics-text",
            )
        await _set_cors_headers(request, response)
        return response

    async def _metrics_preflight(request: Request) -> Response:
        response = Response(status_code=204)
        response.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
        response.headers["Access-Control-Allow-Headers"] = "Accept"
        await _set_cors_headers(request, response)
        return response

    path = _with_base(_ROUTE_METRICS, base_url)
    return [
        Route(path, _serve_metrics, methods=["GET"]),
        Route(path, _metrics_preflight, methods=["OPTIONS"]),
    ]
def create_host_config_routes(base_url: str | None) -> list[BaseRoute]:
    """Build the host-config route.

    The single GET handler returns the configured allowed origins plus a
    fixed set of host capability flags as JSON.
    """
    from starlette.responses import JSONResponse
    from starlette.routing import Route

    async def _serve_host_config(request: Request) -> JSONResponse:
        origins: list[str] = list(config.get_option("client.allowedOrigins"))
        # In development mode, make sure localhost is always permitted.
        if (
            config.get_option("global.developmentMode")
            and "http://localhost" not in origins
        ):
            origins.append("http://localhost")
        response = JSONResponse(
            {
                "allowedOrigins": origins,
                "useExternalAuthToken": False,
                "enableCustomParentMessages": False,
                "enforceDownloadInNewTab": False,
                "metricsUrl": "",
                "blockErrorDialogs": False,
                "resourceCrossOriginMode": None,
            }
        )
        await _set_cors_headers(request, response)
        response.headers["Cache-Control"] = "no-cache"
        return response

    return [
        Route(
            _with_base(_ROUTE_HOST_CONFIG, base_url),
            _serve_host_config,
            methods=["GET"],
        ),
    ]
def create_media_routes(
    media_storage: MemoryMediaFileStorage, base_url: str | None
) -> list[BaseRoute]:
    """Create media file route handlers for /media/{file_id}.

    Serves media files (images, audio, video) stored by st.image, st.audio,
    st.video, and st.download_button. Supports HTTP range requests for
    streaming media playback.

    Parameters
    ----------
    media_storage
        The media file storage backend.
    base_url
        Optional base URL path prefix for the routes.

    Returns
    -------
    list[BaseRoute]
        List of Starlette Route objects for GET, HEAD, and OPTIONS methods.
    """
    from starlette.exceptions import HTTPException
    from starlette.responses import Response
    from starlette.routing import Route

    async def _media_endpoint(request: Request) -> Response:
        file_id = request.path_params["file_id"]
        try:
            media_file = media_storage.get_file(file_id)
        except MediaFileStorageError as exc:
            raise HTTPException(status_code=404, detail="File not found") from exc

        headers: dict[str, str] = {}
        if media_file.kind == MediaFileKind.DOWNLOADABLE:
            filename = media_file.filename
            if not filename:
                # Fall back to a generic name with an extension derived from
                # the file's mimetype.
                filename = (
                    f"streamlit_download"
                    f"{get_extension_for_mimetype(media_file.mimetype)}"
                )
            try:
                # Plain `filename="..."` is only valid for Latin-1 names;
                # anything else must use the RFC 5987 `filename*` form with
                # percent-encoded UTF-8 so browsers decode it correctly.
                # BUG FIX: previously this branch emitted a hard-coded
                # placeholder instead of interpolating the actual filename.
                filename.encode("latin1")
                disposition = f'filename="{filename}"'
            except UnicodeEncodeError:
                disposition = f"filename*=utf-8''{quote(filename)}"
            headers["Content-Disposition"] = f"attachment; {disposition}"

        # Ensure support for range requests (e.g. for video files)
        headers["Accept-Ranges"] = "bytes"
        content = media_file.content
        content_length = len(content)
        status_code = 200

        range_header = request.headers.get("range")
        if range_header:
            try:
                range_start, range_end = starlette_app_utils.parse_range_header(
                    range_header, content_length
                )
            except ValueError as exc:
                # Unsatisfiable range: reply 416 with the total size so the
                # client can retry with a valid range.
                raise HTTPException(
                    status_code=416,
                    detail="Invalid range",
                    headers={"Content-Range": f"bytes */{content_length}"},
                ) from exc
            status_code = 206
            content = content[range_start : range_end + 1]
            headers["Content-Range"] = (
                f"bytes {range_start}-{range_end}/{content_length}"
            )
            headers["Content-Length"] = str(len(content))
        else:
            headers["Content-Length"] = str(content_length)

        response = Response(
            content,
            status_code=status_code,
            media_type=media_file.mimetype or "text/plain",
            headers=headers,
        )
        await _set_cors_headers(request, response)
        return response

    async def _media_options(request: Request) -> Response:
        response = Response(status_code=204)
        await _set_cors_headers(request, response)
        return response

    return [
        Route(
            _with_base(_ROUTE_MEDIA, base_url),
            _media_endpoint,
            # HEAD is needed for browsers (especially WebKit) to probe media files
            methods=["GET", "HEAD"],
        ),
        Route(
            _with_base(_ROUTE_MEDIA, base_url),
            _media_options,
            methods=["OPTIONS"],
        ),
    ]
def create_upload_routes(
    runtime: Runtime,
    upload_mgr: MemoryUploadedFileManager,
    base_url: str | None,
) -> list[BaseRoute]:
    """Create file upload route handlers for /_stcore/upload_file/{session_id}/{file_id}.

    Handles file uploads from st.file_uploader widgets. Supports PUT for uploading
    files and DELETE for removing them. XSRF protection is enforced when enabled.

    Parameters
    ----------
    runtime
        The Streamlit runtime instance (used to validate session IDs).
    upload_mgr
        The uploaded file manager to store/retrieve files.
    base_url
        Optional base URL path prefix for the routes.

    Returns
    -------
    list[BaseRoute]
        List of Starlette Route objects for PUT, DELETE, and OPTIONS methods.
    """
    from starlette.datastructures import UploadFile
    from starlette.exceptions import HTTPException
    from starlette.responses import Response
    from starlette.routing import Route

    def _check_xsrf(request: Request) -> None:
        """Validate XSRF token for non-safe HTTP methods.

        Raises HTTPException with 403 if XSRF is enabled and validation fails.
        This mirrors Tornado's automatic XSRF protection for non-GET requests.
        """
        if not is_xsrf_enabled():
            return
        # Token arrives via request header; the reference value is in the cookie.
        xsrf_header = request.headers.get("X-Xsrftoken")
        xsrf_cookie = request.cookies.get(XSRF_COOKIE_NAME)
        if not validate_xsrf_token(xsrf_header, xsrf_cookie):
            raise HTTPException(status_code=403, detail="XSRF token missing or invalid")

    async def _set_upload_headers(request: Request, response: Response) -> None:
        """Attach CORS (and, when XSRF is enabled, XSRF-specific) headers."""
        response.headers["Access-Control-Allow-Methods"] = "PUT, OPTIONS, DELETE"
        response.headers["Access-Control-Allow-Headers"] = "Content-Type"
        if is_xsrf_enabled():
            # With XSRF protection enabled, restrict uploads to the server's
            # own origin and additionally allow the X-Xsrftoken request header
            # so the browser is permitted to send the token.
            response.headers["Access-Control-Allow-Origin"] = get_url(
                config.get_option("browser.serverAddress")
            )
            response.headers["Access-Control-Allow-Headers"] = (
                "X-Xsrftoken, Content-Type"
            )
            response.headers["Vary"] = "Origin"
            response.headers["Access-Control-Allow-Credentials"] = "true"
        else:
            await _set_cors_headers(request, response)

    async def _upload_options(request: Request) -> Response:
        """Answer CORS preflight requests for the upload endpoint."""
        response = Response(status_code=204)
        await _set_upload_headers(request, response)
        return response

    async def _upload_put(request: Request) -> Response:
        """Upload a file to the server."""
        _check_xsrf(request)
        session_id = request.path_params["session_id"]
        file_id = request.path_params["file_id"]
        if not runtime.is_active_session(session_id):
            raise HTTPException(status_code=400, detail="Invalid session_id")
        max_size_bytes = (  # maxUploadSize is in megabytes
            config.get_option("server.maxUploadSize") * 1024 * 1024
        )
        # 1. Fast fail via header (if present) - check before reading the body
        content_length = request.headers.get("content-length")
        if content_length:
            try:
                if int(content_length) > max_size_bytes:
                    raise HTTPException(status_code=413, detail="File too large")
            except ValueError:
                raise HTTPException(
                    status_code=400, detail="Invalid Content-Length header"
                )
        form = await request.form()
        # Exactly one uploaded file is expected per request.
        uploads = [value for value in form.values() if isinstance(value, UploadFile)]
        if len(uploads) != 1:
            raise HTTPException(
                status_code=400, detail=f"Expected 1 file, but got {len(uploads)}"
            )
        upload = uploads[0]
        # 2. Check actual file size (Content-Length may be absent or inaccurate)
        # TODO(lukasmasuch): Improve by using a streaming approach that rejects uploads as soon as
        # they exceed max_size_bytes, rather than waiting for the full upload to complete.
        try:
            data = await upload.read()
        finally:
            # Always release the underlying temp file, even if read() raises.
            upload.file.close()
        if len(data) > max_size_bytes:
            raise HTTPException(status_code=413, detail="File too large")
        upload_mgr.add_file(
            session_id=session_id,
            file=UploadedFileRec(
                file_id=file_id,
                name=upload.filename or "",
                type=upload.content_type or "application/octet-stream",
                data=data,
            ),
        )
        response = Response(status_code=204)
        await _set_upload_headers(request, response)
        return response

    async def _upload_delete(request: Request) -> Response:
        """Delete a file from the server."""
        _check_xsrf(request)
        session_id = request.path_params["session_id"]
        file_id = request.path_params["file_id"]
        upload_mgr.remove_file(session_id=session_id, file_id=file_id)
        response = Response(status_code=204)
        await _set_upload_headers(request, response)
        return response

    return [
        Route(
            _with_base(_ROUTE_UPLOAD_FILE, base_url),
            _upload_put,
            methods=["PUT"],
        ),
        Route(
            _with_base(_ROUTE_UPLOAD_FILE, base_url),
            _upload_delete,
            methods=["DELETE"],
        ),
        Route(
            _with_base(_ROUTE_UPLOAD_FILE, base_url),
            _upload_options,
            methods=["OPTIONS"],
        ),
    ]
def create_component_routes(
    component_registry: BaseComponentRegistry, base_url: str | None
) -> list[BaseRoute]:
    """Create custom component route handlers.

    Serves static assets for (v1) custom components registered in the
    component registry. The path has the shape ``{component_name}/{filename}``.

    Parameters
    ----------
    component_registry
        Registry used to resolve a component name to its asset directory.
    base_url
        Optional base URL path prefix for the routes.

    Returns
    -------
    list[BaseRoute]
        List of Starlette Route objects for GET and OPTIONS methods.
    """
    import anyio
    from starlette.exceptions import HTTPException
    from starlette.responses import Response
    from starlette.routing import Route

    async def _component_endpoint(request: Request) -> Response:
        path = request.path_params["path"]
        parts = path.split("/", maxsplit=1)
        # str.split always yields at least one element, so only an empty
        # component name needs to be rejected (previous `len(parts) == 0`
        # check was unreachable).
        if not parts[0]:
            raise HTTPException(status_code=404, detail="Component not found")
        component_name = parts[0]
        filename = parts[1] if len(parts) == 2 else ""
        component_root = component_registry.get_component_path(component_name)
        if component_root is None:
            raise HTTPException(status_code=404, detail="Component not found")
        # Use build_safe_abspath to properly resolve symlinks and prevent traversal
        abspath = build_safe_abspath(component_root, filename)
        if abspath is None:
            # Return 400 for malicious paths (consistent with middleware behavior)
            raise HTTPException(status_code=400, detail="Bad Request")
        try:
            async with await anyio.open_file(abspath, "rb") as file:
                data = await file.read()
        except OSError as exc:
            raise HTTPException(status_code=404, detail="read error") from exc
        response = Response(content=data, media_type=guess_content_type(abspath))
        await _set_cors_headers(request, response)
        # The HTML entry point must always be revalidated; other assets may
        # be cached by the browser.
        if not filename or filename.endswith(".html"):
            response.headers["Cache-Control"] = "no-cache"
        else:
            response.headers["Cache-Control"] = "public"
        return response

    async def _component_options(request: Request) -> Response:
        response = Response(status_code=204)
        await _set_cors_headers(request, response)
        return response

    return [
        Route(
            _with_base(_ROUTE_COMPONENTS_V1, base_url),
            _component_endpoint,
            methods=["GET"],
        ),
        Route(
            _with_base(_ROUTE_COMPONENTS_V1, base_url),
            _component_options,
            methods=["OPTIONS"],
        ),
    ]
def create_bidi_component_routes(
    bidi_component_manager: BidiComponentManager, base_url: str | None
) -> list[BaseRoute]:
    """Build routes that serve static assets for bidirectional components."""
    import anyio
    from anyio import Path as AsyncPath
    from starlette.responses import PlainTextResponse, Response
    from starlette.routing import Route

    async def _serve_asset(request: Request) -> Response:
        async def _plain(body: str, status_code: int) -> PlainTextResponse:
            reply = PlainTextResponse(body, status_code=status_code)
            await _set_cors_headers(request, reply)
            return reply

        segments = request.path_params["path"].split("/")
        component_name = segments[0]
        if not component_name:
            return await _plain("not found", 404)
        if bidi_component_manager.get(component_name) is None:
            return await _plain("not found", 404)
        component_root = bidi_component_manager.get_component_path(component_name)
        if component_root is None:
            return await _plain("not found", 404)
        asset_name = "/".join(segments[1:])
        if not asset_name or asset_name.endswith("/"):
            return await _plain("not found", 404)
        abspath = build_safe_abspath(component_root, asset_name)
        if abspath is None:
            # Return 400 for unsafe paths (matches Tornado behavior for opacity)
            return await _plain("Bad Request", 400)
        if await AsyncPath(abspath).is_dir():
            return await _plain("not found", 404)
        try:
            async with await anyio.open_file(abspath, "rb") as asset_file:
                payload = await asset_file.read()
        except OSError:
            # Strip CR/LF from the path before logging to avoid log injection.
            sanitized_abspath = abspath.replace("\n", "").replace("\r", "")
            _LOGGER.exception(
                "Error reading bidi component asset: %s", sanitized_abspath
            )
            return await _plain("read error", 404)
        response = Response(content=payload, media_type=guess_content_type(abspath))
        await _set_cors_headers(request, response)
        cacheable = not asset_name.endswith(".html")
        response.headers["Cache-Control"] = "public" if cacheable else "no-cache"
        return response

    async def _asset_options(request: Request) -> Response:
        response = Response(status_code=204)
        await _set_cors_headers(request, response)
        return response

    path = _with_base(_ROUTE_COMPONENTS_V2, base_url)
    return [
        Route(path, _serve_asset, methods=["GET"]),
        Route(path, _asset_options, methods=["OPTIONS"]),
    ]
def create_app_static_serving_routes(
    main_script_path: str | None, base_url: str | None
) -> list[BaseRoute]:
    """Build routes that serve files from the app's static directory."""
    from anyio import Path as AsyncPath
    from starlette.exceptions import HTTPException
    from starlette.responses import FileResponse, Response
    from starlette.routing import Route

    if main_script_path:
        app_static_root = os.path.realpath(
            file_util.get_app_static_dir(main_script_path)
        )
    else:
        app_static_root = None

    async def _serve_static(request: Request) -> Response:
        if not app_static_root:
            raise HTTPException(status_code=404, detail="File not found")
        requested = request.path_params.get("path", "")
        safe_path = build_safe_abspath(app_static_root, requested)
        if safe_path is None:
            # Return 400 for malicious paths (consistent with middleware behavior)
            raise HTTPException(status_code=400, detail="Bad Request")
        target = AsyncPath(safe_path)
        if not await target.exists() or await target.is_dir():
            raise HTTPException(status_code=404, detail="File not found")
        if (await target.stat()).st_size > MAX_APP_STATIC_FILE_SIZE:
            raise HTTPException(
                status_code=404,
                detail="File is too large",
            )
        # Extensions outside the safe list are forced to text/plain so the
        # browser never interprets them as active content.
        suffix = Path(safe_path).suffix.lower()
        media_type = (
            None if suffix in SAFE_APP_STATIC_FILE_EXTENSIONS else "text/plain"
        )
        response = FileResponse(safe_path, media_type=media_type)
        response.headers["Access-Control-Allow-Origin"] = "*"
        response.headers["X-Content-Type-Options"] = "nosniff"
        return response

    async def _static_options(_request: Request) -> Response:
        response = Response(status_code=204)
        response.headers["Access-Control-Allow-Origin"] = "*"
        response.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
        response.headers["Access-Control-Allow-Headers"] = "Content-Type"
        return response

    path = _with_base(_ROUTE_APP_STATIC, base_url)
    return [
        Route(path, _serve_static, methods=["GET"]),
        Route(path, _static_options, methods=["OPTIONS"]),
    ]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_routes.py",
"license": "Apache License 2.0",
"lines": 736,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_routes_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for starlette_routes module."""
from __future__ import annotations
import asyncio
from unittest.mock import MagicMock, patch
from starlette.responses import Response
from streamlit.web.server.starlette.starlette_routes import (
_ensure_xsrf_cookie,
_set_cors_headers,
_set_unquoted_cookie,
_with_base,
)
from streamlit.web.server.starlette.starlette_server_config import XSRF_COOKIE_NAME
from tests.testutil import patch_config_options
class TestWithBase:
    """Tests for the _with_base path-prefixing helper."""

    @patch_config_options({"server.baseUrlPath": ""})
    def test_no_base_url(self) -> None:
        """Without a base URL the path is simply rooted."""
        assert _with_base("_stcore/health") == "/_stcore/health"

    @patch_config_options({"server.baseUrlPath": ""})
    def test_no_base_url_with_leading_slash(self) -> None:
        """A leading slash on the input path is tolerated."""
        assert _with_base("/_stcore/health") == "/_stcore/health"

    @patch_config_options({"server.baseUrlPath": "myapp"})
    def test_with_base_url(self) -> None:
        """The configured base URL is prepended."""
        assert _with_base("_stcore/health") == "/myapp/_stcore/health"

    @patch_config_options({"server.baseUrlPath": "/myapp/"})
    def test_strips_slashes_from_base(self) -> None:
        """Surrounding slashes on the configured base are removed."""
        assert _with_base("_stcore/health") == "/myapp/_stcore/health"

    @patch_config_options({"server.baseUrlPath": "shouldbeignored"})
    def test_explicit_base_url_overrides_config(self) -> None:
        """An explicit base_url argument wins over the config value."""
        path = _with_base("_stcore/health", base_url="custom")
        assert path == "/custom/_stcore/health"

    @patch_config_options({"server.baseUrlPath": "shouldbeignored"})
    def test_explicit_empty_base_url(self) -> None:
        """An explicit empty base_url yields an unprefixed path."""
        assert _with_base("_stcore/health", base_url="") == "/_stcore/health"

    @patch_config_options({"server.baseUrlPath": "fromconfig"})
    def test_explicit_none_base_url_uses_config(self) -> None:
        """Passing base_url=None falls back to the config value."""
        path = _with_base("_stcore/health", base_url=None)
        assert path == "/fromconfig/_stcore/health"
class TestSetCorsHeaders:
    """Tests for _set_cors_headers function."""

    @staticmethod
    def _apply(origin=...):
        """Run _set_cors_headers on mocks; return the response headers dict.

        When *origin* is supplied, the mocked request's ``headers.get``
        returns it; otherwise the request stays a bare MagicMock, matching
        the code paths that never consult the Origin header.
        """
        request = MagicMock()
        if origin is not ...:
            request.headers = MagicMock()
            request.headers.get.return_value = origin
        response = MagicMock()
        response.headers = {}
        asyncio.run(_set_cors_headers(request, response))
        return response.headers

    @patch_config_options({"server.enableCORS": False})
    def test_allows_all_when_cors_disabled(self) -> None:
        """All origins are allowed when CORS is disabled."""
        assert self._apply()["Access-Control-Allow-Origin"] == "*"

    @patch_config_options({"global.developmentMode": True, "server.enableCORS": True})
    def test_allows_all_in_dev_mode(self) -> None:
        """All origins are allowed in development mode."""
        assert self._apply()["Access-Control-Allow-Origin"] == "*"

    @patch_config_options(
        {
            "server.enableCORS": True,
            "global.developmentMode": False,
        }
    )
    def test_no_header_when_origin_not_allowed(self) -> None:
        """No header is set when the origin is not in the allowed list."""
        headers = self._apply("http://random-untrusted-origin.com")
        assert "Access-Control-Allow-Origin" not in headers

    @patch_config_options(
        {
            "server.enableCORS": True,
            "global.developmentMode": False,
        }
    )
    def test_no_header_when_no_origin(self) -> None:
        """No header is set when the request carries no Origin header."""
        assert "Access-Control-Allow-Origin" not in self._apply(None)

    @patch_config_options(
        {
            "server.enableCORS": True,
            "global.developmentMode": False,
            "server.corsAllowedOrigins": ["http://allowed.example.com"],
        }
    )
    def test_allows_configured_origin(self) -> None:
        """Origins on the configured allow-list are permitted."""
        headers = self._apply("http://allowed.example.com")
        assert headers["Access-Control-Allow-Origin"] == "http://allowed.example.com"
class TestEnsureXsrfCookie:
    """Tests for _ensure_xsrf_cookie function."""

    @patch_config_options({"server.enableXsrfProtection": False})
    def test_no_cookie_when_xsrf_disabled(self) -> None:
        """Test that no cookie is set when XSRF protection is disabled."""
        request = MagicMock()
        request.cookies = {}
        response = Response()
        _ensure_xsrf_cookie(request, response)
        # Collect every Set-Cookie header on the response; none should exist.
        cookie_headers = [
            value
            for name, value in response.raw_headers
            if name.lower() == b"set-cookie"
        ]
        assert len(cookie_headers) == 0

    @patch_config_options(
        {"server.enableXsrfProtection": True, "server.sslCertFile": None}
    )
    def test_generates_new_token_when_no_cookie(self) -> None:
        """Test that a new XSRF token is generated when no cookie exists."""
        request = MagicMock()
        request.cookies = {}
        response = Response()
        _ensure_xsrf_cookie(request, response)
        cookie_headers = [
            value.decode("latin-1")
            for name, value in response.raw_headers
            if name.lower() == b"set-cookie"
        ]
        assert len(cookie_headers) == 1
        # New tokens use Tornado's version-2 format: "2|mask|masked|timestamp".
        assert cookie_headers[0].startswith(f"{XSRF_COOKIE_NAME}=2|")
        # No SSL cert is configured, so the Secure attribute must be absent.
        assert "Secure" not in cookie_headers[0]

    @patch_config_options(
        {"server.enableXsrfProtection": True, "server.sslCertFile": "/path/to/cert"}
    )
    def test_sets_secure_flag_with_ssl(self) -> None:
        """Test that Secure flag is added when SSL is configured."""
        request = MagicMock()
        request.cookies = {}
        response = Response()
        _ensure_xsrf_cookie(request, response)
        cookie_headers = [
            value.decode("latin-1")
            for name, value in response.raw_headers
            if name.lower() == b"set-cookie"
        ]
        assert len(cookie_headers) == 1
        assert "Secure" in cookie_headers[0]

    @patch_config_options(
        {"server.enableXsrfProtection": True, "server.sslCertFile": None}
    )
    @patch(
        "streamlit.web.server.starlette.starlette_routes.starlette_app_utils.decode_xsrf_token_string"
    )
    @patch(
        "streamlit.web.server.starlette.starlette_routes.starlette_app_utils.generate_xsrf_token_string"
    )
    def test_preserves_existing_token(
        self, mock_generate: MagicMock, mock_decode: MagicMock
    ) -> None:
        """Test that existing token bytes and timestamp are preserved."""
        # NOTE: @patch decorators apply bottom-up, so the last-listed patch
        # (generate_xsrf_token_string) maps to the first mock argument.
        existing_token = b"existing_token_bytes"
        existing_timestamp = 1234567890
        mock_decode.return_value = (existing_token, existing_timestamp)
        mock_generate.return_value = "2|mocked|token|1234567890"
        request = MagicMock()
        request.cookies = {XSRF_COOKIE_NAME: "existing_cookie_value"}
        response = Response()
        _ensure_xsrf_cookie(request, response)
        # The incoming cookie is decoded, and the refreshed cookie must be
        # regenerated from the same token bytes and timestamp.
        mock_decode.assert_called_once_with("existing_cookie_value")
        mock_generate.assert_called_once_with(existing_token, existing_timestamp)
class TestSetUnquotedCookie:
    """Tests for _set_unquoted_cookie function."""

    @staticmethod
    def _set_cookie_headers(response: Response) -> list[str]:
        """Return the decoded Set-Cookie header values on *response*."""
        return [
            value.decode("latin-1")
            for name, value in response.raw_headers
            if name.lower() == b"set-cookie"
        ]

    def test_sets_cookie_without_quoting(self) -> None:
        """The cookie value must appear verbatim, without quoting/escaping."""
        response = Response()
        raw_value = "2|abcd1234|efgh5678|1234567890"
        _set_unquoted_cookie(response, "test_cookie", raw_value, secure=False)
        headers = self._set_cookie_headers(response)
        assert len(headers) == 1
        header = headers[0]
        assert header.startswith(f"test_cookie={raw_value};")
        assert "Path=/" in header
        assert "SameSite=Lax" in header
        assert "Secure" not in header

    def test_sets_secure_flag_when_requested(self) -> None:
        """secure=True adds the Secure attribute."""
        response = Response()
        _set_unquoted_cookie(response, "test_cookie", "value", secure=True)
        headers = self._set_cookie_headers(response)
        assert len(headers) == 1
        assert "Secure" in headers[0]

    def test_replaces_existing_cookie_with_same_name(self) -> None:
        """Setting a cookie drops any prior cookie with the same name."""
        response = Response()
        response.set_cookie("test_cookie", "old_value")
        _set_unquoted_cookie(response, "test_cookie", "new_value", secure=False)
        headers = self._set_cookie_headers(response)
        assert len(headers) == 1
        assert "new_value" in headers[0]
        assert "old_value" not in headers[0]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/starlette/starlette_routes_test.py",
"license": "Apache License 2.0",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_app_utils.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for the Starlette server implementation."""
from __future__ import annotations
import binascii
import os
import time
def parse_range_header(range_header: str, total_size: int) -> tuple[int, int]:
    """Parse an HTTP Range header into inclusive (start, end) byte offsets.

    Only single-range ``bytes=...`` specifications are supported; other
    units, multiple ranges, malformed numbers, or out-of-bounds positions
    raise ``ValueError``.

    Parameters
    ----------
    range_header : str
        The value of the Range header (e.g. "bytes=0-1023").
    total_size : int
        The total size of the resource in bytes.

    Returns
    -------
    tuple[int, int]
        Inclusive (start, end) byte positions, clamped to the resource size.
    """
    if total_size <= 0:
        raise ValueError("empty content")

    unit, eq, spec = range_header.partition("=")
    if unit.strip().lower() != "bytes" or not eq or "," in spec:
        raise ValueError("invalid range")
    spec = spec.strip()

    # Suffix form "-N": the final N bytes of the resource.
    if spec.startswith("-"):
        try:
            suffix_len = int(spec[1:])
        except ValueError:
            raise ValueError("invalid suffix range") from None
        if suffix_len <= 0:
            raise ValueError("invalid suffix range")
        return max(total_size - suffix_len, 0), total_size - 1

    first_str, dash, last_str = spec.partition("-")
    if not first_str:
        raise ValueError("missing range start")
    first = int(first_str)
    if not 0 <= first < total_size:
        raise ValueError("start out of range")
    if not dash or not last_str:
        # Open-ended range ("N-"): serve through the end of the resource.
        return first, total_size - 1
    last = int(last_str)
    if last < first:
        raise ValueError("end before start")
    return first, min(last, total_size - 1)
def websocket_mask(mask: bytes, data: bytes) -> bytes:
    """Mask or unmask data for WebSocket transmission per RFC 6455.

    Each byte of data is XORed with mask[i % 4]. This operation is
    bidirectional - applying it twice with the same mask returns the
    original data.

    Parameters
    ----------
    mask : bytes
        A 4-byte masking key.
    data : bytes
        The data to mask or unmask.

    Returns
    -------
    bytes
        The masked/unmasked data.

    Raises
    ------
    ValueError
        If the mask is not exactly 4 bytes long.
    """
    if len(mask) != 4:
        raise ValueError("mask must be 4 bytes")
    if not data:
        return b""
    # Performance: instead of XORing byte-by-byte in a Python loop, tile the
    # 4-byte key across the payload length and XOR the two buffers as big
    # integers — a single C-level operation with identical results.
    n = len(data)
    tiled_mask = (mask * (n // 4 + 1))[:n]
    xored = int.from_bytes(data, "big") ^ int.from_bytes(tiled_mask, "big")
    return xored.to_bytes(n, "big")
def create_signed_value(
    secret: str,
    name: str,
    value: str | bytes,
) -> bytes:
    """Sign *value* with itsdangerous, salted by the cookie name.

    Note: this signing format is NOT compatible with Tornado's secure-cookie
    scheme. Cookies produced here cannot be read by Tornado's
    get_signed_cookie/get_secure_cookie and vice versa, so switching between
    the Tornado and Starlette backends invalidates existing auth cookies
    (_streamlit_user) and users must re-authenticate. This is expected.

    Parameters
    ----------
    secret
        The secret key used for signing.
    name
        The cookie name, used as the signing salt for extra security.
    value
        The value to sign.

    Returns
    -------
    bytes
        The signed value as bytes.
    """
    from itsdangerous import URLSafeTimedSerializer

    payload = value.decode("utf-8") if isinstance(value, bytes) else value
    signed: str = URLSafeTimedSerializer(secret, salt=name).dumps(payload)
    return signed.encode("utf-8")
def decode_signed_value(
    secret: str,
    name: str,
    value: str | bytes,
    max_age_days: float = 31,
) -> bytes | None:
    """Verify and decode a value signed by :func:`create_signed_value`.

    Parameters
    ----------
    secret
        The secret key used for signing.
    name
        The cookie name, used as the signing salt.
    value
        The signed value to decode.
    max_age_days
        Maximum age of the cookie in days (default: 31).

    Returns
    -------
    bytes | None
        The decoded value as bytes, or None if the signature is invalid,
        the value has expired, or the payload is malformed.
    """
    from itsdangerous import BadSignature, SignatureExpired, URLSafeTimedSerializer

    if not value:
        return None
    try:
        raw = value.decode("utf-8") if isinstance(value, bytes) else value
        serializer = URLSafeTimedSerializer(secret, salt=name)
        payload = serializer.loads(raw, max_age=int(max_age_days * 86400))
    except (BadSignature, SignatureExpired, UnicodeDecodeError):
        return None
    if isinstance(payload, str):
        return payload.encode("utf-8")
    if isinstance(payload, bytes):
        return payload
    # Unexpected payload type from the deserializer — treat as invalid.
    return None
def generate_xsrf_token_string(
    token_bytes: bytes | None = None, timestamp: int | None = None
) -> str:
    """Generate a version 2 XSRF token string compatible with Tornado.

    The output format is ``2|mask|masked_token|timestamp``, where the mask is
    4 random bytes and the token is XOR-masked with it (hex-encoded).

    Parameters
    ----------
    token_bytes
        The raw token bytes to encode. If None, generates 16 random bytes.
    timestamp
        The Unix timestamp to include in the token. If None, uses current time.

    Returns
    -------
    str
        The encoded XSRF token string in version 2 format.
    """
    token = os.urandom(16) if token_bytes is None else token_bytes
    ts = int(time.time()) if timestamp is None else timestamp
    # A fresh random mask is applied on every encoding, mirroring Tornado.
    mask = os.urandom(4)
    mask_hex = binascii.b2a_hex(mask).decode("ascii")
    masked_hex = binascii.b2a_hex(websocket_mask(mask, token)).decode("ascii")
    return f"2|{mask_hex}|{masked_hex}|{ts}"
def decode_xsrf_token_string(
cookie_value: str,
) -> tuple[bytes | None, int | None]:
"""Decode a Tornado XSRF token string.
Supports version 2 (masked) and version 1 (unmasked) tokens.
Parameters
----------
cookie_value
The XSRF token cookie value to decode.
Returns
-------
tuple[bytes | None, int | None]
A tuple of (token_bytes, timestamp). Both values are None if decoding fails.
"""
if not cookie_value:
return None, None
value = cookie_value.strip("\"'")
if not value:
return None, None
try:
# V2 tokens:
if value.startswith("2|"):
_, mask_hex, masked_hex, timestamp_str = value.split("|")
mask = binascii.a2b_hex(mask_hex.encode("ascii"))
masked = binascii.a2b_hex(masked_hex.encode("ascii"))
token = websocket_mask(mask, masked)
return token, int(timestamp_str)
# V1 tokens:
# TODO(lukasmasuch): This is likely unused in Streamlit since only V2 tokens
# are used. We might be able to just remove this part.
token = binascii.a2b_hex(value.encode("ascii"))
if not token:
return None, None
# V1 tokens don't have an embedded timestamp, so we use current time
# as a placeholder (matches Tornado's behavior). This timestamp is
# informational only and not used for token validation.
return token, int(time.time())
except (binascii.Error, ValueError):
return None, None
def generate_random_hex_string(num_bytes: int = 32) -> str:
    """Return a cryptographically secure random string in hex encoding.

    Parameters
    ----------
    num_bytes
        How many random bytes to draw from ``os.urandom`` (default: 32).
        The returned string has twice this many hex characters.

    Returns
    -------
    str
        A lowercase hex-encoded random string.
    """
    return os.urandom(num_bytes).hex()
def validate_xsrf_token(supplied_token: str | None, xsrf_cookie: str | None) -> bool:
    """Compare a supplied XSRF token against the XSRF cookie value.

    Mirrors Tornado's XSRF validation so the frontend can share XSRF logic
    between the WebSocket handshake and HTTP uploads regardless of backend.
    Both inputs are decoded down to their raw token bytes and compared in
    constant time; embedded timestamps are ignored.
    """
    import hmac

    if not supplied_token or not xsrf_cookie:
        return False
    # Unmask both sides to raw token bytes; timestamps are irrelevant here.
    supplied_bytes, _ = decode_xsrf_token_string(supplied_token)
    expected_bytes, _ = decode_xsrf_token_string(xsrf_cookie)
    if not supplied_bytes or not expected_bytes:
        return False
    # Constant-time comparison to avoid timing side channels.
    return hmac.compare_digest(supplied_bytes, expected_bytes)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_app_utils.py",
"license": "Apache License 2.0",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_gzip_middleware.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom GZip middleware that excludes audio/video content from compression."""
from __future__ import annotations
from typing import TYPE_CHECKING, Final
from starlette.datastructures import Headers
from starlette.middleware.gzip import (
DEFAULT_EXCLUDED_CONTENT_TYPES,
GZipMiddleware,
GZipResponder,
IdentityResponder,
)
if TYPE_CHECKING:
from starlette.types import ASGIApp, Message, Receive, Scope, Send
# Extended exclusion list: Starlette's default + audio/video prefixes.
# Compressing binary media content breaks playback in browsers,
# especially with range requests.
# NOTE: this tuple is passed to str.startswith(), so each entry acts as a
# content-type *prefix* (e.g. "audio/" matches "audio/mpeg"), not an exact match.
_EXCLUDED_CONTENT_TYPES: Final = (
    *DEFAULT_EXCLUDED_CONTENT_TYPES,
    "audio/",
    "video/",
)
def _handle_response_start(
    responder: IdentityResponder | GZipResponder, message: Message
) -> None:
    """Record response metadata on *responder* from a response-start message.

    Stores the ``http.response.start`` message on the responder and derives
    two flags from its headers: whether a Content-Encoding header is already
    present, and whether the Content-Type matches one of the excluded
    (non-compressible) prefixes.

    Parameters
    ----------
    responder
        The responder instance (IdentityResponder or GZipResponder) whose
        state is updated.
    message
        The ASGI "http.response.start" message carrying the response headers.
    """
    responder.initial_message = message
    headers = Headers(raw=message["headers"])
    responder.content_encoding_set = "content-encoding" in headers
    content_type = headers.get("content-type", "")
    responder.content_type_is_excluded = content_type.startswith(
        _EXCLUDED_CONTENT_TYPES
    )
class _MediaAwareIdentityResponder(IdentityResponder):
    """IdentityResponder variant honoring the extended media exclusions.

    Identical to Starlette's IdentityResponder except that the response-start
    message is processed by _handle_response_start, which also marks audio/
    and video/ content types as excluded from compression. Selected when the
    client does not advertise gzip support.
    """

    async def send_with_compression(self, message: Message) -> None:
        """Capture the start message; delegate all other messages upstream."""
        if message["type"] != "http.response.start":
            await super().send_with_compression(message)
            return
        _handle_response_start(self, message)
class _MediaAwareGZipResponder(GZipResponder):
    """GZipResponder variant honoring the extended media exclusions.

    Identical to Starlette's GZipResponder except that the response-start
    message is processed by _handle_response_start, which also marks audio/
    and video/ content types as excluded from compression. Selected when the
    client advertises gzip support.
    """

    async def send_with_compression(self, message: Message) -> None:
        """Capture the start message; delegate all other messages upstream."""
        if message["type"] != "http.response.start":
            await super().send_with_compression(message)
            return
        _handle_response_start(self, message)
class MediaAwareGZipMiddleware(GZipMiddleware):
    """GZip middleware that never compresses audio/video responses.

    A drop-in extension of Starlette's GZipMiddleware whose responders use
    the extended exclusion list (audio/ and video/ prefixes in addition to
    Starlette's defaults). Avoiding compression for media content provides
    better browser compatibility (some browsers like WebKit have issues with
    explicit identity encoding on media).
    """

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        if scope["type"] != "http":
            # Non-HTTP scopes (e.g. websocket, lifespan) pass through untouched.
            await self.app(scope, receive, send)
            return

        wants_gzip = "gzip" in Headers(scope=scope).get("Accept-Encoding", "")
        responder: ASGIApp
        if wants_gzip:
            responder = _MediaAwareGZipResponder(
                self.app, self.minimum_size, compresslevel=self.compresslevel
            )
        else:
            responder = _MediaAwareIdentityResponder(self.app, self.minimum_size)
        await responder(scope, receive, send)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_gzip_middleware.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_server_config.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for the Starlette server."""
from __future__ import annotations
from typing import Final
# Cookie name for storing signed user identity information.
USER_COOKIE_NAME: Final = "_streamlit_user"
# Cookie name for storing signed OAuth tokens (access_token, id_token).
TOKENS_COOKIE_NAME: Final = "_streamlit_user_tokens"
# Cookie name for Cross-Site Request Forgery (XSRF) token validation.
XSRF_COOKIE_NAME: Final = "_streamlit_xsrf"
# Cookie name for server-side session management.
SESSION_COOKIE_NAME: Final = "_streamlit_session"
# Max pending messages per client in the send queue before disconnecting.
# Each connected client has its own queue; under normal conditions the queue drains
# continuously and rarely exceeds single digits. This limit protects against slow
# clients (bad network, paused tabs) causing unbounded server memory growth.
# With N concurrent users, worst case memory is N * WEBSOCKET_MAX_SEND_QUEUE_SIZE * msg_size.
WEBSOCKET_MAX_SEND_QUEUE_SIZE: Final = 500
# Gzip middleware configuration:
# Do not GZip responses that are smaller than this minimum size in bytes:
GZIP_MINIMUM_SIZE: Final = 500
# Used during GZip compression. It is an integer ranging from 1 to 9.
# Lower value results in faster compression but larger file sizes, while higher value
# results in slower compression but smaller file sizes.
GZIP_COMPRESSLEVEL: Final = 6
# When server.port is not available it will look for the next available port
# up to this number of retries.
MAX_PORT_SEARCH_RETRIES: Final = 100
# Default address to bind to when no address is configured:
# NOTE: 0.0.0.0 listens on all network interfaces (hence the S104 suppression).
DEFAULT_SERVER_ADDRESS: Final = "0.0.0.0" # noqa: S104
# Default WebSocket ping interval in seconds, can be configured by the user
DEFAULT_WEBSOCKET_PING_INTERVAL: Final = 30
# Default WebSocket ping timeout in seconds, can be configured by the user
DEFAULT_WEBSOCKET_PING_TIMEOUT: Final = 30
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/web/server/starlette/starlette_server_config.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_app_utils_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for starlette_app_utils.py."""
from __future__ import annotations
import binascii
import time
import unittest
import pytest
from tornado.util import _websocket_mask
from streamlit.web.server.starlette import starlette_app_utils
class StarletteServerUtilsTest(unittest.TestCase):
    """Unit tests for the helper functions in starlette_app_utils.

    Covers HTTP Range-header parsing, WebSocket XOR masking (cross-checked
    against Tornado's private implementation), itsdangerous-backed signed
    cookie values, XSRF token encoding/decoding (v1 and v2 formats), and
    random hex-string generation.
    """
    def test_parse_range_header_bytes(self):
        """Test parsing standard byte ranges."""
        # Entire file
        assert starlette_app_utils.parse_range_header("bytes=0-", 100) == (0, 99)
        # First 10 bytes
        assert starlette_app_utils.parse_range_header("bytes=0-9", 100) == (0, 9)
        # Middle range
        assert starlette_app_utils.parse_range_header("bytes=10-19", 100) == (10, 19)
        # Last 10 bytes (suffix)
        assert starlette_app_utils.parse_range_header("bytes=-10", 100) == (90, 99)
        # Range exceeding end caps at end
        assert starlette_app_utils.parse_range_header("bytes=90-200", 100) == (
            90,
            99,
        )
    def test_parse_range_header_errors(self):
        """Test invalid range headers raise ValueError."""
        # Empty content
        with pytest.raises(ValueError, match="empty content"):
            starlette_app_utils.parse_range_header("bytes=0-10", 0)
        # Invalid units
        with pytest.raises(ValueError, match="invalid range"):
            starlette_app_utils.parse_range_header("bits=0-10", 100)
        # Multiple ranges not supported
        with pytest.raises(ValueError, match="invalid range"):
            starlette_app_utils.parse_range_header("bytes=0-10, 20-30", 100)
        # Invalid start
        with pytest.raises(ValueError, match="invalid suffix range"):
            starlette_app_utils.parse_range_header("bytes=-5-10", 100)
        # Start > total
        with pytest.raises(ValueError, match="start out of range"):
            starlette_app_utils.parse_range_header("bytes=150-200", 100)
        # End before start
        with pytest.raises(ValueError, match="end before start"):
            starlette_app_utils.parse_range_header("bytes=50-40", 100)
    def test_websocket_mask_compatibility(self):
        """Test that websocket_mask matches Tornado's implementation."""
        mask = b"1234"
        data = b"hello world"
        expected = _websocket_mask(mask, data)
        actual = starlette_app_utils.websocket_mask(mask, data)
        assert actual == expected
        # It should be reversible (XOR): applying the same mask twice
        # restores the original data.
        masked = actual
        unmasked = starlette_app_utils.websocket_mask(mask, masked)
        assert unmasked == data
    def test_websocket_mask_empty_data(self):
        """Test that masking empty data returns empty bytes."""
        mask = b"1234"
        data = b""
        result = starlette_app_utils.websocket_mask(mask, data)
        assert result == b""
    def test_websocket_mask_invalid_mask_length(self):
        """Test that invalid mask length raises ValueError."""
        # WebSocket masks are always exactly 4 bytes (RFC 6455).
        with pytest.raises(ValueError, match="mask must be 4 bytes"):
            starlette_app_utils.websocket_mask(b"12", b"data")
        with pytest.raises(ValueError, match="mask must be 4 bytes"):
            starlette_app_utils.websocket_mask(b"12345", b"data")
        with pytest.raises(ValueError, match="mask must be 4 bytes"):
            starlette_app_utils.websocket_mask(b"", b"data")
    def test_websocket_mask_various_lengths(self):
        """Test masking data of various lengths matches Tornado."""
        mask = b"\x01\x02\x03\x04"
        # Test lengths 1-10 to cover different modulo cases
        for length in range(1, 11):
            data = bytes(range(length))
            expected = _websocket_mask(mask, data)
            actual = starlette_app_utils.websocket_mask(mask, data)
            assert actual == expected, f"Mismatch for length {length}"
    def test_signed_value_roundtrip(self):
        """Test that create_signed_value and decode_signed_value work together."""
        secret = "test_secret_key"
        name = "test_cookie"
        value = "test_value"
        # Create a signed value
        signed_value = starlette_app_utils.create_signed_value(secret, name, value)
        # Decode using our utility
        decoded = starlette_app_utils.decode_signed_value(secret, name, signed_value)
        assert decoded is not None
        assert decoded.decode("utf-8") == value
    def test_signed_value_with_bytes(self):
        """Test that signed value works with bytes input."""
        secret = "test_secret_key"
        name = "test_cookie"
        value = b"test_value_bytes"
        signed_value = starlette_app_utils.create_signed_value(secret, name, value)
        decoded = starlette_app_utils.decode_signed_value(secret, name, signed_value)
        assert decoded == value
    def test_decode_signed_value_invalid_signature(self):
        """Test that invalid signature returns None."""
        secret = "test_secret_key"
        name = "test_cookie"
        # Tampered value
        result = starlette_app_utils.decode_signed_value(
            secret, name, "invalid_signed_value"
        )
        assert result is None
    def test_decode_signed_value_wrong_secret(self):
        """Test that wrong secret returns None."""
        secret = "test_secret_key"
        name = "test_cookie"
        value = "test_value"
        signed_value = starlette_app_utils.create_signed_value(secret, name, value)
        result = starlette_app_utils.decode_signed_value(
            "wrong_secret", name, signed_value
        )
        assert result is None
    def test_decode_signed_value_empty_value(self):
        """Test that empty value returns None."""
        secret = "test_secret_key"
        name = "test_cookie"
        # Empty string
        assert starlette_app_utils.decode_signed_value(secret, name, "") is None
        # Empty bytes
        assert starlette_app_utils.decode_signed_value(secret, name, b"") is None
    def test_decode_signed_value_non_utf8_bytes(self):
        """Test that non-UTF-8 bytes return None instead of raising."""
        secret = "test_secret_key"
        name = "test_cookie"
        # Invalid UTF-8 sequence
        invalid_utf8 = b"\xff\xfe\x00\x01"
        result = starlette_app_utils.decode_signed_value(secret, name, invalid_utf8)
        assert result is None
    def test_xsrf_token_roundtrip(self):
        """Test generating and then decoding an XSRF token."""
        token = b"some_random_token_bytes"
        timestamp = int(time.time())
        # Generate string
        cookie_val = starlette_app_utils.generate_xsrf_token_string(token, timestamp)
        # Verify format: 2|mask|masked_token|timestamp
        assert cookie_val.startswith("2|")
        parts = cookie_val.split("|")
        assert len(parts) == 4
        # Decode string
        decoded_token, decoded_timestamp = starlette_app_utils.decode_xsrf_token_string(
            cookie_val
        )
        assert decoded_token == token
        assert decoded_timestamp == timestamp
    def test_decode_xsrf_token_v1(self):
        """Test decoding a legacy v1 XSRF token (unmasked hex)."""
        token = b"legacy_token"
        hex_token = binascii.b2a_hex(token).decode("ascii")
        # decode_xsrf_token_string treats anything not starting with '2|' as v1
        decoded_token, decoded_timestamp = starlette_app_utils.decode_xsrf_token_string(
            hex_token
        )
        assert decoded_token == token
        # For v1 tokens, it returns current time as timestamp
        # (informational placeholder only; never used for validation).
        assert decoded_timestamp is not None
        assert abs(decoded_timestamp - time.time()) < 2
    def test_decode_xsrf_token_invalid(self):
        """Test decoding invalid tokens returns (None, None)."""
        assert starlette_app_utils.decode_xsrf_token_string("invalid") == (
            None,
            None,
        )
        assert starlette_app_utils.decode_xsrf_token_string("2|bad|format") == (
            None,
            None,
        )
    def test_decode_xsrf_token_empty(self):
        """Test that empty/whitespace-only strings return (None, None)."""
        # Empty string
        assert starlette_app_utils.decode_xsrf_token_string("") == (None, None)
        # Whitespace only (stripped to empty)
        assert starlette_app_utils.decode_xsrf_token_string(" ") == (None, None)
        # Only quotes (stripped to empty)
        assert starlette_app_utils.decode_xsrf_token_string('""') == (None, None)
        assert starlette_app_utils.decode_xsrf_token_string("''") == (None, None)
    def test_generate_random_hex_string_default(self):
        """Test generate_random_hex_string with default length."""
        result = starlette_app_utils.generate_random_hex_string()
        # Default is 32 bytes = 64 hex characters
        assert len(result) == 64
        # Should be valid hex (int() raises ValueError otherwise)
        int(result, 16)
    def test_generate_random_hex_string_custom_length(self):
        """Test generate_random_hex_string with custom byte count."""
        result = starlette_app_utils.generate_random_hex_string(16)
        # 16 bytes = 32 hex characters
        assert len(result) == 32
        # Should be valid hex (int() raises ValueError otherwise)
        int(result, 16)
    def test_generate_random_hex_string_uniqueness(self):
        """Test that generate_random_hex_string produces unique values."""
        results = {starlette_app_utils.generate_random_hex_string() for _ in range(100)}
        # All 100 should be unique
        assert len(results) == 100
class TestValidateXsrfToken:
    """Tests for the validate_xsrf_token helper."""

    def test_returns_false_when_supplied_token_none(self) -> None:
        """A missing supplied token must be rejected."""
        cookie = starlette_app_utils.generate_xsrf_token_string()
        assert starlette_app_utils.validate_xsrf_token(None, cookie) is False

    def test_returns_false_when_cookie_none(self) -> None:
        """A missing cookie must be rejected."""
        token = starlette_app_utils.generate_xsrf_token_string()
        assert starlette_app_utils.validate_xsrf_token(token, None) is False

    def test_returns_false_when_both_none(self) -> None:
        """Rejects when neither side is present."""
        assert starlette_app_utils.validate_xsrf_token(None, None) is False

    def test_returns_true_for_matching_tokens(self) -> None:
        """The same token on both sides validates."""
        token = starlette_app_utils.generate_xsrf_token_string()
        assert starlette_app_utils.validate_xsrf_token(token, token) is True

    def test_returns_true_for_matching_tokens_with_different_timestamps(self) -> None:
        """Only the raw token bytes matter, not the embedded timestamps."""
        raw = b"0123456789abcdef"
        first = starlette_app_utils.generate_xsrf_token_string(raw, timestamp=12345)
        second = starlette_app_utils.generate_xsrf_token_string(raw, timestamp=67890)
        assert starlette_app_utils.validate_xsrf_token(first, second) is True

    def test_returns_false_for_different_tokens(self) -> None:
        """Two independently generated tokens must not validate."""
        first = starlette_app_utils.generate_xsrf_token_string()
        second = starlette_app_utils.generate_xsrf_token_string()
        assert starlette_app_utils.validate_xsrf_token(first, second) is False

    def test_returns_false_for_invalid_token_format(self) -> None:
        """A malformed supplied token must be rejected."""
        cookie = starlette_app_utils.generate_xsrf_token_string()
        assert starlette_app_utils.validate_xsrf_token("invalid-token", cookie) is False
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/starlette/starlette_app_utils_test.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_gzip_middleware_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for starlette_gzip_middleware module."""
from __future__ import annotations
import pytest
from starlette.applications import Starlette
from starlette.responses import Response
from starlette.routing import Route
from starlette.testclient import TestClient
from streamlit.web.server.starlette.starlette_gzip_middleware import (
_EXCLUDED_CONTENT_TYPES,
MediaAwareGZipMiddleware,
)
def _create_test_app(content_type: str, body: bytes = b"x" * 1000) -> Starlette:
    """Build a single-route Starlette app serving *body* as *content_type*,
    wrapped in MediaAwareGZipMiddleware with minimum_size=100."""

    async def endpoint(request):
        return Response(content=body, media_type=content_type)

    application = Starlette(routes=[Route("/", endpoint)])
    application.add_middleware(MediaAwareGZipMiddleware, minimum_size=100)
    return application
class TestMediaAwareGZipMiddleware:
    """Tests for MediaAwareGZipMiddleware.

    All apps are built via _create_test_app with a 1000-byte body, which
    exceeds the middleware's minimum_size=100, so compression is expected
    whenever the content type is compressible and the client accepts gzip.
    """
    def test_compresses_text_content(self) -> None:
        """Test that text content is compressed when client supports gzip."""
        app = _create_test_app("text/plain")
        client = TestClient(app)
        response = client.get("/", headers={"Accept-Encoding": "gzip"})
        assert response.status_code == 200
        assert response.headers.get("content-encoding") == "gzip"
    def test_compresses_json_content(self) -> None:
        """Test that JSON content is compressed when client supports gzip."""
        app = _create_test_app("application/json")
        client = TestClient(app)
        response = client.get("/", headers={"Accept-Encoding": "gzip"})
        assert response.status_code == 200
        assert response.headers.get("content-encoding") == "gzip"
    def test_does_not_compress_audio_content(self) -> None:
        """Test that audio content is not compressed."""
        app = _create_test_app("audio/mpeg")
        client = TestClient(app)
        response = client.get("/", headers={"Accept-Encoding": "gzip"})
        assert response.status_code == 200
        assert response.headers.get("content-encoding") is None
    def test_does_not_compress_video_content(self) -> None:
        """Test that video content is not compressed."""
        app = _create_test_app("video/mp4")
        client = TestClient(app)
        response = client.get("/", headers={"Accept-Encoding": "gzip"})
        assert response.status_code == 200
        assert response.headers.get("content-encoding") is None
    # ids mirror the parameter values so each case gets a readable test name.
    @pytest.mark.parametrize(
        "content_type",
        [
            "audio/mpeg",
            "audio/wav",
            "audio/ogg",
            "audio/webm",
            "video/mp4",
            "video/webm",
            "video/ogg",
        ],
        ids=[
            "audio/mpeg",
            "audio/wav",
            "audio/ogg",
            "audio/webm",
            "video/mp4",
            "video/webm",
            "video/ogg",
        ],
    )
    def test_excludes_various_media_types(self, content_type: str) -> None:
        """Test that various audio/video types are excluded from compression."""
        app = _create_test_app(content_type)
        client = TestClient(app)
        response = client.get("/", headers={"Accept-Encoding": "gzip"})
        assert response.status_code == 200
        assert response.headers.get("content-encoding") is None
    def test_does_not_compress_when_client_does_not_support_gzip(self) -> None:
        """Test that content is not compressed when client doesn't support gzip."""
        app = _create_test_app("text/plain")
        client = TestClient(app)
        # Explicitly set Accept-Encoding to something other than gzip
        response = client.get("/", headers={"Accept-Encoding": "identity"})
        assert response.status_code == 200
        assert response.headers.get("content-encoding") is None
    def test_does_not_compress_small_content(self) -> None:
        """Test that small content is not compressed (below minimum_size)."""
        # 5-byte body is below the middleware's minimum_size=100 threshold.
        app = _create_test_app("text/plain", body=b"small")
        client = TestClient(app)
        response = client.get("/", headers={"Accept-Encoding": "gzip"})
        assert response.status_code == 200
        # Small content should not be compressed
        assert response.headers.get("content-encoding") is None
class TestExcludedContentTypes:
    """Tests for the _EXCLUDED_CONTENT_TYPES constant."""

    def test_includes_audio_prefix(self) -> None:
        """The audio/ prefix must be excluded from compression."""
        assert "audio/" in _EXCLUDED_CONTENT_TYPES

    def test_includes_video_prefix(self) -> None:
        """The video/ prefix must be excluded from compression."""
        assert "video/" in _EXCLUDED_CONTENT_TYPES

    def test_includes_starlette_defaults(self) -> None:
        """Starlette's own default exclusions must be preserved."""
        # text/event-stream is excluded by Starlette by default (for SSE).
        assert "text/event-stream" in _EXCLUDED_CONTENT_TYPES

    def test_extends_starlette_defaults(self) -> None:
        """The exclusion list extends (does not replace) Starlette's defaults."""
        from starlette.middleware.gzip import DEFAULT_EXCLUDED_CONTENT_TYPES

        # Every Starlette default must appear in our extended tuple.
        assert set(DEFAULT_EXCLUDED_CONTENT_TYPES).issubset(_EXCLUDED_CONTENT_TYPES)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/web/server/starlette/starlette_gzip_middleware_test.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/host_config_bypass.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test app for host config bypass feature."""
import numpy as np
import pandas as pd
import pydeck as pdk
import streamlit as st
def page1():
    # Intentionally empty: this test app only needs navigation entries to
    # exist; the pages themselves render nothing.
    pass
def page2():
    # Intentionally empty placeholder page.
    pass
def page3():
    # Intentionally empty placeholder page.
    pass
# Multi-page navigation grouped into two sections; the selected page runs
# via pg.run() (all pages are empty placeholders).
pages = {
    "General": [
        st.Page(page1, title="Home", icon=":material/home:"),
        st.Page(page2, title="Data visualizations", icon=":material/monitoring:"),
    ],
    "Admin": [st.Page(page3, title="Settings", icon=":material/settings:")],
}
pg = st.navigation(pages)
pg.run()
# Interactive widgets used by the e2e test to confirm the WebSocket
# connection is live and the app responds to input.
st.subheader("Connection status test", divider="gray")
st.slider("Slider", value=50, min_value=0, max_value=100)
st.multiselect(
    "Multiselect",
    default=["Option 1", "Option 2"],
    options=["Option 1", "Option 2", "Option 3", "Option 4", "Option 5"],
)
# Add a button to verify interactivity
st.write("Button:")
if st.button("Click me"):
    st.write("Button clicked!")
with st.sidebar:
    st.metric("Temperature", "70 °F", "1.2 °F")
    st.metric("Wind", "9 mph", "-8%")
# Elements for testing disableFullscreenMode
st.subheader("Fullscreen mode test", divider="gray")
# Always generate the same data (fixed seed keeps screenshots stable)
np.random.seed(0)
st.dataframe(
    pd.DataFrame(np.random.randint(0, 100, size=(10, 4)), columns=list("ABCD")),
    key="test_dataframe",
)
# Elements for testing mapboxToken - uses pydeck with explicit Mapbox style
# This ensures the mapboxToken from host config is actually used in API requests
st.subheader("Mapbox token test", divider="gray")
st.pydeck_chart(
    pdk.Deck(
        map_style="mapbox://styles/mapbox/light-v9",
        map_provider="mapbox",
        initial_view_state=pdk.ViewState(
            latitude=37.76,
            longitude=-122.4,
            zoom=11,
        ),
    )
)
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/host_config_bypass.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/host_config_bypass_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E tests for host config bypass feature.
These tests verify that when a host provides minimal configuration via
window.__streamlit, the WebSocket connection can be established immediately
without waiting for the host-config endpoint response (bypass mode).
"""
from __future__ import annotations
import json
from urllib import parse
import pytest
from playwright.sync_api import Page, Route, WebSocket, expect
from e2e_playwright.conftest import build_app_url, wait_until
from e2e_playwright.shared.app_utils import goto_app
def _origin_from_url(url: str) -> str:
split_url = parse.urlsplit(url)
return f"{split_url.scheme}://{split_url.netloc}"
def _verify_fullscreen_button(page: Page, *, should_be_visible: bool) -> None:
    """Hover the test dataframe and assert fullscreen-button visibility.

    Parameters
    ----------
    page : Page
        The Playwright page object.
    should_be_visible : bool
        If True, the fullscreen button must be visible after hovering.
        If False, the element toolbar must be shown but the fullscreen
        button must not be attached at all.
    """
    expect(page.get_by_text("Fullscreen mode test")).to_be_visible()
    dataframe = page.get_by_test_id("stDataFrame")
    dataframe.hover()
    if not should_be_visible:
        toolbar = dataframe.get_by_test_id("stElementToolbar")
        expect(toolbar).to_have_css("opacity", "1")
        expect(page.get_by_role("button", name="Fullscreen")).not_to_be_attached()
        return
    expect(dataframe.get_by_role("button", name="Fullscreen")).to_be_visible()
def _inject_bypass_config(page: Page, backend_url: str) -> None:
    """Install window.__streamlit before page load to enable bypass mode.

    The injected values mirror the defaults served by routes.py:
    allowedOrigins=[backend origin], useExternalAuthToken=False,
    metricsUrl="". With these present, the frontend can open its WebSocket
    without waiting on the host-config endpoint.
    """
    origin = _origin_from_url(backend_url)
    init_script = f"""
        window.__streamlit = {{
            BACKEND_BASE_URL: "{backend_url}",
            HOST_CONFIG: {{
                allowedOrigins: ["{origin}"],
                useExternalAuthToken: false,
                metricsUrl: ""
            }}
        }}
        """
    page.add_init_script(init_script)
def test_bypass_mode_executes_websocket_and_host_config_in_parallel(
    page: Page, app_base_url: str
) -> None:
    """Verify bypass mode fires both the WebSocket and host-config requests.

    In bypass mode the WebSocket connection is not blocked on host-config,
    but the host-config endpoint is still fetched in the background. Both
    events are asserted without any ordering requirement, since the two
    happen in parallel.
    """
    recorded: list[dict[str, str]] = []

    # Both trackers must be registered BEFORE config injection/navigation.
    def on_websocket(_ws: WebSocket) -> None:
        # Record that a WebSocket connection was opened.
        recorded.append({"type": "websocket"})

    page.on("websocket", on_websocket)

    def on_host_config(route: Route) -> None:
        # Record the host-config request and let it pass through untouched.
        recorded.append({"type": "host-config"})
        route.continue_()

    page.route("**/_stcore/host-config", on_host_config)

    _inject_bypass_config(page, app_base_url)
    goto_app(page, app_base_url)

    websocket_events = [e for e in recorded if e["type"] == "websocket"]
    host_config_events = [e for e in recorded if e["type"] == "host-config"]
    assert len(websocket_events) > 0, "WebSocket should have connected in bypass mode"
    assert len(host_config_events) > 0, (
        "Host-config should still be called (in background) in bypass mode"
    )
def test_bypass_mode_app_becomes_interactive(page: Page, app_base_url: str) -> None:
    """An app loaded via bypass mode renders content and accepts input."""
    _inject_bypass_config(page, app_base_url)
    goto_app(page, app_base_url)

    # Static content rendered.
    expect(page.get_by_text("Connection status test")).to_be_visible()
    expect(page.get_by_text("Slider")).to_be_visible()

    # Widget round-trip works: clicking the button updates the app.
    app_button = page.get_by_test_id("stButton").locator("button")
    expect(app_button).to_be_enabled()
    app_button.click()
    expect(page.get_by_text("Button clicked!")).to_be_visible()
def test_bypass_mode_host_config_values_take_precedence(
    page: Page, app_base_url: str
) -> None:
    """window.__streamlit.HOST_CONFIG values must override endpoint values.

    Even though the host-config endpoint responds with its own values, the
    initial window config should win for allowedOrigins,
    useExternalAuthToken, and metricsUrl.
    """
    # Values deliberately different from the endpoint defaults.
    origins = [
        "https://custom.example.com",
        "https://another.example.com",
    ]
    metrics_url = "https://custom-metrics.example.com"
    page.add_init_script(
        f"""
        window.__streamlit = {{
            BACKEND_BASE_URL: "{app_base_url}",
            HOST_CONFIG: {{
                allowedOrigins: {json.dumps(origins)},
                useExternalAuthToken: false, // False so we don't block on auth token
                metricsUrl: "{metrics_url}"
            }}
        }}
        """
    )
    goto_app(page, app_base_url)

    # A successful, interactive load is the signal: broken precedence would
    # prevent the app from starting correctly.
    expect(page.get_by_text("Connection status test")).to_be_visible()
    expect(page.get_by_text("Slider")).to_be_visible()
    expect(page.get_by_test_id("stButton").locator("button")).to_be_enabled()
def test_default_path_without_bypass_config(page: Page, app_base_url: str) -> None:
    """Without window.__streamlit, host-config completes before the WebSocket.

    This pins the default (non-bypass) startup path: the frontend waits for
    the host-config endpoint before opening the WebSocket connection.
    """
    timeline: list[dict[str, str]] = []

    def on_host_config(route: Route) -> None:
        # Record start, let the request through, then record completion.
        timeline.append({"type": "host-config-start"})
        route.continue_()
        timeline.append({"type": "host-config-complete"})

    page.route("**/_stcore/host-config", on_host_config)

    def on_websocket(_ws: WebSocket) -> None:
        timeline.append({"type": "websocket"})

    page.on("websocket", on_websocket)

    # No init script injected: the app must take the default path.
    goto_app(page, app_base_url)

    host_config_starts = [e for e in timeline if e["type"] == "host-config-start"]
    websocket_events = [e for e in timeline if e["type"] == "websocket"]
    assert len(host_config_starts) > 0, "Host-config should be called"
    assert len(websocket_events) > 0, "WebSocket should connect"

    hc_index = timeline.index(host_config_starts[0])
    ws_index = timeline.index(websocket_events[0])
    assert hc_index < ws_index, (
        f"Default path: host-config ({hc_index}) should start before "
        f"WebSocket connects ({ws_index})"
    )

    # Sanity: the app still renders and responds.
    expect(page.get_by_text("Connection status test")).to_be_visible()
    expect(page.get_by_text("Slider")).to_be_visible()
    expect(page.get_by_test_id("stButton").locator("button")).to_be_enabled()
def test_bypass_mode_handles_connection_errors_gracefully(
    page: Page, app_base_url: str
) -> None:
    """Bypass mode still loads when the host-config endpoint is unreachable.

    The WebSocket is established without waiting on the endpoint, so the app
    renders; the background host-config failure is then surfaced through the
    regular connection-error dialog, just like on the default path.
    """
    websocket_state = {"connected": False}
    host_config_state = {"attempted": False}

    def on_websocket(_ws: WebSocket) -> None:
        websocket_state["connected"] = True

    page.on("websocket", on_websocket)

    def fail_host_config(route: Route) -> None:
        # Simulate an unreachable endpoint from the very first request.
        host_config_state["attempted"] = True
        route.abort("failed")

    page.route("**/_stcore/host-config", fail_host_config)

    _inject_bypass_config(page, app_base_url)
    goto_app(page, app_base_url)

    assert websocket_state["connected"], (
        "WebSocket should connect in bypass mode even when host-config fails"
    )
    # Bypass must not skip the endpoint entirely.
    assert host_config_state["attempted"], (
        "Host-config should still be called (in background)"
    )

    expect(page.get_by_text("Connection status test")).to_be_visible()

    # The failed background request must still produce the error dialog.
    wait_until(
        page,
        lambda: (
            page.get_by_test_id("stDialog").is_visible()
            and "Connection error" in page.get_by_test_id("stDialog").inner_text()
        ),
    )
def test_bypass_requires_all_minimal_fields(page: Page, app_base_url: str) -> None:
    """An incomplete window config must fall back to the default path.

    Bypass requires BACKEND_BASE_URL, allowedOrigins, AND useExternalAuthToken;
    here useExternalAuthToken is omitted, so host-config must complete before
    the WebSocket connects.
    """
    timeline: list[dict[str, str]] = []

    def on_host_config(route: Route) -> None:
        timeline.append({"type": "host-config-start"})
        route.continue_()

    page.route("**/_stcore/host-config", on_host_config)

    def on_websocket(_ws: WebSocket) -> None:
        timeline.append({"type": "websocket"})

    page.on("websocket", on_websocket)

    origin = _origin_from_url(app_base_url)
    page.add_init_script(
        f"""
        window.__streamlit = {{
            BACKEND_BASE_URL: "{app_base_url}",
            HOST_CONFIG: {{
                allowedOrigins: ["{origin}"]
                // Missing useExternalAuthToken - should NOT enable bypass
            }}
        }}
        """
    )
    goto_app(page, app_base_url)

    host_config_starts = [e for e in timeline if e["type"] == "host-config-start"]
    websocket_events = [e for e in timeline if e["type"] == "websocket"]
    assert len(host_config_starts) > 0, "Host-config should be called"
    assert len(websocket_events) > 0, "WebSocket should connect"

    hc_index = timeline.index(host_config_starts[0])
    ws_index = timeline.index(websocket_events[0])
    assert hc_index < ws_index, (
        "Incomplete config should use default path: "
        f"host-config ({hc_index}) before WebSocket ({ws_index})"
    )

    # App should still load via the fallback path.
    expect(page.get_by_text("Connection status test")).to_be_visible()
def test_bypass_requires_non_empty_allowed_origins(
    page: Page, app_base_url: str
) -> None:
    """An empty allowedOrigins list must NOT enable bypass mode."""
    timeline: list[dict[str, str]] = []

    def on_host_config(route: Route) -> None:
        timeline.append({"type": "host-config-start"})
        route.continue_()

    page.route("**/_stcore/host-config", on_host_config)

    def on_websocket(_ws: WebSocket) -> None:
        timeline.append({"type": "websocket"})

    page.on("websocket", on_websocket)

    page.add_init_script(
        f"""
        window.__streamlit = {{
            BACKEND_BASE_URL: "{app_base_url}",
            HOST_CONFIG: {{
                allowedOrigins: [], // Empty array should NOT enable bypass
                useExternalAuthToken: false
            }}
        }}
        """
    )
    goto_app(page, app_base_url)

    # Default path expected: host-config strictly before the WebSocket.
    host_config_starts = [e for e in timeline if e["type"] == "host-config-start"]
    websocket_events = [e for e in timeline if e["type"] == "websocket"]
    assert len(host_config_starts) > 0, "Host-config should be called"
    assert len(websocket_events) > 0, "WebSocket should connect"

    hc_index = timeline.index(host_config_starts[0])
    ws_index = timeline.index(websocket_events[0])
    assert hc_index < ws_index, (
        "Empty allowedOrigins should use default path: "
        f"host-config ({hc_index}) before WebSocket ({ws_index})"
    )

    # App should still load via the fallback path.
    expect(page.get_by_text("Connection status test")).to_be_visible()
def test_disable_fullscreen_mode_via_window_in_bypass(
    page: Page, app_base_url: str
) -> None:
    """disableFullscreenMode set via the window config hides the button.

    With disableFullscreenMode: true in bypass mode, the dataframe toolbar
    must not offer a fullscreen button.
    """
    origin = _origin_from_url(app_base_url)
    page.add_init_script(
        f"""
        window.__streamlit = {{
            BACKEND_BASE_URL: "{app_base_url}",
            HOST_CONFIG: {{
                allowedOrigins: ["{origin}"],
                useExternalAuthToken: false,
                disableFullscreenMode: true
            }}
        }}
        """
    )
    goto_app(page, app_base_url)
    _verify_fullscreen_button(page, should_be_visible=False)
def test_disable_fullscreen_mode_window_takes_precedence_over_endpoint_in_bypass(
    page: Page, app_base_url: str
) -> None:
    """Window config beats the endpoint for disableFullscreenMode in bypass.

    Window config: disableFullscreenMode = false (allow fullscreen)
    Endpoint: disableFullscreenMode = true (block fullscreen)
    Expected: fullscreen button IS visible (window wins).
    """

    def force_disable_fullscreen(route: Route) -> None:
        # Rewrite the real endpoint response to claim fullscreen is disabled.
        upstream = route.fetch()
        payload = upstream.json()
        payload["disableFullscreenMode"] = True
        route.fulfill(response=upstream, json=payload)

    page.route("**/_stcore/host-config", force_disable_fullscreen)

    origin = _origin_from_url(app_base_url)
    page.add_init_script(
        f"""
        window.__streamlit = {{
            BACKEND_BASE_URL: "{app_base_url}",
            HOST_CONFIG: {{
                allowedOrigins: ["{origin}"],
                useExternalAuthToken: false,
                disableFullscreenMode: false
            }}
        }}
        """
    )
    goto_app(page, app_base_url)
    _verify_fullscreen_button(page, should_be_visible=True)
def test_disable_fullscreen_mode_window_takes_precedence_over_endpoint_without_bypass(
    page: Page, app_base_url: str
) -> None:
    """Window config beats the endpoint for disableFullscreenMode sans bypass.

    The window config is deliberately incomplete (no useExternalAuthToken),
    so bypass stays off and the app waits for the endpoint. The endpoint is
    rewritten to return disableFullscreenMode = true, yet the window's false
    must win during reconciliation, keeping the fullscreen button visible.
    """

    def force_disable_fullscreen(route: Route) -> None:
        # Rewrite the real endpoint response to claim fullscreen is disabled.
        upstream = route.fetch()
        payload = upstream.json()
        payload["disableFullscreenMode"] = True
        route.fulfill(response=upstream, json=payload)

    page.route("**/_stcore/host-config", force_disable_fullscreen)

    page.add_init_script(
        f"""
        window.__streamlit = {{
            BACKEND_BASE_URL: "{app_base_url}",
            HOST_CONFIG: {{
                disableFullscreenMode: false
            }}
        }}
        """
    )
    goto_app(page, app_base_url)
    _verify_fullscreen_button(page, should_be_visible=True)
def test_block_error_dialogs_via_window_config_bypass(
    page: Page, app_base_url: str
) -> None:
    """blockErrorDialogs=true suppresses dialogs; errors go to the host.

    When blockErrorDialogs is set, error dialogs must not be shown; errors
    are instead forwarded to the host via postMessage (observed here as a
    console log).
    """
    origin = _origin_from_url(app_base_url)
    page.add_init_script(
        f"""
        window.__streamlit = {{
            BACKEND_BASE_URL: "{app_base_url}",
            HOST_CONFIG: {{
                allowedOrigins: ["{origin}"],
                useExternalAuthToken: false,
                blockErrorDialogs: true
            }}
        }}
        """
    )
    goto_app(page, app_base_url)
    expect(page.get_by_text("Connection status test")).to_be_visible()

    # Collect console output; the error must be logged, not shown in a dialog.
    console_messages: list[str] = []
    page.on("console", lambda msg: console_messages.append(msg.text))

    # A bogus page path triggers the "page not found" error.
    page.goto(build_app_url(app_base_url, path="/nonexistent_page"))

    wait_until(
        page,
        lambda: any(
            "The page that you have requested does not seem to exist" in message
            for message in console_messages
        ),
    )

    # No error dialog should have appeared.
    expect(page.get_by_role("dialog")).not_to_be_attached()
# Firefox doesn't render pydeck charts properly in CI, so no Mapbox API requests are made
@pytest.mark.skip_browser("firefox")
def test_mapbox_token_via_window_config_bypass(page: Page, app_base_url: str) -> None:
    """mapboxToken from the window config must appear in Mapbox requests.

    The app contains a pydeck chart with an explicit Mapbox style
    (mapbox://styles/mapbox/light-v9), which triggers requests to
    api.mapbox.com carrying the access token. We intercept those requests
    and verify our custom token is included.
    """
    token = "pk.test_window_config_token_12345"
    intercepted_urls: list[str] = []

    def capture_mapbox_request(route: Route) -> None:
        # Only the URL (which carries the token) matters; never load tiles.
        intercepted_urls.append(route.request.url)
        route.abort()

    page.route("**/api.mapbox.com/**", capture_mapbox_request)

    origin = _origin_from_url(app_base_url)
    page.add_init_script(
        f"""
        window.__streamlit = {{
            BACKEND_BASE_URL: "{app_base_url}",
            HOST_CONFIG: {{
                allowedOrigins: ["{origin}"],
                useExternalAuthToken: false,
                mapboxToken: "{token}"
            }}
        }}
        """
    )
    goto_app(page, app_base_url)

    # Wait for the pydeck chart (which uses the Mapbox style) to render.
    expect(page.get_by_text("Mapbox token test")).to_be_visible()
    expect(page.get_by_test_id("stDeckGlJsonChart")).to_be_visible(timeout=15000)

    # Tile loading takes a moment; wait until at least one request fired.
    wait_until(page, lambda: len(intercepted_urls) > 0, timeout=15000)

    assert any(token in url for url in intercepted_urls), (
        f"Expected mapboxToken '{token}' to be used in Mapbox API requests. "
        f"Requests made: {intercepted_urls}"
    )
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/host_config_bypass_test.py",
"license": "Apache License 2.0",
"lines": 477,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/runtime/caching/ttl_cleanup_cache.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LRU cache supporting TTL and max entry count, as well as release hooks for cleanup."""
from collections.abc import Callable
from typing import TypeVar
from cachetools import TTLCache
# override is in typing after Python 3.12 and can be imported from there after 3.11
# support is retired.
from typing_extensions import override
from streamlit.runtime.caching.cache_utils import OnRelease
K = TypeVar("K")
V = TypeVar("V")
class TTLCleanupCache(TTLCache[K, V]):
    """A TTLCache that invokes a release hook when entries are evicted.

    The hook fires reliably only for automatic eviction — TTL expiry or
    maxsize overflow. A plain ``del`` skips it; use :meth:`safe_del` to
    remove an entry and have ``on_release`` called.
    """

    def __init__(
        self,
        maxsize: float,
        ttl: float,
        timer: Callable[[], float],
        on_release: OnRelease,
    ) -> None:
        """Create a cache with the given size, TTL, and release hook.

        Parameters
        ----------
        maxsize : float
            The maximum number of elements this cache should hold.
        ttl : float
            The amount of time a cache entry should remain valid, in seconds.
        timer : Callable[[], float]
            The timer function to use to fetch the current time.
        on_release : OnRelease
            The function to call with cache entries when they are removed
            from the cache.
        """
        super().__init__(maxsize=maxsize, ttl=ttl, timer=timer)
        self._on_release = on_release

    @override
    def popitem(self) -> tuple[K, V]:
        # Called by cachetools on maxsize overflow; hook in the release.
        evicted_key, evicted_value = super().popitem()
        self._on_release(evicted_value)
        return evicted_key, evicted_value

    @override
    def expire(self, time: float | None = None) -> list[tuple[K, V]]:
        # Called by cachetools on TTL expiry; release every expired value.
        expired = super().expire(time)
        for _key, expired_value in expired:
            self._on_release(expired_value)
        return expired

    def safe_del(self, key: K) -> None:
        """Remove ``key`` and invoke the release hook on its value."""
        was_present = key in self
        removed_value = self.get(key)
        del self[key]
        # Check the membership flag, not None, so stored None values are
        # still released.
        if was_present:
            self._on_release(removed_value)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/runtime/caching/ttl_cleanup_cache.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/runtime/caching/ttl_cleanup_cache_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from streamlit.runtime.caching.ttl_cleanup_cache import TTLCleanupCache
def fake_timer() -> float:
    """Deterministic timer stub: always reports the same instant."""
    return 1234
class TestTTLCleanupCache:
    # Unit tests for TTLCleanupCache's on_release hook. The expected
    # release sequences below encode cachetools' eviction behavior
    # (LRU order on overflow, lazy TTL expiry), so the exact operation
    # order in each test is significant.

    def test_releases_when_hits_size(self):
        """Unit test for on_release.

        Tests that on_release is called when entries are removed from the cache due to
        hitting the size limit.
        """
        released_items = []

        def on_release(item: int) -> None:
            released_items.append(item)

        maxsize = 5
        test_cache = TTLCleanupCache(
            maxsize=maxsize, ttl=120, timer=fake_timer, on_release=on_release
        )

        # Add a few elements to the cache.
        for i in range(maxsize):
            test_cache[i] = i + 10

        # No items released yet.
        assert released_items == []

        # Access the third item.
        assert test_cache[2] == 12

        # Add three more items. The first, second, and fourth items should be released.
        # (Key 2 was just touched, so LRU eviction skips it.)
        for i in range(3):
            test_cache[i + 10] = i + 20
        assert released_items == [10, 11, 13]
        assert test_cache[2] == 12
        assert test_cache[4] == 14
        assert test_cache[10] == 20
        assert test_cache[11] == 21
        assert test_cache[12] == 22

    def test_releases_when_hits_ttl(self):
        """Unit test for on_release.

        Tests that on_release is called when entries are removed from the cache due to
        hitting the TTL.
        """
        released_items = []

        def on_release(item: int) -> None:
            released_items.append(item)

        # ttl=0 with a frozen timer: every entry is expired immediately.
        test_cache = TTLCleanupCache(
            maxsize=500, ttl=0, timer=fake_timer, on_release=on_release
        )

        # Add a few elements to the cache.
        for i in range(5):
            test_cache[i] = i + 10

        # Cache should have released all but the last item. This is an implementation
        # quirk: It releases prior to write, and doesn't check TTL expiration on write.
        assert released_items == [10, 11, 12, 13]

        # Validate that the cache doesn't have the last item, either.
        assert 4 not in test_cache

        # Validate the last item was not released on read. This is a cache quirk: Items
        # are not removed on read.
        assert released_items == [10, 11, 12, 13]

        # Manually expire, and validate the last item will be removed.
        test_cache.expire()
        assert released_items == [10, 11, 12, 13, 14]

    def test_clear_calls_on_release(self):
        """Tests that clear() will call release() on all elements."""
        released_items = []

        def on_release(item: int) -> None:
            released_items.append(item)

        # Unbounded size/TTL so nothing is evicted before clear().
        test_cache = TTLCleanupCache(
            maxsize=math.inf, ttl=math.inf, timer=fake_timer, on_release=on_release
        )

        # Add a few elements to the cache.
        for i in range(5):
            test_cache[i] = i + 10

        # No items released yet.
        assert released_items == []

        test_cache.clear()
        assert released_items == [i + 10 for i in range(5)]

    def test_safe_del_calls_release(self):
        """Tests that safe_del() will call release() on elements."""
        released_items = []

        def on_release(item: int) -> None:
            released_items.append(item)

        # Unbounded size/TTL so only safe_del() triggers releases.
        test_cache = TTLCleanupCache(
            maxsize=math.inf, ttl=math.inf, timer=fake_timer, on_release=on_release
        )

        # Add a few elements to the cache.
        for i in range(5):
            test_cache[i] = i + 10

        # No items released yet.
        assert released_items == []

        test_cache.safe_del(1)
        test_cache.safe_del(3)
        assert released_items == [11, 13]
        # Remaining keys are untouched and keep insertion order.
        assert list(test_cache.keys()) == [0, 2, 4]
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/runtime/caching/ttl_cleanup_cache_test.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:scripts/sync_ruff_version.py | #!/usr/bin/env python
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Synchronize ruff version between pyproject.toml and pre-commit config.
This script ensures that the ruff version specified in pyproject.toml [dependency-groups]
matches the version in .pre-commit-config.yaml. It can either check if the
versions are in sync or update the pre-commit config to match.
Examples
--------
Check if versions are in sync:
$ python scripts/sync_ruff_version.py --check
Synchronize versions (update pre-commit config to match pyproject.toml):
$ python scripts/sync_ruff_version.py
"""
from __future__ import annotations
import argparse
import os
import re
import sys
def _get_repo_root() -> str:
return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def _get_ruff_version_from_pyproject(repo_root: str) -> str | None:
pyproject_path = os.path.join(repo_root, "pyproject.toml")
try:
with open(pyproject_path, encoding="utf-8") as f:
for line in f:
stripped_line = line.strip()
# Match ruff version in dependency groups: "ruff==0.14.11",
match = re.match(r'^"ruff==([0-9]+\.[0-9]+\.[0-9]+)",?', stripped_line)
if match:
return match.group(1)
except FileNotFoundError:
print(f"Error: File not found: {pyproject_path}")
return None
return None
def _get_ruff_version_from_pre_commit_config(repo_root: str) -> str | None:
pre_commit_config_path = os.path.join(repo_root, ".pre-commit-config.yaml")
try:
with open(pre_commit_config_path, encoding="utf-8") as f:
content = f.read()
# Use re.DOTALL to handle variable number of lines between repo and rev
match = re.search(
r"repo:\s*https://github\.com/astral-sh/ruff-pre-commit.*?rev:\s*v([0-9]+\.[0-9]+\.[0-9]+)",
content,
re.DOTALL,
)
if match:
return match.group(1)
except FileNotFoundError:
print(f"Error: File not found: {pre_commit_config_path}")
return None
return None
def _update_pre_commit_config(repo_root: str, new_version: str) -> bool:
pre_commit_config_path = os.path.join(repo_root, ".pre-commit-config.yaml")
try:
with open(pre_commit_config_path, encoding="utf-8") as f:
content = f.read()
# Use re.DOTALL to handle variable number of lines between repo and rev
pattern = r"(repo:\s*https://github\.com/astral-sh/ruff-pre-commit.*?rev:\s*v)[0-9]+\.[0-9]+\.[0-9]+"
new_content = re.sub(pattern, rf"\g<1>{new_version}", content, flags=re.DOTALL)
if new_content == content:
print("Warning: No changes made to .pre-commit-config.yaml")
return False
with open(pre_commit_config_path, "w", encoding="utf-8") as f:
f.write(new_content)
print(f"Updated .pre-commit-config.yaml to ruff version v{new_version}")
return True
except OSError as e:
print(f"Error updating .pre-commit-config.yaml: {e}")
return False
def check_sync_status(repo_root: str) -> bool:
    """Report whether both config files pin the same ruff version.

    Parameters
    ----------
    repo_root : str
        Path to the repository root directory.

    Returns
    -------
    bool
        True if versions are in sync, False otherwise.
    """
    # Read both pins up front so each missing file reports its own error.
    version_in_pyproject = _get_ruff_version_from_pyproject(repo_root)
    version_in_pre_commit = _get_ruff_version_from_pre_commit_config(repo_root)

    if version_in_pyproject is None:
        print("Error: Could not find ruff version in pyproject.toml")
        return False
    if version_in_pre_commit is None:
        print("Error: Could not find ruff version in .pre-commit-config.yaml")
        return False

    if version_in_pyproject == version_in_pre_commit:
        print(f"✅ Ruff versions are in sync: {version_in_pyproject}")
        return True

    print("❌ Ruff versions are out of sync:")
    print(f"  pyproject.toml: {version_in_pyproject}")
    print(f"  .pre-commit-config.yaml: v{version_in_pre_commit}")
    return False
def sync_versions(repo_root: str) -> bool:
    """Copy the ruff version pinned in pyproject.toml into the pre-commit config.

    Parameters
    ----------
    repo_root : str
        Path to the repository root directory.

    Returns
    -------
    bool
        True if already in sync, False if modified or on error.
        Returns False after modifications so pre-commit hooks fail and
        prompt the user to stage the changes.
    """
    source_version = _get_ruff_version_from_pyproject(repo_root)
    if source_version is None:
        print("Error: Could not find ruff version in pyproject.toml")
        return False

    target_version = _get_ruff_version_from_pre_commit_config(repo_root)
    if target_version is None:
        print("Error: Could not find ruff version in .pre-commit-config.yaml")
        return False

    if source_version == target_version:
        print(f"✅ Ruff versions already in sync: {source_version}")
        return True

    print(f"Syncing ruff version from {target_version} to {source_version}...")
    if _update_pre_commit_config(repo_root, source_version):
        # Deliberately fail so pre-commit makes the user stage the edit.
        print("Please stage the updated .pre-commit-config.yaml and commit again.")
    return False
def _parse_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Sync ruff version from pyproject.toml to .pre-commit-config.yaml"
)
parser.add_argument(
"--check",
action="store_true",
help="Check if versions are in sync without modifying files.",
)
return parser.parse_args()
def main() -> None:
    """Entry point: check or sync the ruff version pins.

    Parses command line arguments, runs the requested operation, and exits
    the process with status 0 on success or 1 on failure.
    """
    args = _parse_arguments()
    repo_root = _get_repo_root()

    if args.check:
        print("🔍 Checking ruff version sync...")
        ok = check_sync_status(repo_root)
    else:
        print("🔄 Syncing ruff version...")
        ok = sync_versions(repo_root)

    sys.exit(0 if ok else 1)


if __name__ == "__main__":
    main()
| {
"repo_id": "streamlit/streamlit",
"file_path": "scripts/sync_ruff_version.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/typing/checkbox_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

from typing import TYPE_CHECKING

from typing_extensions import assert_type

# "Type checking tests": everything below is guarded by TYPE_CHECKING, so this
# module is only ever analyzed by a static type checker (mypy) and never runs.
# Each assert_type() pins the declared return type of st.checkbox / st.toggle
# for one particular argument combination; mypy flags any mismatch.
if TYPE_CHECKING:
    from streamlit.elements.widgets.checkbox import CheckboxMixin

    # Bound methods under test (never actually constructed at runtime).
    checkbox = CheckboxMixin().checkbox
    toggle = CheckboxMixin().toggle

    # =====================================================================
    # st.checkbox return type tests
    # =====================================================================
    # Basic call; key accepts both str and int.
    assert_type(checkbox("Accept terms"), bool)
    assert_type(checkbox("Accept terms", key="my_checkbox"), bool)
    assert_type(checkbox("Accept terms", key=123), bool)
    # value / help / disabled keyword variants.
    assert_type(checkbox("Accept terms", value=True), bool)
    assert_type(checkbox("Accept terms", value=False), bool)
    assert_type(checkbox("Accept terms", help="Check to accept"), bool)
    assert_type(checkbox("Accept terms", help=None), bool)
    assert_type(checkbox("Accept terms", disabled=True), bool)
    assert_type(checkbox("Accept terms", disabled=False), bool)
    # label_visibility variants.
    assert_type(checkbox("Accept terms", label_visibility="visible"), bool)
    assert_type(checkbox("Accept terms", label_visibility="hidden"), bool)
    assert_type(checkbox("Accept terms", label_visibility="collapsed"), bool)
    # width accepts "content", "stretch", or a pixel int.
    assert_type(checkbox("Accept terms", width="content"), bool)
    assert_type(checkbox("Accept terms", width="stretch"), bool)
    assert_type(checkbox("Accept terms", width=200), bool)
    assert_type(
        checkbox("Accept terms", key="bind_checkbox", bind="query-params"), bool
    )

    # Callbacks used by the on_change variants below.
    def my_callback() -> None:
        pass

    def callback_with_args(x: int, y: str) -> None:
        pass

    assert_type(checkbox("Accept terms", on_change=my_callback), bool)
    assert_type(
        checkbox("Accept terms", on_change=callback_with_args, args=(1, "a")), bool
    )
    assert_type(
        checkbox(
            "Accept terms", on_change=callback_with_args, kwargs={"x": 1, "y": "a"}
        ),
        bool,
    )
    assert_type(checkbox("Accept terms", on_change=None), bool)
    # Every keyword argument supplied at once.
    assert_type(
        checkbox(
            "Full checkbox",
            value=True,
            key="full_checkbox",
            help="Full help",
            on_change=my_callback,
            args=None,
            kwargs=None,
            disabled=False,
            label_visibility="visible",
            width="stretch",
        ),
        bool,
    )

    # =====================================================================
    # st.toggle return type tests
    # =====================================================================
    # Same coverage as the checkbox block above.
    assert_type(toggle("Enable feature"), bool)
    assert_type(toggle("Enable feature", key="my_toggle"), bool)
    assert_type(toggle("Enable feature", key=456), bool)
    assert_type(toggle("Enable feature", value=True), bool)
    assert_type(toggle("Enable feature", value=False), bool)
    assert_type(toggle("Enable feature", help="Toggle to enable"), bool)
    assert_type(toggle("Enable feature", help=None), bool)
    assert_type(toggle("Enable feature", disabled=True), bool)
    assert_type(toggle("Enable feature", disabled=False), bool)
    assert_type(toggle("Enable feature", label_visibility="visible"), bool)
    assert_type(toggle("Enable feature", label_visibility="hidden"), bool)
    assert_type(toggle("Enable feature", label_visibility="collapsed"), bool)
    assert_type(toggle("Enable feature", width="content"), bool)
    assert_type(toggle("Enable feature", width="stretch"), bool)
    assert_type(toggle("Enable feature", width=150), bool)
    assert_type(toggle("Enable feature", key="bind_toggle", bind="query-params"), bool)
    assert_type(toggle("Enable feature", on_change=my_callback), bool)
    assert_type(
        toggle("Enable feature", on_change=callback_with_args, args=(1, "a")), bool
    )
    assert_type(
        toggle(
            "Enable feature", on_change=callback_with_args, kwargs={"x": 1, "y": "a"}
        ),
        bool,
    )
    assert_type(toggle("Enable feature", on_change=None), bool)
    # Every keyword argument supplied at once.
    assert_type(
        toggle(
            "Full toggle",
            value=True,
            key="full_toggle",
            help="Full help",
            on_change=my_callback,
            args=None,
            kwargs=None,
            disabled=False,
            label_visibility="visible",
            width="stretch",
        ),
        bool,
    )
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/checkbox_types.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/window_config.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Streamlit app to test window.__streamlit configuration security."""
import streamlit as st
def home_page() -> None:
    """Main page: renders the widgets the e2e tests interact with.

    The themed widgets (button, text input, slider) give the snapshot tests
    something visual to compare, and the download button exercises
    DOWNLOAD_ASSETS_BASE_URL-based URL construction.
    """
    st.title("Window Config Security Test")
    st.write("This app is used to test that `window.__streamlit` configuration")
    st.write("is captured at load time and cannot be modified afterwards.")
    st.divider()

    # Display some content to verify theme application
    st.subheader(":primary[Primary Colored] Header")
    st.button("Click me", type="primary")
    st.text_input("Enter text")
    st.slider("Choose value", 0, 100, 50)

    # Add download button for testing DOWNLOAD_ASSETS_BASE_URL usage
    st.download_button(
        label="Download Test File",
        data="This is test content for download",
        file_name="test.txt",
        mime="text/plain",
    )
def second_page() -> None:
    """Second page: navigation target for MAIN_PAGE_BASE_URL pathname tests."""
    st.title("Second Page")
    st.write("This is page 2 for testing MAIN_PAGE_BASE_URL navigation.")
    st.button("Page 2 Button")
def third_page() -> None:
    """Third page: additional navigation target for MAIN_PAGE_BASE_URL tests."""
    st.title("Third Page")
    st.write("This is page 3 for testing MAIN_PAGE_BASE_URL navigation.")
    st.button("Page 3 Button")
# Register the three pages with the multipage router and hand over control.
# url_path determines the path segment the navigation tests assert on.
_pages = [
    st.Page(home_page, title="Home", url_path="home", icon=":material/home:"),
    st.Page(second_page, title="Page 2", url_path="page2", icon=":material/star:"),
    st.Page(
        third_page, title="Page 3", url_path="page3", icon=":material/analytics:"
    ),
]
st.navigation(_pages).run()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/window_config.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/window_config_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E tests for window.__streamlit configuration security.
These tests verify that:
1. Configuration set via window.__streamlit before load is captured correctly
2. Modifications to window.__streamlit after load do NOT affect the app (security)
"""
from playwright.sync_api import Page, Request, expect
from e2e_playwright.conftest import (
ImageCompareFunction,
wait_for_app_loaded,
wait_until,
)
def test_window_config_captured_at_preload(
    app: Page, assert_snapshot: ImageCompareFunction
):
    """Test that window.__streamlit configuration works when set before load.

    This verifies the normal use case where embedding environments set
    configuration before the Streamlit bundle loads. Tests multiple config
    property types: themes, URLs, client IDs, and boolean flags.
    """
    # Inject comprehensive configuration BEFORE the page loads.
    # This includes theme, URL, client ID, and boolean flag configs.
    # add_init_script runs on every subsequent navigation, so the reload
    # below still sees it before the Streamlit bundle executes.
    app.add_init_script("""
        window.__streamlit = {
            LIGHT_THEME: {
                base: "light",
                primaryColor: "#1f2578",
                backgroundColor: "#c8ccf7",
                secondaryBackgroundColor: "#ebecf5",
                textColor: "#1A1A1A",
            },
            MAIN_PAGE_BASE_URL: "https://host.example.com/my-app",
            CUSTOM_COMPONENT_CLIENT_ID: "preload-test-client-id",
            ENABLE_RELOAD_BASED_ON_HARDCODED_STREAMLIT_VERSION: false
        }
    """)

    # Reload to apply the injected script
    app.reload()
    wait_for_app_loaded(app)

    # Verify theme config was captured (visual verification)
    assert_snapshot(app, name="window_config_preload_applied")

    # Verify URL config was captured
    wait_until(
        app,
        lambda: (
            app.evaluate("() => window.__streamlit?.MAIN_PAGE_BASE_URL")
            == "https://host.example.com/my-app"
        ),
    )

    # Verify client ID config was captured
    wait_until(
        app,
        lambda: (
            app.evaluate("() => window.__streamlit?.CUSTOM_COMPONENT_CLIENT_ID")
            == "preload-test-client-id"
        ),
    )

    # Verify boolean flag config was captured (`is False` distinguishes an
    # explicit false from a missing/undefined property, which maps to None)
    wait_until(
        app,
        lambda: (
            app.evaluate(
                "() => window.__streamlit?.ENABLE_RELOAD_BASED_ON_HARDCODED_STREAMLIT_VERSION"
            )
            is False
        ),
    )

    # Verify app is functional with all configs applied
    button = app.get_by_role("button", name="Click me")
    expect(button).to_be_visible()
    button.click()
def test_window_theme_config_immutable_after_load(
    app: Page, assert_snapshot: ImageCompareFunction
):
    """Test that theme changes after load are ignored.

    This test specifically verifies that theme configuration cannot be
    changed after the initial load, which would be a security/consistency issue.

    Flow: apply a green theme pre-load, snapshot it, replace window.__streamlit
    with a red theme post-load, then snapshot again and confirm the UI is
    still green.
    """
    # Set initial green theme configuration before load
    app.add_init_script("""
        window.__streamlit = {
            LIGHT_THEME: {
                base: "light",
                primaryColor: "#042604",
                backgroundColor: "#F0FFF0",
                textColor: "#006400",
            }
        }
    """)

    # Reload to apply the injected script
    app.reload()
    wait_for_app_loaded(app)

    # Verify window.__streamlit exists and has our value
    wait_until(
        app,
        lambda: (
            app.evaluate("() => window.__streamlit?.LIGHT_THEME?.primaryColor")
            == "#042604"
        ),
    )

    # Take snapshot of the initial green theme
    assert_snapshot(app, name="window_config_initial_green_theme")

    # Try to modify window.__streamlit AFTER the app has loaded
    # This should NOT affect the app's appearance since config is frozen at load
    app.evaluate("""
        () => {
            window.__streamlit = {
                LIGHT_THEME: {
                    base: "light",
                    primaryColor: "#FF0000",
                    backgroundColor: "#FFF0F0",
                    textColor: "#8B0000",
                }
            };
        }
    """)

    # Verify window.__streamlit was actually changed
    wait_until(
        app,
        lambda: (
            app.evaluate("() => window.__streamlit?.LIGHT_THEME?.primaryColor")
            == "#FF0000"
        ),
    )

    # Take another snapshot immediately - should still show green theme, NOT red
    # This proves the modification had no effect (no re-render occurred)
    assert_snapshot(app, name="window_config_still_green_after_modification")

    # App should still be fully functional
    button = app.get_by_role("button", name="Click me")
    expect(button).to_be_visible()
    button.click()

    text_input = app.get_by_test_id("stTextInput").locator("input")
    expect(text_input).to_be_visible()
    text_input.fill("security test passed")
    expect(text_input).to_have_value("security test passed")
def test_window_config_backend_base_url_immutable(app: Page):
    """Check that BACKEND_BASE_URL cannot be swapped out after load.

    The frozen-config mechanism must protect BACKEND_BASE_URL: even after
    window.__streamlit is replaced at runtime, the app keeps talking to its
    original backend.
    """
    # Replace the global config object while the app is already running.
    app.evaluate("""
        () => {
            window.__streamlit = {
                BACKEND_BASE_URL: "https://malicious.example.com",
            };
        }
    """)

    def tampered_value_visible() -> bool:
        # Confirms the replacement really landed on the window object.
        return (
            app.evaluate("() => window.__streamlit?.BACKEND_BASE_URL")
            == "https://malicious.example.com"
        )

    wait_until(app, tampered_value_visible)

    # The app must still be wired to the original backend (frozen config).
    # If the modified backend URL were in effect, the app could not reach
    # its server and this interaction would fail.
    click_me = app.get_by_role("button", name="Click me")
    expect(click_me).to_be_visible()
    click_me.click()
def test_window_config_main_page_url(app: Page):
    """Test that frozen MAIN_PAGE_BASE_URL pathname is used in navigation.

    MAIN_PAGE_BASE_URL is used to extract the pathname for constructing page
    paths. This test verifies the frozen pathname is used, not a modified one.
    """
    # Set MAIN_PAGE_BASE_URL with a custom pathname before load.
    # Only the /my-app PATHNAME is used, not the full URL.
    app.add_init_script("""
        window.__streamlit = {
            MAIN_PAGE_BASE_URL: "https://example.com/my-app"
        }
    """)

    # Reload to apply the injected script (init scripts re-run on navigation)
    app.reload()
    wait_for_app_loaded(app)

    # Verify config was captured
    wait_until(
        app,
        lambda: (
            app.evaluate("() => window.__streamlit?.MAIN_PAGE_BASE_URL")
            == "https://example.com/my-app"
        ),
    )

    # Now modify window.__streamlit AFTER load to a DIFFERENT pathname
    app.evaluate("""
        () => {
            window.__streamlit = {
                MAIN_PAGE_BASE_URL: "https://example.com/hacked-path"
            };
        }
    """)

    # Verify window.__streamlit was modified
    wait_until(
        app,
        lambda: (
            app.evaluate("() => window.__streamlit?.MAIN_PAGE_BASE_URL")
            == "https://example.com/hacked-path"
        ),
    )

    # Navigate to Page 2 - this triggers maybeUpdatePageUrl() which retrieves the
    # pathname from parseUriIntoBaseParts(StreamlitConfig.MAIN_PAGE_BASE_URL).pathname
    page2_link = app.get_by_role("link", name="Page 2")
    expect(page2_link).to_be_visible()
    page2_link.click()

    # Wait for navigation
    wait_for_app_loaded(app)

    # CRITICAL ASSERTION: The pathname should be /my-app/page2 (using frozen config)
    # If the modified config was used, pathname would be /hacked-path/page2
    wait_until(
        app, lambda: app.evaluate("() => window.location.pathname") == "/my-app/page2"
    )
def test_window_config_direct_property_modification(app: Page):
    """Test that direct property assignment to window.__streamlit is ignored.

    This test verifies that direct property assignment to window.__streamlit
    (not replacing the entire object) has no effect on the app because it reads
    from the frozen capturedConfig, not from window.__streamlit.

    Companion to test_window_config_main_page_url, which covers whole-object
    replacement; this one covers in-place mutation of a single property.
    """
    # Set MAIN_PAGE_BASE_URL with a custom pathname before load.
    # Only the /my-app PATHNAME is used, not the full URL.
    app.add_init_script("""
        window.__streamlit = {
            MAIN_PAGE_BASE_URL: "https://example.com/my-app"
        }
    """)

    # Reload to apply the injected script
    app.reload()
    wait_for_app_loaded(app)

    # Verify config was captured
    wait_until(
        app,
        lambda: (
            app.evaluate("() => window.__streamlit?.MAIN_PAGE_BASE_URL")
            == "https://example.com/my-app"
        ),
    )

    # Modify window.__streamlit AFTER load via direct property assignment
    # This tests: window.__streamlit.PROPERTY = "value" (not replacing the whole object)
    app.evaluate("""
        () => {
            window.__streamlit.MAIN_PAGE_BASE_URL = "https://example.com/hacked-path";
        }
    """)

    # Verify window.__streamlit property was directly modified
    wait_until(
        app,
        lambda: (
            app.evaluate("() => window.__streamlit?.MAIN_PAGE_BASE_URL")
            == "https://example.com/hacked-path"
        ),
    )

    # Navigate to Page 2 - this triggers maybeUpdatePageUrl() which should use
    # StreamlitConfig.MAIN_PAGE_BASE_URL (the frozen config), NOT window.__streamlit
    page2_link = app.get_by_role("link", name="Page 2")
    expect(page2_link).to_be_visible()
    page2_link.click()

    # Wait for navigation
    wait_for_app_loaded(app)

    # CRITICAL ASSERTION: The pathname should be /my-app/page2 (using frozen config)
    # If the modified config was used, pathname would be /hacked-path/page2
    wait_until(
        app, lambda: app.evaluate("() => window.location.pathname") == "/my-app/page2"
    )
def test_window_config_download_url(app: Page):
    """Verify the frozen DOWNLOAD_ASSETS_BASE_URL drives download URLs.

    Strategy: freeze one base URL before load, swap window.__streamlit to a
    different base URL after load, click the download button, and inspect the
    network request that results. The request must target the frozen base
    (cdn.example.com), never the post-load replacement (malicious.example.com).

    NOTE: Neither base URL is real, so only URL construction is checked here,
    not download success/failure.
    """
    # Freeze the CDN base URL before the Streamlit bundle loads.
    app.add_init_script("""
        window.__streamlit = {
            DOWNLOAD_ASSETS_BASE_URL: "https://cdn.example.com"
        }
    """)
    app.reload()
    wait_for_app_loaded(app)

    def frozen_value_visible() -> bool:
        return (
            app.evaluate("() => window.__streamlit?.DOWNLOAD_ASSETS_BASE_URL")
            == "https://cdn.example.com"
        )

    wait_until(app, frozen_value_visible)

    # Swap in a different base URL after load; the app must ignore it.
    app.evaluate("""
        () => {
            window.__streamlit = {
                DOWNLOAD_ASSETS_BASE_URL: "https://malicious.example.com"
            };
        }
    """)

    def tampered_value_visible() -> bool:
        return (
            app.evaluate("() => window.__streamlit?.DOWNLOAD_ASSETS_BASE_URL")
            == "https://malicious.example.com"
        )

    wait_until(app, tampered_value_visible)

    # Record every request that looks like a media/download fetch so we can
    # inspect the URL the frontend actually constructed.
    seen_urls = []
    interesting_markers = ("/media/", "cdn.example.com", "malicious.example.com")

    def record_request(request: Request) -> None:
        url = request.url
        # Skip bundled static assets such as /static/media/fireworks.gif
        # (the New Year's easter egg) -- they are not download requests.
        if "/static/media/" in url:
            return
        if any(marker in url for marker in interesting_markers):
            seen_urls.append(url)

    app.on("request", record_request)

    # Trigger the download AFTER the config was tampered with.
    trigger = app.get_by_role("button", name="Download Test File")
    expect(trigger).to_be_visible()
    trigger.click()

    # Block until at least one matching request has been observed.
    wait_until(app, lambda: bool(seen_urls))
    download_url = seen_urls[0]

    # CRITICAL ASSERTION: the URL must use the FROZEN config (cdn.example.com),
    # NOT the modified config (malicious.example.com).
    if "cdn.example.com" not in download_url:
        raise AssertionError(
            f"Download URL uses WRONG config! "
            f"Expected URL to contain 'cdn.example.com' (frozen config), "
            f"but got: {download_url}."
        )
    if "malicious.example.com" in download_url:
        raise AssertionError(
            f"Download URL is using the MODIFIED config! "
            f"URL contains 'malicious.example.com': {download_url}. "
            f"This proves the app is using window.__streamlit instead of frozen config!"
        )
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/window_config_test.py",
"license": "Apache License 2.0",
"lines": 356,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/button_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

from typing import TYPE_CHECKING

from typing_extensions import assert_type

# Perform some "type checking testing"; mypy should flag any assignments that are
# incorrect. The whole module body is guarded by TYPE_CHECKING, so nothing
# below ever executes at runtime -- it only exists for the type checker.
if TYPE_CHECKING:
    import io
    from pathlib import Path

    from streamlit.delta_generator import DeltaGenerator
    from streamlit.elements.form import FormMixin
    from streamlit.elements.widgets.button import ButtonMixin
    from streamlit.navigation.page import Page

    # Bound methods under test.
    button = ButtonMixin().button
    download_button = ButtonMixin().download_button
    link_button = ButtonMixin().link_button
    page_link = ButtonMixin().page_link
    form_submit_button = FormMixin().form_submit_button

    # =====================================================================
    # st.button return type tests
    # =====================================================================

    # Basic button - returns bool; key accepts str or int
    assert_type(button("Click me"), bool)
    assert_type(button("Click me", key="my_button"), bool)
    assert_type(button("Click me", key=123), bool)

    # Button type parameter - Literal["primary", "secondary", "tertiary"]
    assert_type(button("Primary", type="primary"), bool)
    assert_type(button("Secondary", type="secondary"), bool)
    assert_type(button("Tertiary", type="tertiary"), bool)

    # Button with help parameter
    assert_type(button("Help", help="This is help text"), bool)
    assert_type(button("Help", help=None), bool)

    # Button with icon parameter
    assert_type(button("Icon", icon="🚨"), bool)
    assert_type(button("Icon", icon=":material/thumb_up:"), bool)
    assert_type(button("Icon", icon="spinner"), bool)
    assert_type(button("Icon", icon=None), bool)

    # Button with disabled parameter
    assert_type(button("Disabled", disabled=True), bool)
    assert_type(button("Enabled", disabled=False), bool)

    # Button with width parameter - "content", "stretch", or int
    assert_type(button("Content width", width="content"), bool)
    assert_type(button("Stretch width", width="stretch"), bool)
    assert_type(button("Fixed width", width=200), bool)

    # Button with shortcut parameter
    assert_type(button("Shortcut", shortcut="K"), bool)
    assert_type(button("Shortcut", shortcut="Ctrl+S"), bool)
    assert_type(button("Shortcut", shortcut=None), bool)

    # Button with on_click callback; these two functions are reused by the
    # other widget sections below.
    def my_callback() -> None:
        pass

    def callback_with_args(x: int, y: str) -> None:
        pass

    assert_type(button("Callback", on_click=my_callback), bool)
    assert_type(button("Callback", on_click=callback_with_args, args=(1, "a")), bool)
    assert_type(
        button("Callback", on_click=callback_with_args, kwargs={"x": 1, "y": "a"}), bool
    )
    assert_type(button("No callback", on_click=None), bool)

    # Button with all parameters combined
    assert_type(
        button(
            "Full button",
            key="full_button",
            help="Full help",
            on_click=my_callback,
            args=None,
            kwargs=None,
            type="primary",
            icon="🚀",
            disabled=False,
            width="stretch",
            shortcut="Enter",
        ),
        bool,
    )
    # =====================================================================
    # st.download_button return type tests
    # =====================================================================

    # Basic download button - returns bool
    assert_type(download_button("Download", data="text content"), bool)
    assert_type(download_button("Download", data=b"binary content"), bool)

    # Download button with different data types (TextIO, BinaryIO, RawIOBase).
    # NOTE: under TYPE_CHECKING these constructors never run, so no file is
    # actually opened here.
    text_io = io.StringIO("text")
    binary_io = io.BytesIO(b"binary")
    raw_io: io.RawIOBase = io.FileIO("/dev/null")  # RawIOBase example
    assert_type(download_button("Download", data=text_io), bool)
    assert_type(download_button("Download", data=binary_io), bool)
    assert_type(download_button("Download", data=raw_io), bool)

    # Download button with callable data (deferred)
    def generate_data() -> bytes:
        return b"generated"

    def generate_text() -> str:
        return "generated"

    assert_type(download_button("Download", data=generate_data), bool)
    assert_type(download_button("Download", data=generate_text), bool)

    # Download button with file_name and mime
    assert_type(download_button("Download", data="content", file_name="file.txt"), bool)
    assert_type(download_button("Download", data="content", mime="text/plain"), bool)
    assert_type(
        download_button(
            "Download", data="content", file_name="file.csv", mime="text/csv"
        ),
        bool,
    )

    # Download button with on_click parameter - supports "rerun", "ignore", callable, or None
    assert_type(download_button("Download", data="content", on_click="rerun"), bool)
    assert_type(download_button("Download", data="content", on_click="ignore"), bool)
    assert_type(download_button("Download", data="content", on_click=my_callback), bool)
    assert_type(download_button("Download", data="content", on_click=None), bool)

    # Download button with type parameter
    assert_type(download_button("Download", data="content", type="primary"), bool)
    assert_type(download_button("Download", data="content", type="secondary"), bool)
    assert_type(download_button("Download", data="content", type="tertiary"), bool)

    # Download button with width parameter
    assert_type(download_button("Download", data="content", width="content"), bool)
    assert_type(download_button("Download", data="content", width="stretch"), bool)
    assert_type(download_button("Download", data="content", width=150), bool)

    # Download button with icon, disabled, help, key (str or int), shortcut
    assert_type(download_button("Download", data="content", icon="📥"), bool)
    assert_type(download_button("Download", data="content", disabled=True), bool)
    assert_type(download_button("Download", data="content", help="Help text"), bool)
    assert_type(download_button("Download", data="content", key="dl_key"), bool)
    assert_type(download_button("Download", data="content", key=456), bool)
    assert_type(download_button("Download", data="content", shortcut="Ctrl+D"), bool)

    # Download button with all parameters combined
    assert_type(
        download_button(
            "Full download",
            data=b"content",
            file_name="file.bin",
            mime="application/octet-stream",
            key="full_download",
            help="Download this file",
            on_click="ignore",
            args=None,
            kwargs=None,
            type="primary",
            icon=":material/download:",
            disabled=False,
            width="stretch",
            shortcut="Ctrl+Shift+D",
        ),
        bool,
    )
# =====================================================================
# st.link_button return type tests
# =====================================================================
# Basic link button - returns DeltaGenerator
assert_type(link_button("Google", "https://google.com"), DeltaGenerator)
assert_type(
link_button("Google", "https://google.com", key="link_key"), DeltaGenerator
)
assert_type(link_button("Google", "https://google.com", key=789), DeltaGenerator)
# Link button with type parameter
assert_type(
link_button("Link", "https://example.com", type="primary"), DeltaGenerator
)
assert_type(
link_button("Link", "https://example.com", type="secondary"), DeltaGenerator
)
assert_type(
link_button("Link", "https://example.com", type="tertiary"), DeltaGenerator
)
# Link button with help parameter
assert_type(
link_button("Link", "https://example.com", help="Click to visit"),
DeltaGenerator,
)
assert_type(link_button("Link", "https://example.com", help=None), DeltaGenerator)
# Link button with icon parameter
assert_type(link_button("Link", "https://example.com", icon="🔗"), DeltaGenerator)
assert_type(
link_button("Link", "https://example.com", icon=":material/link:"),
DeltaGenerator,
)
assert_type(link_button("Link", "https://example.com", icon=None), DeltaGenerator)
# Link button with disabled parameter
assert_type(
link_button("Link", "https://example.com", disabled=True), DeltaGenerator
)
assert_type(
link_button("Link", "https://example.com", disabled=False), DeltaGenerator
)
# Link button with width parameter
assert_type(
link_button("Link", "https://example.com", width="content"), DeltaGenerator
)
assert_type(
link_button("Link", "https://example.com", width="stretch"), DeltaGenerator
)
assert_type(link_button("Link", "https://example.com", width=250), DeltaGenerator)
# Link button with shortcut parameter
assert_type(
link_button("Link", "https://example.com", shortcut="L"), DeltaGenerator
)
assert_type(
link_button("Link", "https://example.com", shortcut="Ctrl+L"), DeltaGenerator
)
assert_type(
link_button("Link", "https://example.com", shortcut=None), DeltaGenerator
)
# Link button with on_click parameter - supports "rerun", "ignore", or callable
assert_type(
link_button("Link", "https://example.com", on_click="ignore"), DeltaGenerator
)
assert_type(link_button("Link", "https://example.com", on_click="rerun"), bool)
assert_type(link_button("Link", "https://example.com", on_click=my_callback), bool)
assert_type(
link_button(
"Link",
"https://example.com",
on_click=callback_with_args,
args=(1, "a"),
),
bool,
)
assert_type(
link_button(
"Link",
"https://example.com",
on_click=callback_with_args,
kwargs={"x": 1, "y": "a"},
),
bool,
)
# Link button with all parameters combined
assert_type(
link_button(
"Full link",
"https://streamlit.io",
key="full_link",
on_click=my_callback,
args=None,
kwargs=None,
help="Visit Streamlit",
type="primary",
icon="🚀",
disabled=False,
width="stretch",
shortcut="Ctrl+Shift+S",
),
bool,
)
# =====================================================================
# st.page_link return type tests
# =====================================================================
# Basic page link - returns DeltaGenerator
# page parameter accepts str, Path, or StreamlitPage
assert_type(page_link("pages/page1.py"), DeltaGenerator)
assert_type(page_link(Path("pages/page1.py")), DeltaGenerator)
assert_type(page_link("https://example.com", label="External"), DeltaGenerator)
# Page link with StreamlitPage object
streamlit_page = Page("page1.py")
assert_type(page_link(streamlit_page), DeltaGenerator)
# Page link with label
assert_type(page_link("pages/page1.py", label="Page 1"), DeltaGenerator)
assert_type(page_link("pages/page1.py", label=None), DeltaGenerator)
# Page link with icon
assert_type(page_link("pages/page1.py", icon="📄"), DeltaGenerator)
assert_type(page_link("pages/page1.py", icon=":material/article:"), DeltaGenerator)
assert_type(page_link("pages/page1.py", icon=None), DeltaGenerator)
# Page link with help
assert_type(page_link("pages/page1.py", help="Go to page 1"), DeltaGenerator)
assert_type(page_link("pages/page1.py", help=None), DeltaGenerator)
# Page link with disabled
assert_type(page_link("pages/page1.py", disabled=True), DeltaGenerator)
assert_type(page_link("pages/page1.py", disabled=False), DeltaGenerator)
# Page link with width
assert_type(page_link("pages/page1.py", width="content"), DeltaGenerator)
assert_type(page_link("pages/page1.py", width="stretch"), DeltaGenerator)
assert_type(page_link("pages/page1.py", width=200), DeltaGenerator)
# Page link with query_params
assert_type(
page_link("pages/page1.py", query_params={"key": "value"}), DeltaGenerator
)
assert_type(
page_link("pages/page1.py", query_params=[("key", "value")]), DeltaGenerator
)
assert_type(page_link("pages/page1.py", query_params=None), DeltaGenerator)
# Page link with all parameters combined
assert_type(
page_link(
"pages/page1.py",
label="Page 1",
icon="📄",
help="Navigate to page 1",
disabled=False,
width="stretch",
query_params={"utm_source": "app"},
),
DeltaGenerator,
)
# =====================================================================
# st.form_submit_button return type tests
# =====================================================================
# Basic form submit button - returns bool
assert_type(form_submit_button(), bool)
assert_type(form_submit_button("Submit"), bool)
# Form submit button with key
assert_type(form_submit_button("Submit", key="submit_key"), bool)
assert_type(form_submit_button("Submit", key=123), bool)
assert_type(form_submit_button("Submit", key=None), bool)
# Form submit button with help
assert_type(form_submit_button("Submit", help="Click to submit"), bool)
assert_type(form_submit_button("Submit", help=None), bool)
# Form submit button with type parameter
assert_type(form_submit_button("Submit", type="primary"), bool)
assert_type(form_submit_button("Submit", type="secondary"), bool)
assert_type(form_submit_button("Submit", type="tertiary"), bool)
# Form submit button with icon
assert_type(form_submit_button("Submit", icon="✅"), bool)
assert_type(form_submit_button("Submit", icon=":material/send:"), bool)
assert_type(form_submit_button("Submit", icon=None), bool)
# Form submit button with disabled
assert_type(form_submit_button("Submit", disabled=True), bool)
assert_type(form_submit_button("Submit", disabled=False), bool)
# Form submit button with width
assert_type(form_submit_button("Submit", width="content"), bool)
assert_type(form_submit_button("Submit", width="stretch"), bool)
assert_type(form_submit_button("Submit", width=150), bool)
# Form submit button with shortcut
assert_type(form_submit_button("Submit", shortcut="Enter"), bool)
assert_type(form_submit_button("Submit", shortcut="Ctrl+Enter"), bool)
assert_type(form_submit_button("Submit", shortcut=None), bool)
# Form submit button with on_click callback
assert_type(form_submit_button("Submit", on_click=my_callback), bool)
assert_type(
form_submit_button("Submit", on_click=callback_with_args, args=(1, "a")), bool
)
assert_type(
form_submit_button(
"Submit", on_click=callback_with_args, kwargs={"x": 1, "y": "a"}
),
bool,
)
assert_type(form_submit_button("Submit", on_click=None), bool)
# Form submit button with all parameters combined
assert_type(
form_submit_button(
"Full Submit",
help="Submit the form",
on_click=my_callback,
args=None,
kwargs=None,
key="full_submit",
type="primary",
icon="🚀",
disabled=False,
width="stretch",
shortcut="Ctrl+Enter",
),
bool,
)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/button_types.py",
"license": "Apache License 2.0",
"lines": 366,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/web_server.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Streamlit app for testing server endpoints and behavior."""
import base64
import streamlit as st
st.header("Server Endpoint Tests")
# Basic content to verify app loads
st.markdown("App loaded successfully.")
# File uploader for testing upload endpoint
st.subheader("File Upload")
uploaded_file = st.file_uploader("Upload a file", key="test_uploader")
if uploaded_file is not None:
st.text(f"Uploaded: {uploaded_file.name}")
st.text(f"Size: {uploaded_file.size} bytes")
# Image for testing media endpoint - using bytes triggers the media endpoint.
st.subheader("Image (Media Endpoint)")
# Create a simple 1x1 red pixel PNG for testing.
RED_PIXEL_PNG = base64.b64decode(
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg=="
)
st.image(RED_PIXEL_PNG, caption="Test image")
# Download button for testing downloadable media
st.subheader("Download Button")
st.download_button(
label="Download test file",
data=b"Test content for download",
file_name="test_download.txt",
mime="text/plain",
key="test_download",
)
# Display session info.
# Setting session state here also ensures cache_memory_bytes metrics are available
# for the metrics endpoint tests to verify filtering works correctly.
st.subheader("Session Info")
if "counter" not in st.session_state:
st.session_state.counter = 0
if st.button("Increment counter"):
st.session_state.counter += 1
st.markdown(f"Counter: {st.session_state.counter}")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/web_server.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/web_server_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
E2E tests for server endpoints and behavior.
These tests verify expected server behavior covering:
- Health endpoints (/_stcore/health, /_stcore/script-health-check)
- Metrics endpoint (/_stcore/metrics)
- Host config endpoint (/_stcore/host-config)
- Media endpoint with range requests (/media/*)
- File upload endpoint (/_stcore/upload_file/*)
- CORS headers
- XSRF cookie handling
- Static file serving (/app/static/*)
"""
from __future__ import annotations
from urllib import parse
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import build_app_url, wait_for_app_loaded, wait_for_app_run
from e2e_playwright.shared.app_utils import click_button
def _app_ws_url(app_base_url: str, path: str) -> str:
    """Return the ws(s):// URL for *path*, mirroring the app's http(s) scheme."""
    parts = parse.urlsplit(build_app_url(app_base_url, path=path))
    ws_scheme = "wss" if parts.scheme == "https" else "ws"
    # Rebuild with the WebSocket scheme; drop any fragment component.
    return parse.urlunsplit((ws_scheme, parts.netloc, parts.path, parts.query, ""))
# =============================================================================
# Health Endpoint Tests
# =============================================================================
def test_health_endpoint_returns_ok(app: Page, app_base_url: str):
    """Test that /_stcore/health returns 'ok' when app is healthy."""
    res = app.request.get(build_app_url(app_base_url, path="/_stcore/health"))
    expect(res).to_be_ok()
    assert res.status == 200
    assert res.text() == "ok"


def test_health_endpoint_has_no_cache_header(app: Page, app_base_url: str):
    """Test that health endpoint sets Cache-Control: no-cache."""
    res = app.request.get(build_app_url(app_base_url, path="/_stcore/health"))
    expect(res).to_be_ok()
    # Health responses must never be cached by intermediaries.
    assert res.headers.get("cache-control") == "no-cache"


def test_health_endpoint_supports_head_method(app: Page, app_base_url: str):
    """Test that health endpoint supports HEAD method for monitoring services."""
    res = app.request.head(build_app_url(app_base_url, path="/_stcore/health"))
    expect(res).to_be_ok()
    assert res.status == 200


def test_health_endpoint_supports_options_for_cors(app: Page, app_base_url: str):
    """Test that health endpoint handles OPTIONS requests for CORS preflight."""
    res = app.request.fetch(
        build_app_url(app_base_url, path="/_stcore/health"),
        method="OPTIONS",
    )
    # OPTIONS should return 204 No Content
    assert res.status == 204


def test_script_health_endpoint_returns_ok(app: Page, app_base_url: str):
    """Test that /_stcore/script-health-check returns 'ok' for valid script."""
    res = app.request.get(
        build_app_url(app_base_url, path="/_stcore/script-health-check")
    )
    expect(res).to_be_ok()
    assert res.status == 200
    # The response should indicate the script ran successfully.
    assert "ok" in res.text().lower()
# =============================================================================
# Metrics Endpoint Tests
# =============================================================================
def test_metrics_endpoint_returns_valid_response(app: Page, app_base_url: str):
    """Test that /_stcore/metrics returns metrics in openmetrics format."""
    res = app.request.get(build_app_url(app_base_url, path="/_stcore/metrics"))
    expect(res).to_be_ok()
    assert res.status == 200
    # The exposition format is openmetrics (or plain text).
    ctype = res.headers.get("content-type", "")
    assert "openmetrics" in ctype or "text/plain" in ctype
    # Response should contain metric data (non-empty).
    assert len(res.text()) > 0


def test_metrics_endpoint_accepts_protobuf(app: Page, app_base_url: str):
    """Test that metrics endpoint can return protobuf when requested."""
    res = app.request.get(
        build_app_url(app_base_url, path="/_stcore/metrics"),
        headers={"Accept": "application/x-protobuf"},
    )
    expect(res).to_be_ok()
    assert res.status == 200
    # Content negotiation should switch to the protobuf representation.
    assert "protobuf" in res.headers.get("content-type", "")


def test_metrics_endpoint_filters_by_family_cache_memory(app: Page, app_base_url: str):
    """Test that metrics endpoint filters results by families query parameter.

    When requesting families=cache_memory_bytes, only cache memory metrics
    should be returned. The web_server.py app initializes session state to
    ensure these metrics are always present.
    """
    wait_for_app_loaded(app)
    res = app.request.get(
        build_app_url(
            app_base_url, path="/_stcore/metrics", query="families=cache_memory_bytes"
        )
    )
    expect(res).to_be_ok()
    assert res.status == 200
    payload = res.text()
    # Only the requested family may appear in the output.
    assert "cache_memory_bytes" in payload
    assert "session_events_total" not in payload
    assert "active_sessions" not in payload


def test_metrics_endpoint_filters_by_family_session_events(
    app: Page, app_base_url: str
):
    """Test that metrics endpoint returns session_events_total when requested.

    Session events metrics track connections, reconnections, and disconnections.
    """
    res = app.request.get(
        build_app_url(
            app_base_url, path="/_stcore/metrics", query="families=session_events_total"
        )
    )
    expect(res).to_be_ok()
    assert res.status == 200
    payload = res.text()
    # Only the requested family may appear in the output.
    assert "session_events_total" in payload
    assert "cache_memory_bytes" not in payload
    assert "active_sessions" not in payload


def test_metrics_endpoint_filters_by_family_active_sessions(
    app: Page, app_base_url: str
):
    """Test that metrics endpoint returns active_sessions when requested.

    Active sessions metrics track the current number of connected sessions.
    """
    res = app.request.get(
        build_app_url(
            app_base_url, path="/_stcore/metrics", query="families=active_sessions"
        )
    )
    expect(res).to_be_ok()
    assert res.status == 200
    payload = res.text()
    # Only the requested family may appear in the output.
    assert "active_sessions" in payload
    assert "cache_memory_bytes" not in payload
    assert "session_events_total" not in payload


def test_metrics_endpoint_filters_by_multiple_families(app: Page, app_base_url: str):
    """Test that metrics endpoint supports filtering by multiple families.

    Multiple families query params should return metrics for all requested families.
    """
    res = app.request.get(
        build_app_url(
            app_base_url,
            path="/_stcore/metrics",
            query="families=session_events_total&families=active_sessions",
        )
    )
    expect(res).to_be_ok()
    assert res.status == 200
    payload = res.text()
    # Both requested families appear; the unrequested one does not.
    assert "session_events_total" in payload
    assert "active_sessions" in payload
    assert "cache_memory_bytes" not in payload


def test_metrics_endpoint_unknown_family_returns_empty(app: Page, app_base_url: str):
    """Test that metrics endpoint returns empty response for unknown families.

    When requesting a family that doesn't exist, the response should contain
    only the EOF marker.
    """
    res = app.request.get(
        build_app_url(
            app_base_url, path="/_stcore/metrics", query="families=unknown_family"
        )
    )
    expect(res).to_be_ok()
    assert res.status == 200
    # Only the openmetrics EOF marker should remain.
    assert res.text().strip() == "# EOF"


def test_metrics_endpoint_no_filter_returns_all_families(app: Page, app_base_url: str):
    """Test that metrics endpoint without filter returns all metric families.

    When no families query param is provided, all available metrics should be
    returned. The web_server.py app initializes session state to ensure
    cache_memory_bytes metrics are always present.
    """
    wait_for_app_loaded(app)
    res = app.request.get(build_app_url(app_base_url, path="/_stcore/metrics"))
    expect(res).to_be_ok()
    assert res.status == 200
    payload = res.text()
    # All three known families should be present.
    assert "cache_memory_bytes" in payload
    assert "session_events_total" in payload
    assert "active_sessions" in payload
# =============================================================================
# Host Config Endpoint Tests
# =============================================================================
def test_host_config_endpoint_returns_json(app: Page, app_base_url: str):
    """Test that /_stcore/host-config returns valid JSON configuration."""
    res = app.request.get(build_app_url(app_base_url, path="/_stcore/host-config"))
    expect(res).to_be_ok()
    assert res.status == 200
    config = res.json()
    # Verify the expected configuration keys exist with sane types.
    assert "allowedOrigins" in config
    assert isinstance(config["allowedOrigins"], list)
    assert "useExternalAuthToken" in config
    assert "enableCustomParentMessages" in config


def test_host_config_has_no_cache_header(app: Page, app_base_url: str):
    """Test that host-config endpoint sets Cache-Control: no-cache."""
    res = app.request.get(build_app_url(app_base_url, path="/_stcore/host-config"))
    expect(res).to_be_ok()
    # Host configuration must always be fetched fresh.
    assert res.headers.get("cache-control") == "no-cache"
# =============================================================================
# CORS Header Tests
# =============================================================================
def test_cors_options_preflight_returns_204(app: Page, app_base_url: str):
    """Test that OPTIONS preflight requests return 204 No Content."""
    base_parts = parse.urlsplit(app_base_url)
    origin = f"{base_parts.scheme}://{base_parts.netloc}"
    res = app.request.fetch(
        build_app_url(app_base_url, path="/_stcore/health"),
        method="OPTIONS",
        headers={"Origin": origin},
    )
    # OPTIONS should return 204 No Content
    assert res.status == 204
# =============================================================================
# Media Endpoint Tests
# =============================================================================
def _get_media_url_from_image(app: Page, app_base_url: str) -> str | None:
    """Helper to get the media URL from an image element.

    Parameters
    ----------
    app : Page
        The Playwright page object.
    app_base_url : str
        The base URL where the app is running.

    Returns
    -------
    str or None
        The full media URL if found, or None if no image with a media URL exists.
    """
    wait_for_app_loaded(app)

    # Locate the first rendered image and read its src attribute.
    img = app.get_by_test_id("stImage").locator("img").first
    expect(img).to_be_visible()
    src = img.get_attribute("src")

    if not src or "/media/" not in src:
        return None
    # Relative src values are resolved against the app's base URL.
    return build_app_url(app_base_url, path=src) if src.startswith("/") else src
def test_media_endpoint_serves_image_content(app: Page, app_base_url: str):
    """Test that media endpoint correctly serves image content."""
    media_url = _get_media_url_from_image(app, app_base_url)
    assert media_url is not None

    res = app.request.get(media_url)
    expect(res).to_be_ok()
    assert res.status == 200
    # The served file must carry an image content type.
    ctype = res.headers.get("content-type")
    assert ctype is not None
    assert "image" in ctype


def test_media_endpoint_supports_range_requests(app: Page, app_base_url: str):
    """Test that media endpoint supports Accept-Ranges header for streaming."""
    media_url = _get_media_url_from_image(app, app_base_url)
    assert media_url is not None

    res = app.request.get(media_url)
    expect(res).to_be_ok()
    # The server should advertise byte-range support.
    assert res.headers.get("accept-ranges") == "bytes"


def test_media_endpoint_handles_range_request(app: Page, app_base_url: str):
    """Test that media endpoint correctly handles Range header requests."""
    media_url = _get_media_url_from_image(app, app_base_url)
    assert media_url is not None

    # Ask for just the first 10 bytes of the file.
    res = app.request.get(media_url, headers={"Range": "bytes=0-9"})
    # Should return 206 Partial Content
    assert res.status == 206
    # Content-Range must describe the partial span served.
    crange = res.headers.get("content-range")
    assert crange is not None
    assert crange.startswith("bytes 0-9/")
    # Exactly the requested 10 bytes come back.
    assert len(res.body()) == 10


def test_media_endpoint_returns_404_for_invalid_file(app: Page, app_base_url: str):
    """Test that media endpoint returns 404 for non-existent files."""
    res = app.request.get(
        build_app_url(app_base_url, path="/media/nonexistent-file-id.txt")
    )
    assert res.status == 404
# =============================================================================
# File Upload Endpoint Tests
# =============================================================================
def test_upload_endpoint_options_returns_cors_headers(app: Page, app_base_url: str):
    """Test that upload endpoint OPTIONS request returns CORS headers."""
    res = app.request.fetch(
        build_app_url(app_base_url, path="/_stcore/upload_file/test-session/test-file"),
        method="OPTIONS",
    )
    # OPTIONS should return 204
    assert res.status == 204
    # CORS must advertise the mutation methods the uploader uses.
    assert "access-control-allow-methods" in res.headers
    allow_methods = res.headers.get("access-control-allow-methods", "")
    assert "PUT" in allow_methods
    assert "DELETE" in allow_methods


def test_upload_endpoint_rejects_invalid_session(app: Page, app_base_url: str):
    """Test that upload endpoint rejects uploads with invalid session_id.

    When XSRF protection is enabled (default), the server will return 403 Forbidden
    for requests without a valid XSRF token before even checking the session.
    When XSRF is disabled, it returns 400 Bad Request for invalid sessions.
    """
    wait_for_app_loaded(app)

    # Attempt an upload against a session ID the server doesn't know.
    res = app.request.put(
        build_app_url(
            app_base_url, path="/_stcore/upload_file/invalid-session-id/test-file"
        ),
        multipart={
            "file": {
                "name": "test.txt",
                "mimeType": "text/plain",
                "buffer": b"test content",
            }
        },
    )
    # Should return 400 (invalid session) or 403 (XSRF protection)
    assert res.status in {400, 403}, f"Expected 400 or 403, got {res.status}"


def test_upload_delete_endpoint(app: Page, app_base_url: str):
    """Test that upload DELETE endpoint responds correctly.

    When XSRF protection is enabled (default), the server will return 403 Forbidden
    for requests without a valid XSRF token.
    When XSRF is disabled, it returns 204 No Content for DELETE operations.
    """
    wait_for_app_loaded(app)

    # DELETE on a non-existent file
    res = app.request.delete(
        build_app_url(app_base_url, path="/_stcore/upload_file/any-session/any-file")
    )
    # Should return 204 (success) or 403 (XSRF protection)
    assert res.status in {204, 403}, f"Expected 204 or 403, got {res.status}"
# =============================================================================
# XSRF Cookie Tests
# =============================================================================
def test_xsrf_cookie_format(app: Page):
    """Test that XSRF cookie is set with expected format."""
    wait_for_app_loaded(app)

    # Locate the Streamlit XSRF cookie among all cookies set for this context.
    cookies = app.context.cookies()
    xsrf_cookie = next(
        (c for c in cookies if c["name"] == "_streamlit_xsrf"), None
    )
    assert xsrf_cookie is not None

    # Cookie should have a value.
    assert len(xsrf_cookie["value"]) > 0

    # SameSite should be "Lax", though Firefox/Playwright may report "None";
    # the key security property is that the cookie exists and has a value.
    same_site = xsrf_cookie.get("sameSite")
    assert same_site in {"Lax", "None"}, (
        f"Expected SameSite 'Lax' or 'None', got: {same_site}"
    )
# =============================================================================
# Static File Endpoint Tests
# =============================================================================
def test_frontend_static_files_served(app: Page, app_base_url: str):
    """Test that frontend static files (JS, CSS) are served correctly."""
    res = app.request.get(build_app_url(app_base_url, path="/"))
    expect(res).to_be_ok()
    assert res.status == 200
    # The root route serves the SPA's index.html.
    assert "text/html" in res.headers.get("content-type", "")


def test_frontend_static_files_have_cache_headers(app: Page, app_base_url: str):
    """Test that index.html has no-cache but assets have long cache."""
    res = app.request.get(build_app_url(app_base_url, path="/"))
    expect(res).to_be_ok()
    # Index must be no-cache so clients always pick up new asset hashes.
    assert "no-cache" in res.headers.get("cache-control", "")


def test_nonexistent_route_returns_index(app: Page, app_base_url: str):
    """Test that non-existent routes return index.html (SPA behavior)."""
    res = app.request.get(build_app_url(app_base_url, path="/nonexistent-page"))
    # Should still return 200 and serve the SPA
    expect(res).to_be_ok()
    assert res.status == 200
    assert "text/html" in res.headers.get("content-type", "")
# =============================================================================
# Slash Redirect Tests
# =============================================================================
def test_trailing_slash_redirect_on_static_paths(app: Page, app_base_url: str):
    """Test that trailing slashes on static paths are handled correctly.

    The server should either:
    - Redirect /path/ to /path (RemoveSlashHandler behavior)
    - Or serve the content directly
    This is important for consistent URL handling and avoiding duplicate content.
    """
    # Request a path with a trailing slash, without following the redirect.
    res = app.request.get(
        build_app_url(app_base_url, path="/some-path/"),
        max_redirects=0,
    )
    # Tornado uses 301 (permanent redirect) via @removeslash decorator
    assert res.status == 301, f"Expected 301, got {res.status}"

    # The redirect target must have the trailing slash stripped.
    location = res.headers.get("location", "")
    assert location, "Redirect location header is empty"
    assert location == "/" or not location.endswith("/"), (
        f"Redirect should remove trailing slash, got: {location}"
    )


def test_base_url_without_trailing_slash(app: Page, app_base_url: str):
    """Test that the base URL without a trailing slash still serves the app.

    The server may redirect to the canonical form or serve content directly.
    """
    res = app.request.get(app_base_url.rstrip("/"))
    # Should succeed (possibly after redirect)
    expect(res).to_be_ok()
    assert res.status == 200


def test_double_slash_not_redirected_to_external(app: Page, app_base_url: str):
    """Test that double slashes don't cause redirect to external host.

    A path like //example.com could be misinterpreted as a protocol-relative URL.
    The path security middleware blocks these paths with 400 Bad Request.
    """
    res = app.request.get(
        build_app_url(app_base_url, path="//some-path"),
        max_redirects=0,
    )
    # Should be blocked with 400 Bad Request by PathSecurityMiddleware
    assert res.status == 400, f"Expected 400, got {res.status}"
# =============================================================================
# WebSocket Endpoint Tests
# =============================================================================
def test_websocket_connection_to_stream_endpoint(app: Page):
    """Test that WebSocket connects to the correct stream endpoint.

    The frontend establishes a WebSocket connection to /_stcore/stream
    using the Sec-WebSocket-Protocol header.
    """
    from playwright.sync_api import WebSocket

    # Collect every WebSocket the page opens. The handler must be registered
    # before navigation, and the app fixture has already navigated, so we
    # reload to observe the connection.
    captured: list[WebSocket] = []
    app.on("websocket", captured.append)
    app.reload()
    wait_for_app_loaded(app)

    # A connection must have been established, and it must target the
    # stream endpoint.
    assert len(captured) > 0, "No WebSocket connection established"
    ws = captured[0]
    assert "/_stcore/stream" in ws.url, (
        f"WebSocket URL should contain /_stcore/stream, got: {ws.url}"
    )
def test_direct_websocket_connection_with_subprotocol(app: Page, app_base_url: str):
    """Test direct WebSocket connection with Sec-WebSocket-Protocol header.

    This verifies the server correctly handles the subprotocol negotiation.
    Uses browser's native WebSocket API via page.evaluate() to avoid the complexity
    of Python async WebSocket libraries conflicting with Playwright's event loop.
    """
    # The app fixture automatically navigates and waits for the app to load.
    ws_url = _app_ws_url(app_base_url, path="/_stcore/stream")
    # Open a browser-side WebSocket requesting the 'streamlit' subprotocol and
    # resolve with whatever protocol the server actually selected.
    result = app.evaluate(
        """
        (url) => new Promise((resolve, reject) => {
            const ws = new WebSocket(url, ['streamlit']);
            ws.onopen = () => {
                resolve(ws.protocol);
                ws.close();
            };
            ws.onerror = () => reject('connection failed');
            setTimeout(() => reject('timeout'), 5000);
        })
        """,
        ws_url,
    )
    assert result == "streamlit", f"Expected 'streamlit' subprotocol, got: {result}"


def test_direct_websocket_with_session_id_in_subprotocol(app: Page, app_base_url: str):
    """Test that server accepts session ID in Sec-WebSocket-Protocol for reconnection.

    This verifies the server correctly parses the third entry (session ID)
    from the Sec-WebSocket-Protocol header.
    Uses browser's native WebSocket API via page.evaluate() to avoid the complexity
    of Python async WebSocket libraries conflicting with Playwright's event loop.
    """
    wait_for_app_loaded(app)
    # Get the current session ID from the app (stored in sessionStorage)
    session_id = app.evaluate("window.sessionStorage.getItem('stStreamlitSessionId')")
    # If no session ID in storage, use a test session ID
    if session_id is None:
        session_id = "test-session-id-12345"
    # Subprotocol entries: protocol name, XSRF token placeholder, session ID
    ws_url = _app_ws_url(app_base_url, path="/_stcore/stream")
    # The promise resolves (never rejects) so failure details are surfaced in
    # the returned object rather than as an evaluate() exception.
    result = app.evaluate(
        """
        ([url, sessionId]) => new Promise((resolve, reject) => {
            const ws = new WebSocket(
                url,
                ['streamlit', 'placeholder', sessionId]
            );
            ws.onopen = () => {
                resolve({ connected: true, protocol: ws.protocol });
                ws.close();
            };
            ws.onerror = () => resolve({ connected: false, error: 'connection failed' });
            setTimeout(() => resolve({ connected: false, error: 'timeout' }), 5000);
        })
        """,
        [ws_url, session_id],
    )
    assert result["connected"], f"WebSocket connection failed: {result.get('error')}"
    assert result["protocol"] == "streamlit", (
        f"Expected 'streamlit' subprotocol, got: {result['protocol']}"
    )
def test_websocket_reconnection_preserves_state(app: Page):
    """Test that WebSocket reconnection preserves session state.

    This test verifies that the session ID passed via Sec-WebSocket-Protocol
    (third entry) is correctly parsed by the server, allowing session
    reconnection without losing state.
    """
    wait_for_app_loaded(app)

    # Bump the counter once and confirm the app reflects it.
    click_button(app, "Increment counter")
    wait_for_app_run(app)
    expect(app.get_by_text("Counter: 1")).to_be_visible()

    # Force a disconnect; the frontend should transparently reconnect
    # (status widget appears while reconnecting, then goes away).
    app.evaluate("window.streamlitDebug.disconnectWebsocket();")
    expect(app.get_by_test_id("stStatusWidget")).to_be_visible()
    expect(app.get_by_test_id("stStatusWidget")).not_to_be_attached(timeout=10000)

    # State survived the reconnect, proving the server parsed the session ID
    # from the third Sec-WebSocket-Protocol entry.
    expect(app.get_by_text("Counter: 1")).to_be_visible()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/web_server_test.py",
"license": "Apache License 2.0",
"lines": 558,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/cursor_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from unittest.mock import MagicMock, patch
import pytest
from streamlit.cursor import (
LockedCursor,
RunningCursor,
SparseList,
get_container_cursor,
make_delta_path,
)
from streamlit.proto.RootContainer_pb2 import RootContainer
class TestSparseList:
    def test_set_and_get_item(self) -> None:
        """Stored values can be read back; holes raise KeyError."""
        sparse = SparseList()
        sparse[0] = "a"
        sparse[2] = "c"
        assert sparse[0] == "a"
        assert sparse[2] == "c"
        with pytest.raises(KeyError):
            _ = sparse[1]

    def test_set_invalid_index(self):
        """Negative or non-integer indices raise IndexError on assignment."""
        sparse = SparseList()
        with pytest.raises(IndexError):
            sparse[-1] = "fail"
        with pytest.raises(IndexError):
            sparse["not_int"] = "fail"  # type: ignore

    def test_del_item(self):
        """Deletion removes an entry; deleting a missing index raises."""
        sparse = SparseList()
        sparse[0] = "a"
        del sparse[0]
        with pytest.raises(KeyError):
            _ = sparse[0]
        with pytest.raises(KeyError):
            del sparse[1]

    def test_len(self):
        """Length counts stored entries, not the span of indices."""
        sparse = SparseList()
        sparse[0] = "a"
        sparse[10] = "b"
        assert len(sparse) == 2

    def test_iteration(self):
        """Iteration yields values in index order regardless of insertion order."""
        sparse = SparseList()
        sparse[2] = "c"
        sparse[0] = "a"
        sparse[1] = "b"
        assert list(sparse) == ["a", "b", "c"]
        assert list(sparse.items()) == [(0, "a"), (1, "b"), (2, "c")]

    def test_contains(self):
        """Membership tests against stored indices."""
        sparse = SparseList()
        sparse[0] = "a"
        assert 0 in sparse
        assert 1 not in sparse

    def test_repr(self):
        """repr() shows index/value pairs in a dict-like form."""
        sparse = SparseList()
        sparse[0] = "a"
        sparse[2] = "c"
        assert repr(sparse) == "SparseList({0: a, 2: c})"
class TestCursorFunctions:
    def test_make_delta_path(self):
        """make_delta_path flattens root, parent path, and index into one list."""
        assert make_delta_path(RootContainer.MAIN, (1, 2), 3) == [
            RootContainer.MAIN,
            1,
            2,
            3,
        ]

    @patch("streamlit.cursor.get_script_run_ctx")
    def test_get_container_cursor_no_ctx(self, mock_get_ctx):
        """No script-run context means no cursor is returned."""
        mock_get_ctx.return_value = None
        assert get_container_cursor(RootContainer.MAIN) is None

    def test_get_container_cursor_none_root(self):
        """A None root container yields no cursor."""
        assert get_container_cursor(None) is None

    @patch("streamlit.cursor.get_script_run_ctx")
    def test_get_container_cursor_creates_new(self, mock_get_ctx):
        """A fresh RunningCursor is created and memoized on the context."""
        ctx = MagicMock()
        ctx.cursors = {}
        mock_get_ctx.return_value = ctx
        cur = get_container_cursor(RootContainer.MAIN)
        assert isinstance(cur, RunningCursor)
        assert cur.root_container == RootContainer.MAIN
        # The new cursor is cached on the context for later reuse.
        assert RootContainer.MAIN in ctx.cursors
        assert ctx.cursors[RootContainer.MAIN] == cur

    @patch("streamlit.cursor.get_script_run_ctx")
    def test_get_container_cursor_returns_existing(self, mock_get_ctx):
        """An already-cached cursor is returned as-is."""
        ctx = MagicMock()
        existing = RunningCursor(RootContainer.MAIN)
        ctx.cursors = {RootContainer.MAIN: existing}
        mock_get_ctx.return_value = ctx
        assert get_container_cursor(RootContainer.MAIN) == existing
class TestRunningCursor:
    def test_initialization(self):
        """A new RunningCursor starts unlocked at index 0."""
        cur = RunningCursor(RootContainer.MAIN, (1, 2))
        assert cur.root_container == RootContainer.MAIN
        assert cur.parent_path == (1, 2)
        assert cur.index == 0
        assert cur.delta_path == [RootContainer.MAIN, 1, 2, 0]
        assert not cur.is_locked
        assert len(cur.transient_elements) == 0

    def test_get_locked_cursor(self):
        """Each lock returns a LockedCursor and advances the running index."""
        cur = RunningCursor(RootContainer.MAIN)
        # First lock captures index 0 along with the given props.
        first = cur.get_locked_cursor(foo="bar")
        assert isinstance(first, LockedCursor)
        assert first.index == 0
        assert first.props == {"foo": "bar"}
        assert cur.index == 1
        # Second lock captures the advanced index.
        second = cur.get_locked_cursor()
        assert second.index == 1
        assert cur.index == 2

    def test_get_transient_cursor(self):
        """Transient cursors reuse the running cursor, bumping transient_index."""
        cur = RunningCursor(RootContainer.MAIN)
        assert cur.get_transient_cursor() == cur
        assert cur.transient_index == 0
        cur.get_transient_cursor()
        assert cur.transient_index == 1

    def test_locked_cursor_resets_transient(self):
        """Locking clears any accumulated transient state."""
        cur = RunningCursor(RootContainer.MAIN)
        cur.get_transient_cursor()
        cur.transient_elements[0] = "element"  # Simulate adding element
        assert cur.transient_index == 0
        assert len(cur.transient_elements) == 1
        cur.get_locked_cursor()
        # Both the transient index and the element store are reset.
        assert cur.transient_index == 0
        assert len(cur.transient_elements) == 0
class TestLockedCursor:
    def test_initialization(self):
        """A LockedCursor carries its index, lock flag, and props."""
        cur = LockedCursor(RootContainer.MAIN, (1,), 5, foo="bar")
        assert cur.root_container == RootContainer.MAIN
        assert cur.parent_path == (1,)
        assert cur.index == 5
        assert cur.is_locked
        assert cur.props == {"foo": "bar"}

    def test_get_locked_cursor(self):
        """Re-locking returns the same cursor and only replaces its props."""
        cur = LockedCursor(RootContainer.MAIN, index=5)
        relocked = cur.get_locked_cursor(new_prop="value")
        assert relocked == cur
        assert cur.index == 5  # Index doesn't change
        assert cur.props == {"new_prop": "value"}
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/cursor_test.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/st_navigation.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Literal
import streamlit as st
def a():
    # First page: renders a distinct header so tests can detect which page ran.
    st.header("Header A")


def b():
    # Second page: different header to verify navigation actually switched pages.
    st.header("Header B")


# Radio selects where the navigation menu renders; annotated as Literal so the
# value can be passed straight to st.navigation's `position` parameter.
position: Literal["sidebar", "top", "hidden"] = st.radio(
    "Position", ["sidebar", "top", "hidden"]
)

# Sidebar content so the sidebar container exists even when the navigation
# menu itself is hidden (position="top" / "hidden").
st.sidebar.header("Sidebar Header")

st.navigation((a, b), position=position).run()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_navigation.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/st_navigation_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import wait_for_app_run
from e2e_playwright.shared.app_utils import select_radio_option
def test_sidebar_navigation_mode_shows_sidebar_nav_only(app: Page) -> None:
    """Sidebar position: sidebar navigation is visible, top navigation is not.

    The app under test defaults the Position radio to 'sidebar' (first option),
    so no radio interaction is needed before asserting.
    """
    # Sidebar navigation should be visible by default (position='sidebar').
    expect(app.get_by_test_id("stSidebar")).to_be_visible()
    expect(app.get_by_test_id("stSidebarNav")).to_be_visible()

    # Sidebar navigation should have links for both pages.
    sidebar_nav_links = app.get_by_test_id("stSidebarNavLink")
    expect(sidebar_nav_links).to_have_count(2)
    expect(sidebar_nav_links.first).to_be_visible()

    # Clicking the second page should change the header from A to B.
    sidebar_nav_links.nth(1).click()
    wait_for_app_run(app)
    expect(app.get_by_test_id("stHeading").filter(has_text="Header B")).to_be_visible()

    # Top navigation should not be visible in sidebar mode.
    expect(app.get_by_test_id("stTopNavLink")).not_to_be_visible()
def test_top_navigation_mode_shows_top_nav_only(app: Page) -> None:
    """Top position: top navigation is visible, sidebar navigation is hidden."""
    # Switch navigation position to top.
    select_radio_option(app, option="top", label="Position")

    # Sidebar container still exists (there is user content in the sidebar),
    # but the sidebar navigation menu should be hidden.
    expect(app.get_by_test_id("stSidebar")).to_be_visible()
    expect(app.get_by_test_id("stSidebarNav")).not_to_be_visible()

    # Top navigation should be visible with links for both pages.
    nav_links = app.get_by_test_id("stTopNavLink")
    expect(nav_links).to_have_count(2)
    expect(nav_links.first).to_be_visible()

    # Clicking the second page should change the header from A to B.
    nav_links.nth(1).click()
    wait_for_app_run(app)
    expect(app.get_by_test_id("stHeading").filter(has_text="Header B")).to_be_visible()
def test_hidden_navigation_mode_hides_both_navs(app: Page) -> None:
    """Hidden position: both sidebar and top navigation UIs are hidden."""
    # Switch navigation position to hidden.
    select_radio_option(app, option="hidden", label="Position")

    # Sidebar container still exists because the app adds sidebar content,
    # but there should be no navigation menu in sidebar or header.
    expect(app.get_by_test_id("stSidebar")).to_be_visible()
    expect(app.get_by_test_id("stSidebarNav")).not_to_be_visible()
    expect(app.get_by_test_id("stTopNavLink")).not_to_be_visible()

    # The app should still show content from the current page.
    # (Header A is the first page, which is active by default.)
    expect(app.get_by_test_id("stHeading").filter(has_text="Header A")).to_be_visible()
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_navigation_test.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/time_input_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
# These assertions exercise the static type checker only: TYPE_CHECKING is
# False at runtime, so nothing inside this block ever executes.
if TYPE_CHECKING:
    from datetime import datetime, time, timedelta

    from streamlit.elements.widgets.time_widgets import TimeWidgetsMixin

    time_input = TimeWidgetsMixin().time_input

    # Test default return type
    assert_type(time_input("label"), time)

    # Test positional args
    assert_type(time_input("label", "now"), time)
    assert_type(time_input("label", None), time | None)

    # Test value types
    assert_type(time_input("label", value="now"), time)
    assert_type(time_input("label", value=time(12, 0)), time)
    assert_type(time_input("label", value=datetime(2020, 1, 1, 12, 0)), time)
    assert_type(time_input("label", value="12:00"), time)

    # Test None value — only an explicit None widens the return to `time | None`
    assert_type(time_input("label", value=None), time | None)

    # Test optional value
    v: time | datetime | str | None = None
    assert_type(time_input("label", value=v), time | None)

    # Test kwargs
    assert_type(time_input("label", step=60), time)
    assert_type(time_input("label", step=timedelta(minutes=15)), time)
    assert_type(time_input("label", disabled=True), time)
    assert_type(time_input("label", label_visibility="hidden"), time)
    assert_type(time_input("label", help="help"), time)
    assert_type(time_input("label", key="foo"), time)
    assert_type(time_input("label", key=123), time)
    assert_type(time_input("label", on_change=lambda: None), time)
    assert_type(time_input("label", args=("arg",)), time)
    assert_type(time_input("label", kwargs={"k": "v"}), time)
    assert_type(time_input("label", width="stretch"), time)
    assert_type(time_input("label", width=100), time)

    # Test with bind parameter — binding must not change the return type
    assert_type(
        time_input("label", time(12, 0), key="my_key", bind="query-params"), time
    )
    assert_type(
        time_input("label", value=None, key="my_key", bind="query-params"),
        time | None,
    )
    assert_type(time_input("label", time(12, 0), key="my_key", bind=None), time)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/time_input_types.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/elements/lib/shortcut_utils.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import (
Final,
)
from streamlit.errors import (
StreamlitAPIException,
)
# Maps every accepted modifier spelling to its canonical client-side name.
# "mod" is a platform-neutral alias that is canonicalized to "ctrl".
_MODIFIER_ALIASES: Final[dict[str, str]] = {
    "ctrl": "ctrl",
    "control": "ctrl",
    "cmd": "cmd",
    "command": "cmd",
    "meta": "cmd",
    "alt": "alt",
    "option": "alt",
    "shift": "shift",
    "mod": "ctrl",
}

# Canonical order in which modifiers appear in a normalized shortcut string.
_MODIFIER_ORDER: Final[tuple[str, ...]] = ("ctrl", "cmd", "alt", "shift")

# Maps every accepted named-key spelling to its canonical client-side name.
_KEY_ALIASES: Final[dict[str, str]] = {
    "enter": "enter",
    "return": "enter",
    "space": "space",
    "spacebar": "space",
    "tab": "tab",
    "escape": "escape",
    "esc": "escape",
    "backspace": "backspace",
    "delete": "delete",
    "del": "delete",
    "home": "home",
    "end": "end",
    "pageup": "pageup",
    "pagedown": "pagedown",
    "left": "left",
    "arrowleft": "left",
    "right": "right",
    "arrowright": "right",
    "up": "up",
    "arrowup": "up",
    "down": "down",
    "arrowdown": "down",
}

# Keys that may never be used in a shortcut, with or without modifiers.
_RESERVED_KEYS: Final[set[str]] = {"c", "r"}


def _normalize_key_token(lower_token: str) -> str:
    """Normalize a lowercased key token to its canonical client-side name.

    Accepts named keys (via ``_KEY_ALIASES``), single alphanumeric
    characters, and function keys of the form ``f<digits>``.

    Raises
    ------
    StreamlitAPIException
        If the token is not a supported key.
    """
    if lower_token in _KEY_ALIASES:
        return _KEY_ALIASES[lower_token]
    if len(lower_token) == 1 and lower_token.isalnum():
        return lower_token
    # Function keys: "f" followed only by digits (e.g. "f1", "f12").
    if lower_token.startswith("f") and lower_token[1:].isdigit():
        return lower_token
    raise StreamlitAPIException(
        "shortcut must include a single character or one of the supported keys "
        "(e.g. Enter, Space, Tab, Escape)."
    )


def normalize_shortcut(shortcut: str) -> str:
    """Normalize a shortcut string to a format that can be used on the client side.

    Parameters
    ----------
    shortcut : str
        The shortcut string to normalize.

    Returns
    -------
    str
        The normalized shortcut string.

    Raises
    ------
    StreamlitAPIException
        If the shortcut is not a string value.
        If the shortcut does not contain at least one key or modifier.
        If the shortcut contains a single non-modifier key.
        If the shortcut uses the keys 'C' or 'R', with or without modifiers.
        If the shortcut does not include a non-modifier key.
    """
    if not isinstance(shortcut, str):
        raise StreamlitAPIException("shortcut must be a string value.")

    # Split on "+" and drop empty tokens produced by stray separators or
    # surrounding whitespace (e.g. " ctrl + k ").
    tokens = [token.strip() for token in shortcut.split("+") if token.strip()]
    if not tokens:
        raise StreamlitAPIException(
            "The `shortcut` must contain at least one key or modifier."
        )

    modifiers: list[str] = []
    key: str | None = None
    for raw_token in tokens:
        lower_token = raw_token.lower()
        if lower_token in _MODIFIER_ALIASES:
            normalized_modifier = _MODIFIER_ALIASES[lower_token]
            # Deduplicate aliases that map to the same modifier
            # (e.g. "Ctrl+Control+K" contributes "ctrl" only once).
            if normalized_modifier not in modifiers:
                modifiers.append(normalized_modifier)
            continue
        if key is not None:
            raise StreamlitAPIException(
                "The `shortcut` may only specify a single non-modifier key."
            )
        normalized_key = _normalize_key_token(lower_token)
        if normalized_key in _RESERVED_KEYS:
            raise StreamlitAPIException(
                "The `shortcut` cannot use the keys 'C' or 'R', with or without modifiers."
            )
        key = normalized_key

    if key is None:
        raise StreamlitAPIException(
            "The `shortcut` must include a non-modifier key such as 'K' or 'Ctrl+K'."
        )

    # Emit modifiers in canonical order, then the key. `key` is guaranteed
    # non-None here (checked above), so no further guard is needed.
    normalized_tokens: list[str] = [
        modifier for modifier in _MODIFIER_ORDER if modifier in modifiers
    ]
    normalized_tokens.append(key)
    return "+".join(normalized_tokens)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/elements/lib/shortcut_utils.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/elements/lib/shortcut_utils_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for shortcut utils."""
from __future__ import annotations
import pytest
from streamlit.elements.lib.shortcut_utils import normalize_shortcut
from streamlit.errors import StreamlitAPIException
@pytest.mark.parametrize(
    ("shortcut", "expected"),
    [
        # Modifier and named-key aliases normalize to canonical lowercase names:
        ("Alt+S", "alt+s"),
        ("Mod+Enter", "ctrl+enter"),
        ("Meta+Enter", "cmd+enter"),
        ("Command+Enter", "cmd+enter"),
        ("Control+Enter", "ctrl+enter"),
        ("Option+Enter", "alt+enter"),
        ("Cmd+Shift+P", "cmd+shift+p"),
        ("Ctrl+Alt+Delete", "ctrl+alt+delete"),
        ("Shift+Enter", "shift+enter"),
        ("Enter", "enter"),
        ("Esc", "escape"),
        ("Space", "space"),
        ("Tab", "tab"),
        ("Backspace", "backspace"),
        ("Delete", "delete"),
        ("Home", "home"),
        ("End", "end"),
        ("PageUp", "pageup"),
        ("PageDown", "pagedown"),
        ("Left", "left"),
        ("ArrowLeft", "left"),
        ("Right", "right"),
        ("ArrowRight", "right"),
        ("Up", "up"),
        ("ArrowUp", "up"),
        ("Down", "down"),
        ("ArrowDown", "down"),
        ("f1", "f1"),
        ("f12", "f12"),
        # Same cases with surrounding whitespace, which should be stripped:
        (" alt + s ", "alt+s"),
        (" cmd + shift + p ", "cmd+shift+p"),
        (" ctrl + alt + delete ", "ctrl+alt+delete"),
        (" shift + enter ", "shift+enter"),
        (" enter ", "enter"),
        (" esc ", "escape"),
        (" space ", "space"),
        (" tab ", "tab"),
        (" backspace ", "backspace"),
        (" delete ", "delete"),
        (" home ", "home"),
        (" end ", "end"),
        (" pageup ", "pageup"),
        (" pagedown ", "pagedown"),
        (" left ", "left"),
        (" arrowleft ", "left"),
        (" right ", "right"),
        (" arrowright ", "right"),
        (" up ", "up"),
        (" arrowup ", "up"),
        (" down ", "down"),
        (" arrowdown ", "down"),
        (" f1 ", "f1"),
        (" f12 ", "f12"),
    ],
)
def test_normalize_shortcut_returns_normalized(shortcut: str, expected: str) -> None:
    """Test that normalize_shortcut returns the expected normalized string."""
    assert normalize_shortcut(shortcut) == expected


# Inputs that are empty or contain only "+" separators / whitespace.
@pytest.mark.parametrize(
    "shortcut",
    [
        "",
        " ",
        "+",
        "++",
        " + ",
        "Ctrl+",
        "+C",
        "Ctrl+Shift+",
        "Ctrl++C",
        "Ctrl+Shift+Alt+",
        "Ctrl+Shift+Alt++",
    ],
)
def test_normalize_shortcut_rejects_invalid_format(shortcut: str) -> None:
    """Test that normalize_shortcut raises StreamlitAPIException for invalid format."""
    with pytest.raises(StreamlitAPIException):
        normalize_shortcut(shortcut)


# Shortcuts consisting solely of modifiers must be rejected — a non-modifier
# key is always required.
@pytest.mark.parametrize(
    "shortcut",
    [
        "Ctrl+Shift",
        "Alt",
        "Cmd",
        "Shift",
        "Ctrl+Alt",
        "Ctrl+Cmd",
        "Ctrl+Shift+Alt",
    ],
)
def test_normalize_shortcut_rejects_modifiers_only(shortcut: str) -> None:
    """Test that normalize_shortcut raises StreamlitAPIException for modifiers only."""
    with pytest.raises(StreamlitAPIException):
        normalize_shortcut(shortcut)


# At most one non-modifier key is allowed per shortcut.
@pytest.mark.parametrize(
    "shortcut",
    [
        "Ctrl+C+D",
        "A+B",
        "Ctrl+Shift+C+D",
        "Ctrl+Alt+Delete+Insert",
    ],
)
def test_normalize_shortcut_rejects_multiple_keys(shortcut: str) -> None:
    """Test that normalize_shortcut raises StreamlitAPIException for multiple keys."""
    with pytest.raises(StreamlitAPIException):
        normalize_shortcut(shortcut)


# 'c' and 'r' are reserved and rejected regardless of case or modifiers.
@pytest.mark.parametrize(
    "shortcut",
    [
        "Ctrl+C",
        "Cmd+R",
        "Alt+Shift+c",
        "r",
        "C",
    ],
)
def test_normalize_shortcut_rejects_reserved_keys(shortcut: str) -> None:
    """Test that normalize_shortcut raises StreamlitAPIException for reserved keys."""
    with pytest.raises(StreamlitAPIException):
        normalize_shortcut(shortcut)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/elements/lib/shortcut_utils_test.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/st_datetime_input.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
import streamlit as st
from streamlit import runtime
# Fixed reference datetime so snapshot/e2e assertions are deterministic.
BASE_DATETIME = datetime(2025, 11, 19, 16, 45)

v1 = st.datetime_input("Datetime input 1 (base)", BASE_DATETIME)
st.write("Value 1:", v1)

v2 = st.datetime_input(
    "Datetime input 2 (help)",
    BASE_DATETIME + timedelta(hours=2),
    help="Help text",
)
st.write("Value 2:", v2)

v3 = st.datetime_input("Datetime input 3 (disabled)", BASE_DATETIME, disabled=True)
st.write("Value 3:", v3)

v4 = st.datetime_input(
    "Datetime input 4 (hidden label)",
    BASE_DATETIME,
    label_visibility="hidden",
)
st.write("Value 4:", v4)

v5 = st.datetime_input(
    "Datetime input 5 (collapsed label)",
    BASE_DATETIME,
    label_visibility="collapsed",
)
st.write("Value 5:", v5)

# The callback example needs session state, which requires a running runtime.
if runtime.exists():

    def on_change() -> None:
        # Record that the callback fired so the e2e test can assert on it.
        st.session_state.datetime_input_changed = True
        st.text("Datetime input callback triggered")

    st.datetime_input(
        "Datetime input 6 (with callback)",
        BASE_DATETIME,
        key="datetime_input_6",
        on_change=on_change,
    )
    st.write("Value 6:", st.session_state.datetime_input_6)
    st.write(
        "datetime input changed:",
        st.session_state.get("datetime_input_changed") is True,
    )
    # Reset the flag so each rerun reports only fresh callback invocations.
    st.session_state.datetime_input_changed = False

v7 = st.datetime_input("Datetime input 7 (step=60)", BASE_DATETIME, step=60)
st.write("Value 7:", v7)

v8 = st.datetime_input("Datetime input 8 (empty)", value=None)
st.write("Value 8:", v8)

# Initialize default value for datetime input 9
if "datetime_input_9_default" not in st.session_state:
    st.session_state["datetime_input_9_default"] = BASE_DATETIME + timedelta(minutes=5)

v9 = st.datetime_input(
    "Datetime input 9 (empty, from state)",
    value=st.session_state["datetime_input_9_default"],
    key="datetime_input_9",
)
st.write("Value 9:", v9)

st.datetime_input(
    "Datetime input 10 -> :material/check: :rainbow[Fancy] _**markdown** `label` _support_",
    BASE_DATETIME,
)

st.datetime_input(
    "Datetime input 11 (width=200px)", BASE_DATETIME, width=200, format="MM/DD/YYYY"
)
st.datetime_input("Datetime input 12 (width='stretch')", BASE_DATETIME, width="stretch")

# clear_on_submit resets the widget to its default after every submission.
with st.form("datetime_form", clear_on_submit=True):
    form_value = st.datetime_input(
        "Datetime input 13 (form)", value=None, key="datetime_form_input"
    )
    submitted = st.form_submit_button("Submit datetime form")
    if submitted:
        st.write("Form submitted value:", form_value)


@st.fragment
def datetime_fragment() -> None:
    # Widget inside a fragment: its reruns should not affect the rest of the app.
    st.datetime_input(
        "Datetime input 14 (fragment)",
        value=None,
        key="datetime_fragment_input",
    )


datetime_fragment()

# --- Bound widgets (query-params) ---
bound_dt = st.datetime_input(
    "Bound datetime",
    value=BASE_DATETIME,
    key="bound_datetime",
    bind="query-params",
)
st.write("Bound datetime:", bound_dt)

bound_clearable_dt = st.datetime_input(
    "Bound clearable datetime",
    value=None,
    key="bound_clearable_dt",
    bind="query-params",
)
st.write("Bound clearable datetime:", bound_clearable_dt)

bound_minmax_dt = st.datetime_input(
    "Bound minmax datetime",
    value=BASE_DATETIME,
    key="bound_minmax_dt",
    min_value=datetime(2025, 1, 1, 0, 0),
    max_value=datetime(2025, 12, 31, 23, 59),
    bind="query-params",
)
st.write("Bound minmax datetime:", bound_minmax_dt)

st.markdown("Dynamic datetime input:")
# Both branches use the same key so the widget state survives the prop change.
if st.toggle("Update datetime input props"):
    dval = st.datetime_input(
        "Updated dynamic datetime input",
        value=BASE_DATETIME + timedelta(hours=3, minutes=15),
        width=250,
        help="updated help",
        key="dynamic_datetime_input_with_key",
        on_change=lambda: None,
        step=900,
        min_value=datetime(2020, 1, 1, 0, 0),
        max_value=datetime(2025, 12, 31, 23, 59),
    )
    st.write("Updated datetime input value:", dval)
else:
    dval = st.datetime_input(
        "Initial dynamic datetime input",
        value=BASE_DATETIME,
        width="stretch",
        help="initial help",
        key="dynamic_datetime_input_with_key",
        on_change=lambda: None,
        step=900,
        min_value=datetime(2010, 1, 1, 0, 0),
        max_value=datetime(2030, 12, 31, 23, 59),
    )
    st.write("Initial datetime input value:", dval)
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_datetime_input.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/st_datetime_input_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import re
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import (
ImageCompareFunction,
build_app_url,
wait_for_app_loaded,
wait_for_app_run,
)
from e2e_playwright.shared.app_utils import (
check_top_level_class,
click_toggle,
expect_help_tooltip,
expect_markdown,
expect_prefixed_markdown,
get_datetime_input,
get_element_by_key,
)
# Total number of st.datetime_input widgets rendered by st_datetime_input.py.
NUM_DATETIME_INPUTS = 18


def test_datetime_input_widget_rendering(
    app: Page, assert_snapshot: ImageCompareFunction
):
    """Test that the datetime input widgets are correctly rendered via screenshot matching."""
    datetime_inputs = app.get_by_test_id("stDateTimeInput")
    expect(datetime_inputs).to_have_count(NUM_DATETIME_INPUTS)

    # One snapshot per widget variant exercised by the app script.
    assert_snapshot(
        get_datetime_input(app, "Datetime input 1 (base)"),
        name="st_datetime_input-base",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 2 (help)"),
        name="st_datetime_input-help",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 3 (disabled)"),
        name="st_datetime_input-disabled",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 4 (hidden label)"),
        name="st_datetime_input-hidden_label",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 5 (collapsed label)"),
        name="st_datetime_input-collapsed_label",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 6 (with callback)"),
        name="st_datetime_input-callback",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 7 (step=60)"),
        name="st_datetime_input-step_60",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 8 (empty)"),
        name="st_datetime_input-empty",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 9 (empty, from state)"),
        name="st_datetime_input-state",
    )
    # Widget 10 has a markdown label, so match it by prefix instead of full text.
    assert_snapshot(
        get_datetime_input(
            app,
            re.compile(r"^Datetime input 10"),
        ),
        name="st_datetime_input-markdown_label",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 11 (width=200px)"),
        name="st_datetime_input-width_200px",
    )
    assert_snapshot(
        get_datetime_input(app, "Datetime input 12 (width='stretch')"),
        name="st_datetime_input-width_stretch",
    )
def test_datetime_input_dropdown(app: Page, assert_snapshot: ImageCompareFunction):
    """Test that the datetime input dropdown is correctly rendered."""
    datetime_input = get_datetime_input(app, "Datetime input 1 (base)")
    datetime_input.locator("input").click()

    # Wait for the calendar to be visible
    calendar = app.locator('[data-baseweb="calendar"]')
    expect(calendar).to_be_visible()
    assert_snapshot(calendar, name="st_datetime_input-dropdown")


def test_help_tooltip(app: Page):
    """Hovering the help icon shows the tooltip text configured in the app."""
    element_with_help = get_datetime_input(app, "Datetime input 2 (help)")
    expect_help_tooltip(app, element_with_help, "Help text")


def test_datetime_input_initial_values(app: Page):
    """The initial render reports the expected value for every widget."""
    expect_markdown(app, "Value 1: 2025-11-19 16:45:00")
    # Value 2 is BASE_DATETIME + 2 hours.
    expect_markdown(app, "Value 2: 2025-11-19 18:45:00")
    expect_markdown(app, "Value 3: 2025-11-19 16:45:00")
    expect_markdown(app, "Value 4: 2025-11-19 16:45:00")
    expect_markdown(app, "Value 5: 2025-11-19 16:45:00")
    expect_markdown(app, "Value 6: 2025-11-19 16:45:00")
    expect_markdown(app, "datetime input changed: False")
    expect_markdown(app, "Value 7: 2025-11-19 16:45:00")
    expect_markdown(app, "Value 8: None")
    # Value 9 is seeded from session state with BASE_DATETIME + 5 minutes.
    expect_markdown(app, "Value 9: 2025-11-19 16:50:00")
def test_handles_typing_selection(app: Page):
    """Typing a datetime and committing it updates the reported value."""
    datetime_input_field = get_datetime_input(app, "Datetime input 1 (base)").locator(
        "input"
    )
    datetime_input_field.type("2026/01/01, 09:30")
    datetime_input_field.press("Enter")

    # Click on the label of another widget to close the popover and commit the value
    get_datetime_input(app, "Datetime input 2 (help)").click()
    wait_for_app_run(app)

    expect_markdown(app, "Value 1: 2026-01-01 09:30:00")


def test_handles_datetime_selection_with_popover(app: Page):
    """Clearing the field and typing a fresh value commits via the popover."""
    datetime_input = get_datetime_input(app, "Datetime input 1 (base)")
    datetime_input_field = datetime_input.locator("input")

    # Clear the input and type the new value
    datetime_input_field.click()
    datetime_input_field.fill("")
    datetime_input_field.type("2025/11/25, 09:30")
    datetime_input_field.press("Enter")

    # Click on another element to close the popover and commit the value
    get_datetime_input(app, "Datetime input 2 (help)").click()
    wait_for_app_run(app)

    expect_markdown(app, "Value 1: 2025-11-25 09:30:00")


def test_step_interval_applied(app: Page):
    """A typed value is accepted by the widget configured with step=60."""
    datetime_input_field = get_datetime_input(
        app, "Datetime input 7 (step=60)"
    ).locator("input")
    datetime_input_field.type("2025/11/19, 16:46")
    datetime_input_field.press("Enter")

    # Click on another element to close the popover and commit the value
    get_datetime_input(app, "Datetime input 1 (base)").click()
    wait_for_app_run(app)

    expect_markdown(app, "Value 7: 2025-11-19 16:46:00")


def test_clearable_datetime_input(app: Page):
    """A widget created with value=None can be set and then cleared again."""
    datetime_input = get_datetime_input(app, "Datetime input 8 (empty)")
    datetime_input_field = datetime_input.locator("input")
    datetime_input_field.type("2025/11/19, 10:15")
    datetime_input_field.press("Enter")

    # Click on another element to close the popover and commit the value
    get_datetime_input(app, "Datetime input 1 (base)").click()
    wait_for_app_run(app)
    expect_markdown(app, "Value 8: 2025-11-19 10:15:00")

    datetime_input.get_by_role("button", name="Clear value").click()
    # Click on another element to close the popover and commit the cleared value
    get_datetime_input(app, "Datetime input 1 (base)").click()
    wait_for_app_run(app)
    expect_markdown(app, "Value 8: None")


def test_callback_invoked(app: Page):
    """Committing a new value runs the widget's on_change callback."""
    datetime_input_field = get_datetime_input(
        app, "Datetime input 6 (with callback)"
    ).locator("input")
    datetime_input_field.type("2025/12/01, 08:00")
    datetime_input_field.press("Enter")

    # Click on another element to close the popover and commit the value, triggering callback
    get_datetime_input(app, "Datetime input 1 (base)").click()
    wait_for_app_run(app)

    expect_markdown(app, "datetime input changed: True")
def test_form_submission_resets_value(app: Page):
    """Submitting the clear_on_submit form reports the typed value."""
    form_input = get_datetime_input(app, "Datetime input 13 (form)").locator("input")
    form_input.type("2025/12/24, 12:00")
    form_input.press("Enter")

    # Click submit button which will close the popover and submit the form
    app.get_by_role("button", name="Submit datetime form").click()
    wait_for_app_run(app)

    expect_markdown(app, "Form submitted value: 2025-12-24 12:00:00")


def test_fragment_reruns(app: Page):
    """Test that datetime input works correctly inside a fragment."""
    fragment_input = get_datetime_input(app, "Datetime input 14 (fragment)")
    fragment_input_field = fragment_input.locator("input")

    # Type a value in the fragment datetime input
    fragment_input_field.type("2025/11/19, 09:00")
    fragment_input_field.press("Enter")

    # Click on another element to close the popover and commit the value
    get_datetime_input(app, "Datetime input 1 (base)").click()
    wait_for_app_run(app)

    # Verify that other inputs are not affected (value1 should still be the original)
    expect_markdown(app, "Value 1: 2025-11-19 16:45:00")


def test_top_level_class_for_key(app: Page):
    """Check that custom CSS class is applied via key."""
    datetime_input = get_element_by_key(app, "dynamic_datetime_input_with_key")
    expect(datetime_input).to_be_visible()

    check_top_level_class(app, "stDateTimeInput")


def test_dynamic_props_update(app: Page):
    """Test that the datetime input can be updated dynamically while keeping the state."""
    # First verify the initial state
    expect_prefixed_markdown(
        app, "Initial datetime input value:", "2025-11-19 16:45:00"
    )

    # Verify the dynamic datetime input exists
    dynamic_input = get_element_by_key(app, "dynamic_datetime_input_with_key")
    expect(dynamic_input).to_be_visible()

    # Type a new value into the datetime input
    input_field = dynamic_input.locator("input")
    input_field.type("2025/12/01, 14:30", delay=50)
    input_field.press("Enter")

    # Click on a markdown element to close the popover without toggling
    app.get_by_text("Dynamic datetime input:").click()
    wait_for_app_run(app)
    expect(app.locator('[data-baseweb="calendar"]')).not_to_be_visible()

    expect_prefixed_markdown(
        app, "Initial datetime input value:", "2025-12-01 14:30:00"
    )

    # Click the toggle to update the datetime input props
    click_toggle(app, "Update datetime input props")

    # new datetime input is visible:
    expect(dynamic_input).to_contain_text("Updated dynamic datetime input")

    # Ensure the previously entered value remains visible
    # (same widget key on both branches, so state must survive the prop change)
    expect_prefixed_markdown(
        app, "Updated datetime input value:", "2025-12-01 14:30:00"
    )

    # Test dynamic min/max behavior when bounds change:
    # Toggle back to initial bounds (2010-2030)
    click_toggle(app, "Update datetime input props")
    expect_prefixed_markdown(
        app, "Initial datetime input value:", "2025-12-01 14:30:00"
    )

    # Set value to 2028/01/01 which is valid in initial bounds (2010-2030)
    input_field.fill("2028/01/01, 10:00")
    input_field.press("Enter")
    # Click on a markdown element to close the popover
    app.get_by_text("Dynamic datetime input:").click()
    wait_for_app_run(app)
    expect_prefixed_markdown(
        app, "Initial datetime input value:", "2028-01-01 10:00:00"
    )

    # Toggle to updated bounds (2020-2025) - value 2028 is outside, should reset to default
    click_toggle(app, "Update datetime input props")
    # The default value for updated state is BASE_DATETIME + 3h15m = 2025-11-19 20:00:00
    expect_prefixed_markdown(
        app, "Updated datetime input value:", "2025-11-19 20:00:00"
    )
    # Anti-regression: ensure the old out-of-bounds value is not retained
    expect(app.get_by_text("2028-01-01")).not_to_be_visible()
# --- Query param binding tests ---
def test_datetime_input_query_param_seeding(page: Page, app_base_url: str):
"""Test that datetime input value can be seeded from URL query params using ISO format."""
page.goto(build_app_url(app_base_url, query={"bound_datetime": "2025-11-20T10:30"}))
wait_for_app_loaded(page)
expect_prefixed_markdown(page, "Bound datetime:", "2025-11-20 10:30:00")
expect(page).to_have_url(re.compile(r"bound_datetime=2025-11-20T10%3A30"))
def test_datetime_input_query_param_clearable_empty(page: Page, app_base_url: str):
"""Test that a clearable datetime input (value=None) can be seeded as empty from URL."""
page.goto(build_app_url(app_base_url, query={"bound_clearable_dt": ""}))
wait_for_app_loaded(page)
expect_prefixed_markdown(page, "Bound clearable datetime:", "None")
def test_datetime_input_query_param_invalid_reverts_to_default(
    page: Page, app_base_url: str
):
    """A malformed query-param value is discarded in favor of the default."""
    bad_url = build_app_url(app_base_url, query={"bound_datetime": "not-a-datetime"})
    page.goto(bad_url)
    wait_for_app_loaded(page)
    # Falls back to the app's default value...
    expect_prefixed_markdown(page, "Bound datetime:", "2025-11-19 16:45:00")
    # ...and the bogus param is scrubbed from the URL.
    expect(page).not_to_have_url(re.compile(r"[?&]bound_datetime="))
def test_datetime_input_query_param_out_of_range_resets(page: Page, app_base_url: str):
    """A query-param value outside the widget's min/max reverts to the default."""
    out_of_range_url = build_app_url(
        app_base_url, query={"bound_minmax_dt": "2024-06-15T12:00"}
    )
    page.goto(out_of_range_url)
    wait_for_app_loaded(page)
    expect_prefixed_markdown(page, "Bound minmax datetime:", "2025-11-19 16:45:00")
    # The rejected value must not linger in the URL.
    expect(page).not_to_have_url(re.compile(r"[?&]bound_minmax_dt="))
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_datetime_input_test.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/elements/datetime_input_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""datetime_input unit test."""
from __future__ import annotations
from datetime import date, datetime, time, timedelta, timezone
from unittest.mock import MagicMock, patch
import pytest
from parameterized import parameterized
import streamlit as st
from streamlit.elements.widgets.time_widgets import DateTimeInputSerde
from streamlit.errors import (
StreamlitAPIException,
StreamlitInvalidBindValueError,
StreamlitInvalidWidthError,
)
from streamlit.proto.LabelVisibility_pb2 import LabelVisibility
from streamlit.testing.v1.app_test import AppTest
from streamlit.testing.v1.element_tree import DateTimeInput
from tests.delta_generator_test_case import DeltaGeneratorTestCase
from tests.streamlit.elements.layout_test_utils import WidthConfigFields
# Wire format used by the datetime_input proto for default/min/max strings
# (ISO-like, minute precision, no timezone).
DATETIME_FORMAT = "%Y-%m-%dT%H:%M"
class DateTimeInputTest(DeltaGeneratorTestCase):
    """Test ability to marshall datetime_input protos."""
    def test_just_label(self):
        """Test rendering with default value."""
        st.datetime_input("the label")
        proto = self.get_delta_from_queue().new_element.date_time_input
        assert proto.label == "the label"
        assert (
            proto.label_visibility.value
            == LabelVisibility.LabelVisibilityOptions.VISIBLE
        )
        assert proto.format == "YYYY/MM/DD"
        assert proto.step == timedelta(minutes=15).seconds
        assert not proto.disabled
        assert len(proto.default) == 1
        assert proto.default[0] is not None
        # The implicit default must parse with the wire format, not be in the
        # future, and land strictly inside the generated [min, max] window.
        parsed_default = datetime.strptime(proto.default[0], DATETIME_FORMAT)
        assert parsed_default <= datetime.now()
        parsed_min = datetime.strptime(proto.min, DATETIME_FORMAT)
        parsed_max = datetime.strptime(proto.max, DATETIME_FORMAT)
        assert parsed_min < parsed_default < parsed_max
    def test_none_value(self):
        """Test that it can be called with None as initial value."""
        st.datetime_input("the label", value=None)
        proto = self.get_delta_from_queue().new_element.date_time_input
        assert proto.label == "the label"
        # value=None renders an empty (clearable) widget: no default entry.
        assert not proto.default
    # (input value, expected normalized datetime) pairs:
    # date -> midnight of that day, time -> today at that time, str -> parsed.
    @parameterized.expand(
        [
            (
                datetime(2025, 11, 19, 16, 45),
                datetime(2025, 11, 19, 16, 45),
            ),
            (
                date(2025, 11, 19),
                datetime(2025, 11, 19, 0, 0),
            ),
            (
                time(16, 45),
                datetime.combine(date.today(), time(16, 45)),
            ),
            (
                "2025-11-19 16:45:00",
                datetime(2025, 11, 19, 16, 45),
            ),
        ]
    )
    def test_value_types(
        self, arg_value: datetime | date | time | str, expected: datetime
    ):
        """Test that it supports different types of values."""
        st.datetime_input("the label", arg_value)
        proto = self.get_delta_from_queue().new_element.date_time_input
        assert proto.label == "the label"
        assert proto.default[0] == expected.strftime(DATETIME_FORMAT)
    def test_min_max_values(self):
        """Test custom min/max values."""
        min_value = datetime(2020, 1, 1, 8, 0)
        max_value = datetime(2030, 1, 1, 18, 0)
        st.datetime_input(
            "Range",
            datetime(2025, 1, 1, 12, 0),
            min_value=min_value,
            max_value=max_value,
        )
        proto = self.get_delta_from_queue().new_element.date_time_input
        # Bounds are serialized with the same minute-precision wire format.
        assert proto.min == min_value.strftime(DATETIME_FORMAT)
        assert proto.max == max_value.strftime(DATETIME_FORMAT)
    def test_label_visibility(self):
        """Test that label visibility works."""
        st.datetime_input("the label", label_visibility="hidden")
        proto = self.get_delta_from_queue().new_element.date_time_input
        assert (
            proto.label_visibility.value
            == LabelVisibility.LabelVisibilityOptions.HIDDEN
        )
    def test_label_visibility_wrong_value(self):
        """Test that invalid label visibility raises."""
        with pytest.raises(StreamlitAPIException):
            st.datetime_input("the label", label_visibility="wrong_value")
    def test_step_validation(self):
        """Test invalid step values."""
        # NOTE(review): True is rejected presumably because bool is an int
        # subclass; 30 and timedelta(hours=24) appear to be outside the
        # allowed step range — confirm exact limits in time_widgets.
        with pytest.raises(StreamlitAPIException):
            st.datetime_input("The label", step=True)
        with pytest.raises(StreamlitAPIException):
            st.datetime_input("The label", step=(1, 0))
        with pytest.raises(StreamlitAPIException):
            st.datetime_input("The label", step=30)
        with pytest.raises(StreamlitAPIException):
            st.datetime_input("The label", step=timedelta(hours=24))
    def test_format_validation(self):
        """Test invalid format raises."""
        # Two-digit year ("YY") is not an accepted format token.
        with pytest.raises(StreamlitAPIException):
            st.datetime_input("the label", format="YY/MM/DD")
    def test_width_config_default(self):
        """Test that default width is 'stretch'."""
        st.datetime_input("the label")
        proto = self.get_delta_from_queue().new_element
        assert (
            proto.width_config.WhichOneof("width_spec")
            == WidthConfigFields.USE_STRETCH.value
        )
        assert proto.width_config.use_stretch
    def test_width_config_pixel(self):
        """Test that pixel width works properly."""
        st.datetime_input("the label", width=200)
        proto = self.get_delta_from_queue().new_element
        assert (
            proto.width_config.WhichOneof("width_spec")
            == WidthConfigFields.PIXEL_WIDTH.value
        )
        assert proto.width_config.pixel_width == 200
    def test_invalid_width(self):
        """Test that invalid width raises."""
        with pytest.raises(StreamlitInvalidWidthError):
            st.datetime_input("the label", width="invalid")  # type: ignore[arg-type]
    def test_inside_column(self):
        """Test that it works correctly inside of a column."""
        col1, _ = st.columns([3, 2])
        with col1:
            st.datetime_input("foo")
        all_deltas = self.get_all_deltas_from_queue()
        # NOTE(review): 4 deltas appear to be the column container, the two
        # columns, and the widget itself — confirm if layout protocol changes.
        assert len(all_deltas) == 4
        proto = self.get_delta_from_queue().new_element.date_time_input
        assert proto.label == "foo"
    def test_stable_id_with_key(self):
        """Test ID stability when key provided."""
        # _register_element_id is patched out so the same key can be used for
        # two widget instantiations in one run without duplicate-ID errors.
        with patch(
            "streamlit.elements.lib.utils._register_element_id",
            return_value=MagicMock(),
        ):
            st.datetime_input(
                label="Label 1",
                key="datetime_key",
                value=datetime(2025, 1, 1, 9, 0),
                min_value=datetime(2020, 1, 1, 0, 0),
                max_value=datetime(2030, 1, 1, 0, 0),
                format="YYYY/MM/DD",
                step=timedelta(minutes=15),
            )
            proto1 = self.get_delta_from_queue().new_element.date_time_input
            id1 = proto1.id
            st.datetime_input(
                label="Label 2",
                key="datetime_key",
                value=datetime(2025, 1, 2, 9, 0),
                min_value=datetime(2020, 1, 1, 0, 0),
                max_value=datetime(2030, 1, 1, 0, 0),
                format="YYYY/MM/DD",
                step=timedelta(minutes=15),
            )
            proto2 = self.get_delta_from_queue().new_element.date_time_input
            id2 = proto2.id
            # Label and value differ, but the key keeps the ID stable.
            assert id1 == id2
    def test_whitelisted_key_changes(self):
        """Test that whitelisted kwargs update the ID."""
        with patch(
            "streamlit.elements.lib.utils._register_element_id",
            return_value=MagicMock(),
        ):
            base_kwargs = {
                "label": "Label",
                "key": "datetime_key",
                "value": datetime(2025, 1, 1, 9, 0),
                "min_value": datetime(2020, 1, 1, 0, 0),
                "max_value": datetime(2030, 1, 1, 0, 0),
                "format": "YYYY/MM/DD",
                "step": timedelta(minutes=15),
            }
            st.datetime_input(**base_kwargs)
            proto1 = self.get_delta_from_queue().new_element.date_time_input
            id1 = proto1.id
            # Changing `step` participates in ID computation even with a key.
            base_kwargs["step"] = timedelta(minutes=30)
            st.datetime_input(**base_kwargs)
            proto2 = self.get_delta_from_queue().new_element.date_time_input
            id2 = proto2.id
            assert id1 != id2
    def test_min_value_now(self):
        """Test min_value='now'."""
        class MockDatetime(datetime):
            @classmethod
            def now(cls, tz=None):
                return cls(2024, 1, 1, 12, 0, 0)
        with patch("streamlit.elements.widgets.time_widgets.datetime", MockDatetime):
            # We must use MockDatetime for the value passed in, because time_widgets.datetime
            # is patched to MockDatetime, so isinstance(val, datetime) checks against MockDatetime.
            # Real datetime objects would fail this check.
            val = MockDatetime(2024, 1, 1, 13, 0, 0)
            st.datetime_input("min_now", value=val, min_value="now")
            proto = self.get_delta_from_queue().new_element.date_time_input
            # min should be exactly the mocked now
            assert proto.min == "2024-01-01T12:00"
    def test_max_value_now(self):
        """Test max_value='now'."""
        class MockDatetime(datetime):
            @classmethod
            def now(cls, tz=None):
                return cls(2024, 1, 1, 12, 0, 0)
        with patch("streamlit.elements.widgets.time_widgets.datetime", MockDatetime):
            # See test_min_value_now for why MockDatetime is used for `val`.
            val = MockDatetime(2024, 1, 1, 11, 0, 0)
            st.datetime_input("max_now", value=val, max_value="now")
            proto = self.get_delta_from_queue().new_element.date_time_input
            # max should be exactly the mocked now
            assert proto.max == "2024-01-01T12:00"
    def test_min_max_exception(self):
        """Test that min_value > max_value raises an exception."""
        min_value = datetime(2030, 1, 1, 12, 0)
        max_value = datetime(2020, 1, 1, 12, 0)
        with pytest.raises(StreamlitAPIException):
            st.datetime_input("Label", min_value=min_value, max_value=max_value)
    def test_initial_value_out_of_bounds_exception(self):
        """Test that initial value out of min/max bounds raises an exception."""
        min_value = datetime(2020, 1, 1, 12, 0)
        max_value = datetime(2030, 1, 1, 12, 0)
        # Below min:
        with pytest.raises(StreamlitAPIException):
            st.datetime_input(
                "Label",
                value=datetime(2010, 1, 1),
                min_value=min_value,
                max_value=max_value,
            )
        # Above max:
        with pytest.raises(StreamlitAPIException):
            st.datetime_input(
                "Label",
                value=datetime(2040, 1, 1),
                min_value=min_value,
                max_value=max_value,
            )
    def test_timezone_handling(self):
        """Test that timezone-aware datetimes are normalized to naive."""
        # Create a timezone-aware datetime
        dt_aware = datetime(2025, 1, 1, 12, 0, tzinfo=timezone.utc)
        st.datetime_input("Label", value=dt_aware)
        proto = self.get_delta_from_queue().new_element.date_time_input
        # Proto string should not contain timezone info
        assert proto.default[0] == "2025-01-01T12:00"
    def test_invalid_value_exception(self):
        """Test that passing an invalid value raises an exception."""
        with pytest.raises(StreamlitAPIException):
            st.datetime_input("Label", value="invalid-date-string")
    def test_help_and_disabled(self):
        """Test help and disabled parameters."""
        st.datetime_input("Label", help="The help text", disabled=True)
        proto = self.get_delta_from_queue().new_element.date_time_input
        assert proto.help == "The help text"
        assert proto.disabled is True
def test_datetime_input_interaction():
    """Set and then clear a clearable (value=None) datetime_input via AppTest."""
    def script():
        import streamlit as st
        st.datetime_input("the label", value=None)
    at = AppTest.from_function(script).run()
    dt_widget = at.datetime_input[0]
    # Starts out empty because the app passes value=None.
    assert dt_widget.value is None
    picked = datetime(2025, 11, 19, 16, 45)
    at = dt_widget.set_value(picked).run()
    dt_widget = at.datetime_input[0]
    assert dt_widget.value == picked
    # Clearing the widget brings it back to None.
    at = dt_widget.set_value(None).run()
    assert at.datetime_input[0].value is None
def test_datetime_input_min_max_validation():
    """Values outside [min_value, max_value] are rejected; the old value is kept."""
    def script():
        from datetime import datetime
        import streamlit as st
        min_value = datetime(2020, 1, 1, 8, 0)
        max_value = datetime(2030, 1, 1, 18, 0)
        initial_value = datetime(2025, 1, 1, 12, 0)
        st.datetime_input(
            "the label",
            value=initial_value,
            min_value=min_value,
            max_value=max_value,
        )
    at = AppTest.from_function(script).run()
    initial = datetime(2025, 1, 1, 12, 0)
    assert at.datetime_input[0].value == initial
    # Below min: rejected, the widget keeps its current value.
    at = at.datetime_input[0].set_value(datetime(2019, 12, 31, 23, 0)).run()
    assert at.datetime_input[0].value == initial
    # Above max: also rejected.
    at = at.datetime_input[0].set_value(datetime(2030, 1, 2, 0, 0)).run()
    assert at.datetime_input[0].value == initial
    # In-bounds values are accepted normally.
    in_bounds = datetime(2025, 6, 15, 14, 30)
    at = at.datetime_input[0].set_value(in_bounds).run()
    assert at.datetime_input[0].value == in_bounds
def test_datetime_input_callback():
    """on_change fires when the widget value changes."""
    def script():
        from datetime import datetime
        import streamlit as st
        if "called" not in st.session_state:
            st.session_state.called = False
        def callback():
            st.session_state.called = True
        st.datetime_input(
            "Label", value=datetime(2020, 1, 1, 10, 0), on_change=callback, key="dt"
        )
    at = AppTest.from_function(script).run()
    changed_to = datetime(2025, 1, 1, 12, 0)
    at.datetime_input[0].set_value(changed_to).run()
    # The callback flipped the flag and session state holds the new value.
    assert at.session_state.called
    assert at.session_state.dt == changed_to
@patch("streamlit.elements.lib.policies._shown_default_value_warning", new=False)
def test_session_state_takes_precedence():
    """A pre-existing session-state entry wins over the widget's default value."""
    def script():
        from datetime import datetime
        import streamlit as st
        if "my_datetime" not in st.session_state:
            st.session_state.my_datetime = datetime(2024, 12, 25, 10, 0)
        st.datetime_input("Label", value=datetime(2025, 1, 1), key="my_datetime")
    at = AppTest.from_function(script).run()
    # The session-state value, not the declared default, is reported.
    assert at.datetime_input[0].value == datetime(2024, 12, 25, 10, 0)
def test_dynamic_min_value_resets_value_when_below_new_min():
    """Test that value resets to default when dynamically changing min_value makes current value invalid."""
    # The script toggles between two bound configurations keyed by session
    # state; the widget keeps the same `key` so its state carries across reruns.
    def script():
        from datetime import datetime
        import streamlit as st
        if "update_bounds" not in st.session_state:
            st.session_state["update_bounds"] = False
        if st.session_state["update_bounds"]:
            # New min_value makes the previous value invalid
            value = st.datetime_input(
                "datetime",
                min_value=datetime(2024, 6, 1, 0, 0),
                max_value=datetime(2024, 12, 31, 23, 59),
                key="datetime",
                value=datetime(2024, 7, 15, 12, 0),
            )
        else:
            value = st.datetime_input(
                "datetime",
                min_value=datetime(2024, 1, 1, 0, 0),
                max_value=datetime(2024, 12, 31, 23, 59),
                key="datetime",
                value=datetime(2024, 5, 15, 12, 0),
            )
        st.write(f"value: {value}")
        if st.button("Toggle bounds"):
            st.session_state["update_bounds"] = not st.session_state["update_bounds"]
    at = AppTest.from_function(script).run()
    assert at.datetime_input[0].value == datetime(2024, 5, 15, 12, 0)
    # Set value to March 1 (valid with min_value=Jan 1)
    at = at.datetime_input[0].set_value(datetime(2024, 3, 1, 10, 0)).run()
    assert at.datetime_input[0].value == datetime(2024, 3, 1, 10, 0)
    # Toggle bounds - the click updates session_state["update_bounds"] to True
    at = at.button[0].click().run()
    # AppTest requires an additional run to process the widget with the new bounds
    at = at.run()
    # Now min_value=June 1, so March 1 is invalid and should reset to default (July 15)
    assert at.datetime_input[0].value == datetime(2024, 7, 15, 12, 0)
def test_dynamic_max_value_resets_value_when_above_new_max():
    """Test that value resets to default when dynamically changing max_value makes current value invalid."""
    # Mirror image of the min_value test above: here the upper bound shrinks.
    def script():
        from datetime import datetime
        import streamlit as st
        if "update_bounds" not in st.session_state:
            st.session_state["update_bounds"] = False
        if st.session_state["update_bounds"]:
            # New max_value makes the previous value invalid
            value = st.datetime_input(
                "datetime",
                min_value=datetime(2024, 1, 1, 0, 0),
                max_value=datetime(2024, 6, 30, 23, 59),
                key="datetime",
                value=datetime(2024, 3, 15, 12, 0),
            )
        else:
            value = st.datetime_input(
                "datetime",
                min_value=datetime(2024, 1, 1, 0, 0),
                max_value=datetime(2024, 12, 31, 23, 59),
                key="datetime",
                value=datetime(2024, 5, 15, 12, 0),
            )
        st.write(f"value: {value}")
        if st.button("Toggle bounds"):
            st.session_state["update_bounds"] = not st.session_state["update_bounds"]
    at = AppTest.from_function(script).run()
    assert at.datetime_input[0].value == datetime(2024, 5, 15, 12, 0)
    # Set value to October 1 (valid with max_value=Dec 31)
    at = at.datetime_input[0].set_value(datetime(2024, 10, 1, 14, 0)).run()
    assert at.datetime_input[0].value == datetime(2024, 10, 1, 14, 0)
    # Toggle bounds - the click updates session_state["update_bounds"] to True
    at = at.button[0].click().run()
    # AppTest requires an additional run to process the widget with the new bounds
    at = at.run()
    # Now max_value=June 30, so October 1 is invalid and should reset to default (March 15)
    assert at.datetime_input[0].value == datetime(2024, 3, 15, 12, 0)
def test_dynamic_bounds_preserves_valid_value():
    """Test that value is preserved when it remains valid after bound changes."""
    # Unlike the reset tests above, the current value stays inside the new
    # bounds here, so no reset to the default should occur.
    def script():
        from datetime import datetime
        import streamlit as st
        if "update_bounds" not in st.session_state:
            st.session_state["update_bounds"] = False
        if st.session_state["update_bounds"]:
            # Changing bounds but May 15 is still valid (between Apr 1 and Sep 30)
            value = st.datetime_input(
                "datetime",
                min_value=datetime(2024, 4, 1, 0, 0),
                max_value=datetime(2024, 9, 30, 23, 59),
                key="datetime",
                value=datetime(2024, 5, 15, 12, 0),
            )
        else:
            value = st.datetime_input(
                "datetime",
                min_value=datetime(2024, 1, 1, 0, 0),
                max_value=datetime(2024, 12, 31, 23, 59),
                key="datetime",
                value=datetime(2024, 5, 15, 12, 0),
            )
        st.write(f"value: {value}")
        if st.button("Toggle bounds"):
            st.session_state["update_bounds"] = not st.session_state["update_bounds"]
    at = AppTest.from_function(script).run()
    assert at.datetime_input[0].value == datetime(2024, 5, 15, 12, 0)
    # Toggle bounds - the click updates session_state["update_bounds"] to True
    at = at.button[0].click().run()
    # AppTest requires an additional run to process the widget with the new bounds
    at = at.run()
    # Value should be preserved because it's still within the new bounds
    assert at.datetime_input[0].value == datetime(2024, 5, 15, 12, 0)
def test_dynamic_bounds_preserves_user_set_valid_value():
    """Test that a user-set value (different from default) is preserved when still valid after bound changes."""
    # Variant of the preservation test above: the preserved value was set by
    # the user and differs from the declared default, guarding against an
    # accidental reset-to-default on bound changes.
    def script():
        from datetime import datetime
        import streamlit as st
        if "update_bounds" not in st.session_state:
            st.session_state["update_bounds"] = False
        if st.session_state["update_bounds"]:
            # New bounds: Apr 1 to Sep 30 - July 1 is still valid
            value = st.datetime_input(
                "datetime",
                min_value=datetime(2024, 4, 1, 0, 0),
                max_value=datetime(2024, 9, 30, 23, 59),
                key="datetime",
                value=datetime(2024, 5, 15, 12, 0),
            )
        else:
            value = st.datetime_input(
                "datetime",
                min_value=datetime(2024, 1, 1, 0, 0),
                max_value=datetime(2024, 12, 31, 23, 59),
                key="datetime",
                value=datetime(2024, 5, 15, 12, 0),
            )
        st.write(f"value: {value}")
        if st.button("Toggle bounds"):
            st.session_state["update_bounds"] = not st.session_state["update_bounds"]
    at = AppTest.from_function(script).run()
    assert at.datetime_input[0].value == datetime(2024, 5, 15, 12, 0)
    # Set value to July 1 (different from default May 15, valid in both bound ranges)
    at = at.datetime_input[0].set_value(datetime(2024, 7, 1, 10, 0)).run()
    assert at.datetime_input[0].value == datetime(2024, 7, 1, 10, 0)
    # Toggle bounds - the click updates session_state["update_bounds"] to True
    at = at.button[0].click().run()
    # AppTest requires an additional run to process the widget with the new bounds
    at = at.run()
    # User-set value (July 1) should be preserved, not reset to default (May 15)
    assert at.datetime_input[0].value == datetime(2024, 7, 1, 10, 0)
    # Ensure it's not reset to the default value
    assert at.datetime_input[0].value != datetime(2024, 5, 15, 12, 0)
class DateTimeInputBindQueryParamsTest(DeltaGeneratorTestCase):
    """Test query param binding for st.datetime_input."""

    def _last_proto(self):
        # Proto of the most recently enqueued datetime_input element.
        return self.get_delta_from_queue().new_element.date_time_input

    def test_bind_query_params_sets_query_param_key(self):
        """bind='query-params' propagates the widget key into query_param_key."""
        st.datetime_input("the label", key="my_key", bind="query-params")
        assert self._last_proto().query_param_key == "my_key"

    def test_no_bind_does_not_set_query_param_key(self):
        """Without bind, query_param_key stays empty."""
        st.datetime_input("the label", key="my_key")
        assert self._last_proto().query_param_key == ""

    def test_bind_requires_key(self):
        """bind without a key raises StreamlitAPIException."""
        with pytest.raises(StreamlitAPIException):
            st.datetime_input("the label", bind="query-params")

    def test_invalid_bind_value_raises_exception(self):
        """An unknown bind value raises StreamlitInvalidBindValueError."""
        with pytest.raises(StreamlitInvalidBindValueError, match=r"invalid-value"):
            st.datetime_input("the label", key="my_key", bind="invalid-value")

    def test_bind_query_params_with_explicit_value(self):
        """Binding coexists with an explicitly provided default value."""
        st.datetime_input(
            "the label",
            value=datetime(2025, 11, 19, 16, 45),
            key="my_key",
            bind="query-params",
        )
        proto = self._last_proto()
        assert proto.query_param_key == "my_key"
        assert proto.default == ["2025-11-19T16:45"]

    def test_bind_query_params_with_none_value(self):
        """Binding works for a clearable widget (value=None): empty default."""
        st.datetime_input("the label", value=None, key="my_key", bind="query-params")
        proto = self._last_proto()
        assert proto.query_param_key == "my_key"
        assert list(proto.default) == []
class TestDateTimeInputSerdeISO:
    """Tests for DateTimeInputSerde ISO 8601 format parsing."""

    @staticmethod
    def _serde(
        value=datetime(2025, 1, 15, 10, 0),
        min_dt=datetime(2020, 1, 1, 0, 0),
        max_dt=datetime(2030, 12, 31, 23, 59),
    ):
        # Shared factory: a wide-bounds serde used by most tests below;
        # individual tests override the defaults where bounds matter.
        return DateTimeInputSerde(value=value, min=min_dt, max=max_dt)

    def test_deserialize_internal_format(self):
        """The internal 'YYYY/MM/DD, HH:MM' wire format is parsed correctly."""
        result = self._serde().deserialize(["2025/01/15, 10:00"])
        assert result == datetime(2025, 1, 15, 10, 0)

    def test_deserialize_iso_format(self):
        """The ISO 'YYYY-MM-DDThh:mm' format is parsed correctly."""
        result = self._serde().deserialize(["2025-06-20T14:30"])
        assert result == datetime(2025, 6, 20, 14, 30)

    def test_deserialize_invalid_format_reverts_to_default(self):
        """Unparseable strings fall back to the configured default."""
        result = self._serde().deserialize(["not-a-datetime"])
        assert result == datetime(2025, 1, 15, 10, 0)

    def test_deserialize_none_returns_default(self):
        """A None ui_value yields the configured default."""
        assert self._serde().deserialize(None) == datetime(2025, 1, 15, 10, 0)

    def test_deserialize_empty_returns_default(self):
        """An empty list yields the configured default."""
        assert self._serde().deserialize([]) == datetime(2025, 1, 15, 10, 0)

    def test_deserialize_out_of_bounds_reverts_to_default(self):
        """Values below min fall back to the default."""
        serde = self._serde(
            value=datetime(2025, 6, 15, 12, 0),
            min_dt=datetime(2025, 1, 1, 0, 0),
            max_dt=datetime(2025, 12, 31, 23, 59),
        )
        assert serde.deserialize(["2024-06-15T12:00"]) == datetime(2025, 6, 15, 12, 0)

    def test_deserialize_above_max_reverts_to_default(self):
        """Values above max fall back to the default."""
        serde = self._serde(
            value=datetime(2025, 6, 15, 12, 0),
            min_dt=datetime(2025, 1, 1, 0, 0),
            max_dt=datetime(2025, 12, 31, 23, 59),
        )
        assert serde.deserialize(["2026-01-01T00:00"]) == datetime(2025, 6, 15, 12, 0)
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/elements/datetime_input_test.py",
"license": "Apache License 2.0",
"lines": 608,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/tests/streamlit/typing/datetime_input_types.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import assert_type
if TYPE_CHECKING:
    from datetime import date, datetime, time, timedelta
    from streamlit.elements.widgets.time_widgets import TimeWidgetsMixin
    # Static-typing exercises only: these assert_type() statements are checked
    # by the type checker and never executed at runtime (TYPE_CHECKING guard).
    datetime_input = TimeWidgetsMixin().datetime_input
    # Single datetime input
    assert_type(datetime_input("foo", datetime(2025, 11, 19, 16, 45)), datetime)
    assert_type(datetime_input("foo", date(2025, 11, 19)), datetime)
    assert_type(datetime_input("foo", time(16, 45)), datetime)
    assert_type(datetime_input("foo", value="now"), datetime)
    assert_type(datetime_input("foo", value="2025-11-19 16:45"), datetime)
    # Should return datetime or None if value is None
    assert_type(datetime_input("foo", value=None), datetime | None)
    assert_type(
        datetime_input(
            "foo",
            value=None,
            min_value=datetime(2020, 1, 1, 0, 0),
            max_value=datetime(2030, 1, 1, 0, 0),
        ),
        datetime | None,
    )
    # With min_value / max_value using different input types
    assert_type(
        datetime_input(
            "foo",
            datetime(2025, 11, 19, 16, 45),
            min_value=date(2020, 1, 1),
            max_value="2030-01-01 23:59",
        ),
        datetime,
    )
    # With format and step overrides
    assert_type(
        datetime_input(
            "foo",
            datetime(2025, 11, 19, 16, 45),
            format="MM/DD/YYYY",
            step=900,
        ),
        datetime,
    )
    assert_type(
        datetime_input(
            "foo",
            datetime(2025, 11, 19, 16, 45),
            format="DD.MM.YYYY",
            step=timedelta(minutes=30),
        ),
        datetime,
    )
    # With disabled and label visibility options
    assert_type(
        datetime_input("foo", datetime(2025, 11, 19, 16, 45), disabled=True), datetime
    )
    assert_type(
        datetime_input(
            "foo", datetime(2025, 11, 19, 16, 45), label_visibility="hidden"
        ),
        datetime,
    )
    # With callbacks
    def on_change_callback(value: datetime | None) -> None:
        pass
    assert_type(
        datetime_input(
            "foo",
            datetime(2025, 11, 19, 16, 45),
            on_change=on_change_callback,
            args=(1,),
            kwargs={"key": "value"},
        ),
        datetime,
    )
    # With key and help
    assert_type(
        datetime_input("foo", datetime(2025, 11, 19, 16, 45), key="unique_key"),
        datetime,
    )
    assert_type(
        datetime_input(
            "foo",
            datetime(2025, 11, 19, 16, 45),
            help="This is a helpful tooltip",
        ),
        datetime,
    )
    # Test with bind parameter
    assert_type(
        datetime_input(
            "foo", datetime(2025, 11, 19, 16, 45), key="my_key", bind="query-params"
        ),
        datetime,
    )
    assert_type(
        datetime_input("foo", value=None, key="my_key", bind="query-params"),
        datetime | None,
    )
    assert_type(
        datetime_input("foo", datetime(2025, 11, 19, 16, 45), key="my_key", bind=None),
        datetime,
    )
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/typing/datetime_input_types.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/st_download_button_deferred.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test app for deferred download button functionality."""
import time
import streamlit as st
st.header("Deferred Download Button Tests")
st.subheader("1. Regular Download Button (for comparison)")
# Baseline: data is a plain string, serialized eagerly at render time.
st.download_button(
    "Download Regular String",
    data="This is regular string data",
    file_name="regular.txt",
    mime="text/plain",
)
st.subheader("2. Deferred Download with Callable")
def generate_csv_data() -> str:
    """Build the CSV payload lazily, at download-click time."""
    rows = (
        "Name,Age,City",
        "Alice,30,NYC",
        "Bob,25,LA",
        "Charlie,35,Chicago",
    )
    return "\n".join(rows)
# data is a callable, so the CSV is generated lazily when the user clicks.
st.download_button(
    "Download CSV (Deferred)",
    data=generate_csv_data,
    file_name="data.csv",
    mime="text/csv",
    on_click="ignore",
)
st.subheader("3. Deferred Download with Lambda")
# A lambda also works as a deferred data source; the timestamp proves the
# content is produced at click time, not at render time.
st.download_button(
    "Download Lambda Text",
    data=lambda: f"Generated at {time.time()}\nThis is dynamically generated text",
    file_name="lambda_output.txt",
    mime="text/plain",
    on_click="ignore",
)
st.subheader("4. Deferred Download Returning Bytes")
def generate_binary_data() -> bytes:
    """Produce a small binary payload containing NUL/control bytes."""
    # bytes(range(6)) == b"\x00\x01\x02\x03\x04\x05"
    return b"Binary data: " + bytes(range(6))
# Deferred callables may return bytes as well as str.
st.download_button(
    "Download Binary (Deferred)",
    data=generate_binary_data,
    file_name="binary.dat",
    mime="application/octet-stream",
    on_click="ignore",
)
st.subheader("5. Deferred Download that Raises Error")
def failing_callable() -> str:
    """Always raise, to exercise deferred-download error handling."""
    message = "This callable intentionally fails!"
    raise ValueError(message)
# The data callable raises when invoked; the click should surface an error
# instead of delivering a file.
st.download_button(
    "Download Error (Should Fail)",
    data=failing_callable,
    file_name="error.txt",
    mime="text/plain",
    on_click="ignore",
)
st.subheader("6. Large Deferred Download")
def generate_large_data() -> str:
    """Generate a larger dataset for testing big deferred downloads.

    Returns:
        1000 newline-separated rows, each of the form "Row <i>: " followed
        by 100 'x' characters (~100 KB total).
    """
    # str.join over a generator replaces the manual append loop (PERF401):
    # same output, built at C speed without an intermediate list append cycle.
    return "\n".join(f"Row {i}: " + "x" * 100 for i in range(1000))
st.download_button(
    "Download Large File (Deferred)",
    data=generate_large_data,
    file_name="large.txt",
    mime="text/plain",
    on_click="ignore",
)
st.subheader("7. Deferred Download with on_click='ignore'")
# on_click="ignore" suppresses the script rerun on click.
st.download_button(
    "Download (No Rerun)",
    data=lambda: "No rerun content",
    file_name="no_rerun.txt",
    mime="text/plain",
    on_click="ignore",
)
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_download_button_deferred.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:e2e_playwright/st_download_button_deferred_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from playwright.sync_api import Page, expect
def test_deferred_download_csv_success(app: Page):
    """Verify that a deferred CSV download produces the expected file."""
    button = app.get_by_role("button", name="Download CSV (Deferred)")
    expect(button).to_be_visible()

    # A deferred download only starts after the click round-trips to the
    # backend, so the download listener must be registered before clicking.
    with app.expect_download() as dl_info:
        button.click()
    downloaded = dl_info.value

    assert downloaded.suggested_filename == "data.csv"
    # The file content must match what the app's CSV callable generated.
    csv_text = downloaded.path().read_text()
    for expected_line in ("Name,Age,City", "Alice,30,NYC", "Bob,25,LA"):
        assert expected_line in csv_text
def test_deferred_download_lambda_text(app: Page):
    """Verify a deferred download whose data callable is a lambda."""
    button = app.get_by_role("button", name="Download Lambda Text")
    expect(button).to_be_visible()

    # Register the listener first: the file is produced asynchronously
    # after the click reaches the backend.
    with app.expect_download() as dl_info:
        button.click()
    downloaded = dl_info.value

    assert downloaded.suggested_filename == "lambda_output.txt"
    # The lambda embeds dynamic text; check the stable substrings.
    text = downloaded.path().read_text()
    assert "Generated at " in text
    assert "This is dynamically generated text" in text
def test_deferred_download_binary(app: Page):
    """Verify a deferred download that yields raw bytes."""
    button = app.get_by_role("button", name="Download Binary (Deferred)")
    expect(button).to_be_visible()

    # Deferred flow: click -> backend executes callable -> browser download.
    with app.expect_download() as dl_info:
        button.click()
    downloaded = dl_info.value

    assert downloaded.suggested_filename == "binary.dat"
    # The exact byte sequence produced by the app's binary callable.
    assert downloaded.path().read_bytes() == b"Binary data: \x00\x01\x02\x03\x04\x05"
def test_deferred_download_error_handling(app: Page):
    """Verify that a failing data callable surfaces an error element."""
    button = app.get_by_role("button", name="Download Error (Should Fail)")
    expect(button).to_be_visible()

    # No download listener here: the backend request is expected to fail.
    button.click()

    # The error element appears once the failed async request completes.
    error_el = app.get_by_test_id("stDownloadButtonError")
    expect(error_el).to_be_visible(timeout=5000)
    expect(error_el).to_contain_text("Callable execution failed")
def test_deferred_download_with_ignore_rerun(app: Page):
    """Verify a deferred download configured with on_click='ignore'."""
    button = app.get_by_role("button", name="Download (No Rerun)")
    expect(button).to_be_visible()

    # Deferred flow: register listener, then click.
    with app.expect_download() as dl_info:
        button.click()
    downloaded = dl_info.value

    assert downloaded.suggested_filename == "no_rerun.txt"
    assert downloaded.path().read_text() == "No rerun content"
    # With on_click="ignore" the click must not trigger a script rerun.
    # A stronger assertion would track app state across the click; here we
    # only confirm the download itself completes.
def test_regular_download_still_works(app: Page):
    """Verify the classic (non-deferred) download path is unaffected."""
    button = app.get_by_role("button", name="Download Regular String")
    expect(button).to_be_visible()

    with app.expect_download() as dl_info:
        button.click()
    downloaded = dl_info.value

    assert downloaded.suggested_filename == "regular.txt"
    assert downloaded.path().read_text() == "This is regular string data"
def test_deferred_download_button_count(app: Page):
    """Verify that every download button in the app is rendered."""
    buttons = app.get_by_test_id("stDownloadButton")
    # Seven buttons total: one regular plus six deferred variants.
    expect(buttons).to_have_count(7)
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_download_button_deferred_test.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:lib/streamlit/runtime/download_data_util.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import io
def convert_data_to_bytes_and_infer_mime(
    data: object, unsupported_error: Exception
) -> tuple[bytes, str]:
    """Normalize *data* to raw bytes and choose a default mimetype.

    String-like inputs map to ``text/plain``; all binary inputs map to
    ``application/octet-stream``. Raises *unsupported_error* for any type
    not handled below.
    """
    text_mime = "text/plain"
    binary_mime = "application/octet-stream"

    if isinstance(data, str):
        return data.encode(), text_mime
    if isinstance(data, io.TextIOWrapper):
        # Read the whole text stream and encode with the default codec.
        return data.read().encode(), text_mime
    if isinstance(data, bytes):
        return data, binary_mime
    if isinstance(data, io.BytesIO):
        data.seek(0)
        return data.getvalue(), binary_mime
    if isinstance(data, io.BufferedReader):
        # Rewind first so a partially consumed file is read in full.
        data.seek(0)
        return data.read(), binary_mime
    if isinstance(data, io.RawIOBase):
        data.seek(0)
        # RawIOBase.read() may return None (e.g. non-blocking stream).
        return data.read() or b"", binary_mime
    raise unsupported_error
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/streamlit/runtime/download_data_util.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
streamlit/streamlit:lib/tests/streamlit/runtime/download_data_util_test.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for convert_data_to_bytes_and_infer_mime"""
from __future__ import annotations
import io
import os
import tempfile
import unittest
import pytest
from streamlit.runtime.download_data_util import convert_data_to_bytes_and_infer_mime
class ConvertDataToBytesAndInferMimeTest(unittest.TestCase):
    """Exercises every supported input type plus the unsupported fallback."""

    @staticmethod
    def _make_temp_path(text: bool = False) -> str:
        # Create an empty temp file and return its path (descriptor closed).
        fd, path = tempfile.mkstemp(text=text)
        os.close(fd)
        return path

    @staticmethod
    def _remove_quietly(path: str) -> None:
        # Best-effort cleanup; the file may already be gone.
        try:
            os.unlink(path)
        except FileNotFoundError:
            pass

    def test_str_is_converted_to_bytes_and_text_plain(self):
        """Plain strings are encoded to bytes and labelled text/plain."""
        payload, mime = convert_data_to_bytes_and_infer_mime(
            "hello", unsupported_error=RuntimeError("unsupported")
        )
        assert payload == b"hello"
        assert mime == "text/plain"

    def test_text_io_wrapper_is_converted_to_bytes_and_text_plain(self):
        """A text-mode file handle is read fully and labelled text/plain."""
        content = "Line 1\nLine 2"
        path = self._make_temp_path(text=True)
        try:
            with open(path, "w", encoding="utf-8") as handle:
                handle.write(content)
            with open(path, encoding="utf-8") as text_handle:
                payload, mime = convert_data_to_bytes_and_infer_mime(
                    text_handle, unsupported_error=RuntimeError("unsupported")
                )
            assert payload == content.encode("utf-8")
            assert mime == "text/plain"
        finally:
            self._remove_quietly(path)

    def test_bytes_passthrough_and_octet_stream(self):
        """Raw bytes pass through untouched as application/octet-stream."""
        raw = b"\x00\x01\x02"
        payload, mime = convert_data_to_bytes_and_infer_mime(
            raw, unsupported_error=RuntimeError("unsupported")
        )
        assert payload == raw
        assert mime == "application/octet-stream"

    def test_bytesio_rewinds_and_reads_all(self):
        """A BytesIO with a nonzero cursor is rewound and read in full."""
        raw = b"abcdef"
        buffer = io.BytesIO(raw)
        buffer.seek(3)  # simulate a prior partial read
        payload, mime = convert_data_to_bytes_and_infer_mime(
            buffer, unsupported_error=RuntimeError("unsupported")
        )
        assert payload == raw
        assert mime == "application/octet-stream"

    def test_buffered_reader_rewinds_and_reads_all(self):
        """A binary file handle (BufferedReader) is rewound and read fully."""
        raw = b"\x10\x20\x30\x40"
        path = self._make_temp_path()
        try:
            with open(path, "wb") as handle:
                handle.write(raw)
            with open(path, "rb") as reader:
                reader.read(2)  # simulate a prior partial read
                payload, mime = convert_data_to_bytes_and_infer_mime(
                    reader, unsupported_error=RuntimeError("unsupported")
                )
            assert payload == raw
            assert mime == "application/octet-stream"
        finally:
            self._remove_quietly(path)

    def test_raw_io_base_fileio_rewinds_and_reads_all(self):
        """A FileIO (RawIOBase subclass) is rewound and read fully."""
        raw = b"\xaa\xbb\xcc"
        path = self._make_temp_path()
        try:
            with open(path, "wb") as handle:
                handle.write(raw)
            with io.FileIO(path, "rb") as raw_handle:  # type: ignore[arg-type]
                raw_handle.read(1)  # simulate a prior partial read
                payload, mime = convert_data_to_bytes_and_infer_mime(
                    raw_handle, unsupported_error=RuntimeError("unsupported")
                )
            assert payload == raw
            assert mime == "application/octet-stream"
        finally:
            self._remove_quietly(path)

    def test_raw_io_base_empty_file_returns_empty_bytes(self):
        """An empty RawIOBase yields b'' and application/octet-stream."""
        path = self._make_temp_path()
        try:
            with io.FileIO(path, "rb") as raw_handle:  # type: ignore[arg-type]
                payload, mime = convert_data_to_bytes_and_infer_mime(
                    raw_handle, unsupported_error=RuntimeError("unsupported")
                )
            assert payload == b""
            assert mime == "application/octet-stream"
        finally:
            self._remove_quietly(path)

    def test_unsupported_type_raises_given_exception(self):
        """Unhandled input types raise the caller-supplied exception."""
        # assertRaisesRegex performs a regex search, matching pytest.raises'
        # match= semantics.
        with self.assertRaisesRegex(RuntimeError, "custom unsupported"):
            convert_data_to_bytes_and_infer_mime(
                ["not", "supported"],
                unsupported_error=RuntimeError("custom unsupported"),
            )

    def test_supported_type_ignores_unsupported_error_and_returns_normally(self):
        """The unsupported_error argument is inert for supported types."""
        payload, mime = convert_data_to_bytes_and_infer_mime(
            b"ok", unsupported_error=RuntimeError("should not raise")
        )
        assert payload == b"ok"
        assert mime == "application/octet-stream"
| {
"repo_id": "streamlit/streamlit",
"file_path": "lib/tests/streamlit/runtime/download_data_util_test.py",
"license": "Apache License 2.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
streamlit/streamlit:e2e_playwright/st_plotly_chart_dimensions.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E test app for st.plotly_chart width and height parameters."""
import plotly.graph_objects as go
import streamlit as st
# Width parameter tests
st.write("## Width Parameter Tests")

# A small figure reused across the width variants.
width_fig = go.Figure()
width_fig.add_trace(
    go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17], name="Simple Chart")
)
width_fig.update_layout(height=200, width=500, title="Chart with width='content':")
st.plotly_chart(width_fig, width="content", theme="streamlit")

width_fig.update_layout(title="Chart with width='stretch':")
st.plotly_chart(width_fig, width="stretch", theme="streamlit")

width_fig.update_layout(title="Chart with width=400:")
st.plotly_chart(width_fig, width=400, theme="streamlit")

# A wide figure checks that width='content' respects the figure's own width.
wide_fig = go.Figure()
wide_fig.add_trace(
    go.Scatter(x=[1, 2, 3, 4, 5, 6], y=[10, 15, 13, 17, 20, 18], name="Large Chart")
)
wide_fig.update_layout(
    height=400, width=1000, title="Chart with figure width=1000 and width='content':"
)
st.plotly_chart(wide_fig, width="content", theme="streamlit")

# Height parameter tests
st.write("## Height Parameter Tests")

# A small figure reused across the height variants.
height_fig = go.Figure()
height_fig.add_trace(
    go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17], name="Height Test Chart")
)
height_fig.update_layout(title="Chart with height='content':")
st.plotly_chart(height_fig, height="content", theme="streamlit")

st.write("Chart with height='stretch' (in 600px container):")
with st.container(border=True, key="test_height_stretch", height=600):
    height_fig.update_layout(title="Chart with height='stretch':")
    st.plotly_chart(height_fig, height="stretch", theme="streamlit")

height_fig.update_layout(title="Chart with height=300:")
st.plotly_chart(height_fig, height=300, theme="streamlit")

# A tall figure checks that height='content' respects the figure's own height.
tall_fig = go.Figure()
tall_fig.add_trace(
    go.Scatter(x=[1, 2, 3, 4, 5], y=[10, 15, 13, 17, 20], name="Tall Chart")
)
tall_fig.update_layout(
    height=600, width=500, title="Chart with figure height=600 and height='content':"
)
st.plotly_chart(tall_fig, height="content", theme="streamlit")
| {
"repo_id": "streamlit/streamlit",
"file_path": "e2e_playwright/st_plotly_chart_dimensions.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.