id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
178,330 | from __future__ import annotations
import collections
import dataclasses
import datetime
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import threading
import uuid
import weakref
from enum import Enum
from typing import Any, Callable, Dict, Final, Pattern, Type, Union
from typing_extensions import TypeAlias
from streamlit import type_util, util
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching.cache_errors import UnhashableTypeError
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.uploaded_file_manager import UploadedFile
from streamlit.util import HASHLIB_KWARGS
def _float_to_bytes(f: float) -> bytes:
# Lazy-load for performance reasons.
import struct
# Floats are 64bit in Python, so we need to use the "d" format.
return struct.pack("<d", f) | null |
178,331 | from __future__ import annotations
import collections
import dataclasses
import datetime
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import threading
import uuid
import weakref
from enum import Enum
from typing import Any, Callable, Dict, Final, Pattern, Type, Union
from typing_extensions import TypeAlias
from streamlit import type_util, util
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching.cache_errors import UnhashableTypeError
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.uploaded_file_manager import UploadedFile
from streamlit.util import HASHLIB_KWARGS
class NoResult:
    """Sentinel type used in place of a result when ``None`` is itself a
    meaningful return value and cannot double as the "no result" marker."""
The provided code snippet includes necessary dependencies for implementing the `_key` function. Write a Python function `def _key(obj: Any | None) -> Any` to solve the following problem:
Return key for memoization.
Here is the function:
def _key(obj: Any | None) -> Any:
"""Return key for memoization."""
if obj is None:
return None
def is_simple(obj):
return (
isinstance(obj, bytes)
or isinstance(obj, bytearray)
or isinstance(obj, str)
or isinstance(obj, float)
or isinstance(obj, int)
or isinstance(obj, bool)
or isinstance(obj, uuid.UUID)
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple):
if all(map(is_simple, obj)):
return obj
if isinstance(obj, list):
if all(map(is_simple, obj)):
return ("__l", tuple(obj))
if inspect.isbuiltin(obj) or inspect.isroutine(obj) or inspect.iscode(obj):
return id(obj)
return NoResult | Return key for memoization. |
178,332 | from __future__ import annotations
import math
import threading
import types
from datetime import timedelta
from typing import Any, Callable, Final, TypeVar, cast, overload
from cachetools import TTLCache
from typing_extensions import TypeAlias
import streamlit as st
from streamlit.deprecation_util import show_deprecation_warning
from streamlit.logger import get_logger
from streamlit.runtime.caching import cache_utils
from streamlit.runtime.caching.cache_errors import CacheKeyNotFoundError
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.cache_utils import (
Cache,
CachedFuncInfo,
make_cached_func_wrapper,
ttl_to_seconds,
)
from streamlit.runtime.caching.cached_message_replay import (
CachedMessageReplayContext,
CachedResult,
ElementMsgData,
MsgData,
MultiCacheResults,
)
from streamlit.runtime.caching.hashing import HashFuncsDict
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner.script_run_context import get_script_run_ctx
from streamlit.runtime.stats import CacheStat, CacheStatsProvider, group_stats
ValidateFunc: TypeAlias = Callable[[Any], bool]
The provided code snippet includes necessary dependencies for implementing the `_equal_validate_funcs` function. Write a Python function `def _equal_validate_funcs(a: ValidateFunc | None, b: ValidateFunc | None) -> bool` to solve the following problem:
True if the two validate functions are equal for the purposes of determining whether a given function cache needs to be recreated.
Here is the function:
def _equal_validate_funcs(a: ValidateFunc | None, b: ValidateFunc | None) -> bool:
"""True if the two validate functions are equal for the purposes of
determining whether a given function cache needs to be recreated.
"""
# To "properly" test for function equality here, we'd need to compare function bytecode.
# For performance reasons, We've decided not to do that for now.
return (a is None and b is None) or (a is not None and b is not None) | True if the two validate functions are equal for the purposes of determining whether a given function cache needs to be recreated. |
178,333 | from __future__ import annotations
import math
import threading
import types
from datetime import timedelta
from typing import Any, Callable, Final, TypeVar, cast, overload
from cachetools import TTLCache
from typing_extensions import TypeAlias
import streamlit as st
from streamlit.deprecation_util import show_deprecation_warning
from streamlit.logger import get_logger
from streamlit.runtime.caching import cache_utils
from streamlit.runtime.caching.cache_errors import CacheKeyNotFoundError
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.cache_utils import (
Cache,
CachedFuncInfo,
make_cached_func_wrapper,
ttl_to_seconds,
)
from streamlit.runtime.caching.cached_message_replay import (
CachedMessageReplayContext,
CachedResult,
ElementMsgData,
MsgData,
MultiCacheResults,
)
from streamlit.runtime.caching.hashing import HashFuncsDict
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner.script_run_context import get_script_run_ctx
from streamlit.runtime.stats import CacheStat, CacheStatsProvider, group_stats
_resource_caches = ResourceCaches()
class CacheStatsProvider(Protocol):
    """Structural interface for objects that can report cache statistics."""

    def get_stats(self) -> list[CacheStat]:
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `get_resource_cache_stats_provider` function. Write a Python function `def get_resource_cache_stats_provider() -> CacheStatsProvider` to solve the following problem:
Return the StatsProvider for all @st.cache_resource functions.
Here is the function:
def get_resource_cache_stats_provider() -> CacheStatsProvider:
    """Return the StatsProvider for all @st.cache_resource functions."""
    # The module-level singleton aggregates stats for every resource cache.
    return _resource_caches
178,334 | from __future__ import annotations
import math
import os
import shutil
from typing import Final
from streamlit import util
from streamlit.file_util import get_streamlit_file_path, streamlit_read, streamlit_write
from streamlit.logger import get_logger
from streamlit.runtime.caching.storage.cache_storage_protocol import (
CacheStorage,
CacheStorageContext,
CacheStorageError,
CacheStorageKeyNotFoundError,
CacheStorageManager,
)
from streamlit.runtime.caching.storage.in_memory_cache_storage_wrapper import (
InMemoryCacheStorageWrapper,
)
_CACHE_DIR_NAME: Final = "cache"
def get_streamlit_file_path(*filepath) -> str:
    """Return the full path to a file in ~/.streamlit.

    This doesn't guarantee that the file (or its directory) exists.
    """
    # os.path.expanduser is portable across macOS, Linux and Windows.
    home = os.path.expanduser("~")
    if home is None:
        raise RuntimeError("No home directory.")
    components = (home, CONFIG_FOLDER_NAME) + filepath
    return os.path.join(*components)
def get_cache_folder_path() -> str:
    """Return the on-disk cache directory path inside the user's ~/.streamlit."""
    return get_streamlit_file_path(_CACHE_DIR_NAME)
178,335 | from __future__ import annotations
import contextlib
import hashlib
import threading
import types
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterator,
Protocol,
Union,
runtime_checkable,
)
from google.protobuf.message import Message
import streamlit as st
from streamlit import runtime, util
from streamlit.elements import NONWIDGET_ELEMENTS, WIDGETS
from streamlit.logger import get_logger
from streamlit.proto.Block_pb2 import Block
from streamlit.runtime.caching.cache_errors import (
CachedStFunctionWarning,
CacheReplayClosureError,
)
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.hashing import update_hash
from streamlit.runtime.scriptrunner.script_run_context import (
ScriptRunContext,
get_script_run_ctx,
)
from streamlit.runtime.state.common import WidgetMetadata
from streamlit.util import HASHLIB_KWARGS
class ElementMsgData:
"""An element's message and related metadata for
replaying that element's function call.
widget_metadata is filled in if and only if this element is a widget.
media_data is filled in iff this is a media element (image, audio, video).
"""
delta_type: str
message: Message
id_of_dg_called_on: str
returned_dgs_id: str
widget_metadata: WidgetMsgMetadata | None = None
media_data: list[MediaMsgData] | None = None
class BlockMsgData:
message: Block
id_of_dg_called_on: str
returned_dgs_id: str
class CachedResult:
"""The full results of calling a cache-decorated function, enough to
replay the st functions called while executing it.
"""
value: Any
messages: list[MsgData]
main_id: str
sidebar_id: str
class CacheReplayClosureError(StreamlitAPIException):
    """Raised when replaying a cached function's elements references a layout
    block that was created outside the cached function, so the replayed
    element would have nowhere valid to be drawn."""

    def __init__(
        self,
        cache_type: CacheType,
        cached_func: types.FunctionType,
    ):
        func_name = get_cached_func_name_md(cached_func)
        decorator_name = get_decorator_api_name(cache_type)
        # Fixed duplicated "the the" in the user-facing message below.
        msg = (
            f"""
While running {func_name}, a streamlit element is called on some layout block created outside the function.
This is incompatible with replaying the cached effect of that element, because the
referenced block might not exist when the replay happens.
How to fix this:
* Move the creation of $THING inside {func_name}.
* Move the call to the streamlit element outside of {func_name}.
* Remove the `@st.{decorator_name}` decorator from {func_name}.
"""
        ).strip("\n")
        super().__init__(msg)
class CacheType(enum.Enum):
    """The function cache types we implement."""

    DATA = "DATA"
    # RESOURCE backs @st.cache_resource (see get_resource_cache_stats_provider).
    RESOURCE = "RESOURCE"
def get_script_run_ctx(suppress_warning: bool = False) -> ScriptRunContext | None:
    """
    Parameters
    ----------
    suppress_warning : bool
        If True, don't log a warning if there's no ScriptRunContext.

    Returns
    -------
    ScriptRunContext | None
        The current thread's ScriptRunContext, or None if it doesn't have one.
    """
    current = threading.current_thread()
    ctx: ScriptRunContext | None = getattr(current, SCRIPT_RUN_CONTEXT_ATTR_NAME, None)
    if ctx is None and runtime.exists() and not suppress_warning:
        # Warn only when started via `streamlit run`; a "bare" script run
        # legitimately has no ScriptRunContext and shouldn't be nagged about
        # session-related machinery that doesn't apply to it.
        _LOGGER.warning("Thread '%s': missing ScriptRunContext", current.name)
    return ctx
class DeltaGenerator(
AlertMixin,
BalloonsMixin,
BokehMixin,
ButtonMixin,
CameraInputMixin,
ChatMixin,
CheckboxMixin,
CodeMixin,
ColorPickerMixin,
EmptyMixin,
ExceptionMixin,
FileUploaderMixin,
FormMixin,
GraphvizMixin,
HeadingMixin,
HelpMixin,
IframeMixin,
ImageMixin,
LayoutsMixin,
MarkdownMixin,
MapMixin,
MediaMixin,
MetricMixin,
MultiSelectMixin,
NumberInputMixin,
PlotlyMixin,
ProgressMixin,
PydeckMixin,
PyplotMixin,
RadioMixin,
SelectboxMixin,
SelectSliderMixin,
SliderMixin,
SnowMixin,
JsonMixin,
TextMixin,
TextWidgetsMixin,
TimeWidgetsMixin,
ToastMixin,
WriteMixin,
ArrowMixin,
ArrowAltairMixin,
ArrowVegaLiteMixin,
DataEditorMixin,
):
"""Creator of Delta protobuf messages.
Parameters
----------
root_container: BlockPath_pb2.BlockPath.ContainerValue or None
The root container for this DeltaGenerator. If None, this is a null
DeltaGenerator which doesn't print to the app at all (useful for
testing).
cursor: cursor.Cursor or None
This is either:
- None: if this is the running DeltaGenerator for a top-level
container (MAIN or SIDEBAR)
- RunningCursor: if this is the running DeltaGenerator for a
non-top-level container (created with dg.container())
- LockedCursor: if this is a locked DeltaGenerator returned by some
other DeltaGenerator method. E.g. the dg returned in dg =
st.text("foo").
parent: DeltaGenerator
To support the `with dg` notation, DGs are arranged as a tree. Each DG
remembers its own parent, and the root of the tree is the main DG.
block_type: None or "vertical" or "horizontal" or "column" or "expandable"
If this is a block DG, we track its type to prevent nested columns/expanders
"""
# The pydoc below is for user consumption, so it doesn't talk about
# DeltaGenerator constructor parameters (which users should never use). For
# those, see above.
def __init__(
self,
root_container: int | None = RootContainer.MAIN,
cursor: Cursor | None = None,
parent: DeltaGenerator | None = None,
block_type: str | None = None,
) -> None:
"""Inserts or updates elements in Streamlit apps.
As a user, you should never initialize this object by hand. Instead,
DeltaGenerator objects are initialized for you in two places:
1) When you call `dg = st.foo()` for some method "foo", sometimes `dg`
is a DeltaGenerator object. You can call methods on the `dg` object to
update the element `foo` that appears in the Streamlit app.
2) This is an internal detail, but `st.sidebar` itself is a
DeltaGenerator. That's why you can call `st.sidebar.foo()` to place
an element `foo` inside the sidebar.
"""
# Sanity check our Container + Cursor, to ensure that our Cursor
# is using the same Container that we are.
if (
root_container is not None
and cursor is not None
and root_container != cursor.root_container
):
raise RuntimeError(
"DeltaGenerator root_container and cursor.root_container must be the same"
)
# Whether this DeltaGenerator is nested in the main area or sidebar.
# No relation to `st.container()`.
self._root_container = root_container
# NOTE: You should never use this directly! Instead, use self._cursor,
# which is a computed property that fetches the right cursor.
self._provided_cursor = cursor
self._parent = parent
self._block_type = block_type
# If this an `st.form` block, this will get filled in.
self._form_data: FormData | None = None
# Change the module of all mixin'ed functions to be st.delta_generator,
# instead of the original module (e.g. st.elements.markdown)
for mixin in self.__class__.__bases__:
for _, func in mixin.__dict__.items():
if callable(func):
func.__module__ = self.__module__
def __repr__(self) -> str:
return util.repr_(self)
def __enter__(self) -> None:
# with block started
dg_stack.set(dg_stack.get() + (self,))
def __exit__(
self,
type: Any,
value: Any,
traceback: Any,
) -> Literal[False]:
# with block ended
dg_stack.set(dg_stack.get()[:-1])
# Re-raise any exceptions
return False
def _active_dg(self) -> DeltaGenerator:
"""Return the DeltaGenerator that's currently 'active'.
If we are the main DeltaGenerator, and are inside a `with` block that
creates a container, our active_dg is that container. Otherwise,
our active_dg is self.
"""
if self == self._main_dg:
# We're being invoked via an `st.foo` pattern - use the current
# `with` dg (aka the top of the stack).
current_stack = dg_stack.get()
if len(current_stack) > 1:
return current_stack[-1]
# We're being invoked via an `st.sidebar.foo` pattern - ignore the
# current `with` dg.
return self
def _main_dg(self) -> DeltaGenerator:
"""Return this DeltaGenerator's root - that is, the top-level ancestor
DeltaGenerator that we belong to (this generally means the st._main
DeltaGenerator).
"""
return self._parent._main_dg if self._parent else self
def __getattr__(self, name: str) -> Callable[..., NoReturn]:
import streamlit as st
streamlit_methods = [
method_name for method_name in dir(st) if callable(getattr(st, method_name))
]
def wrapper(*args: Any, **kwargs: Any) -> NoReturn:
if name in streamlit_methods:
if self._root_container == RootContainer.SIDEBAR:
message = (
f"Method `{name}()` does not exist for "
f"`st.sidebar`. Did you mean `st.{name}()`?"
)
else:
message = (
f"Method `{name}()` does not exist for "
"`DeltaGenerator` objects. Did you mean "
"`st.{name}()`?"
)
else:
message = f"`{name}()` is not a valid Streamlit command."
raise StreamlitAPIException(message)
return wrapper
def __deepcopy__(self, _memo):
dg = DeltaGenerator(
root_container=self._root_container,
cursor=deepcopy(self._cursor),
parent=deepcopy(self._parent),
block_type=self._block_type,
)
dg._form_data = deepcopy(self._form_data)
return dg
def _parent_block_types(self) -> ParentBlockTypes:
"""Iterate all the block types used by this DeltaGenerator and all
its ancestor DeltaGenerators.
"""
current_dg: DeltaGenerator | None = self
while current_dg is not None:
if current_dg._block_type is not None:
yield current_dg._block_type
current_dg = current_dg._parent
def _count_num_of_parent_columns(self, parent_block_types: ParentBlockTypes) -> int:
return sum(1 for parent_block in parent_block_types if parent_block == "column")
def _cursor(self) -> Cursor | None:
"""Return our Cursor. This will be None if we're not running in a
ScriptThread - e.g., if we're running a "bare" script outside of
Streamlit.
"""
if self._provided_cursor is None:
return cursor.get_container_cursor(self._root_container)
else:
return self._provided_cursor
def _is_top_level(self) -> bool:
return self._provided_cursor is None
def id(self) -> str:
return str(id(self))
def _get_delta_path_str(self) -> str:
"""Returns the element's delta path as a string like "[0, 2, 3, 1]".
This uniquely identifies the element's position in the front-end,
which allows (among other potential uses) the MediaFileManager to maintain
session-specific maps of MediaFile objects placed with their "coordinates".
This way, users can (say) use st.image with a stream of different images,
and Streamlit will expire the older images and replace them in place.
"""
# Operate on the active DeltaGenerator, in case we're in a `with` block.
dg = self._active_dg
return str(dg._cursor.delta_path) if dg._cursor is not None else "[]"
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: None,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> DeltaGenerator:
...
def _enqueue( # type: ignore[misc]
self,
delta_type: str,
element_proto: Message,
return_value: type[NoValue],
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> None:
...
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: Value,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> Value:
...
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: None = None,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> DeltaGenerator:
...
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: type[NoValue] | Value | None = None,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> DeltaGenerator | Value | None:
...
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: type[NoValue] | Value | None = None,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> DeltaGenerator | Value | None:
"""Create NewElement delta, fill it, and enqueue it.
Parameters
----------
delta_type : str
The name of the streamlit method being called
element_proto : proto
The actual proto in the NewElement type e.g. Alert/Button/Slider
return_value : any or None
The value to return to the calling script (for widgets)
element_width : int or None
Desired width for the element
element_height : int or None
Desired height for the element
Returns
-------
DeltaGenerator or any
If this element is NOT an interactive widget, return a
DeltaGenerator that can be used to modify the newly-created
element. Otherwise, if the element IS a widget, return the
`return_value` parameter.
"""
# Operate on the active DeltaGenerator, in case we're in a `with` block.
dg = self._active_dg
# Warn if we're called from within a legacy @st.cache function
legacy_caching.maybe_show_cached_st_function_warning(dg, delta_type)
# Warn if we're called from within @st.memo or @st.singleton
caching.maybe_show_cached_st_function_warning(dg, delta_type)
# Warn if an element is being changed but the user isn't running the streamlit server.
_maybe_print_use_warning()
# Some elements have a method.__name__ != delta_type in proto.
# This really matters for line_chart, bar_chart & area_chart,
# since add_rows() relies on method.__name__ == delta_type
# TODO: Fix for all elements (or the cache warning above will be wrong)
proto_type = delta_type
if proto_type in ARROW_DELTA_TYPES_THAT_MELT_DATAFRAMES:
proto_type = "arrow_vega_lite_chart"
# Copy the marshalled proto into the overall msg proto
msg = ForwardMsg_pb2.ForwardMsg()
msg_el_proto = getattr(msg.delta.new_element, proto_type)
msg_el_proto.CopyFrom(element_proto)
# Only enqueue message and fill in metadata if there's a container.
msg_was_enqueued = False
if dg._root_container is not None and dg._cursor is not None:
msg.metadata.delta_path[:] = dg._cursor.delta_path
if element_width is not None:
msg.metadata.element_dimension_spec.width = element_width
if element_height is not None:
msg.metadata.element_dimension_spec.height = element_height
_enqueue_message(msg)
msg_was_enqueued = True
if msg_was_enqueued:
# Get a DeltaGenerator that is locked to the current element
# position.
new_cursor = (
dg._cursor.get_locked_cursor(
delta_type=delta_type, add_rows_metadata=add_rows_metadata
)
if dg._cursor is not None
else None
)
output_dg = DeltaGenerator(
root_container=dg._root_container,
cursor=new_cursor,
parent=dg,
)
else:
# If the message was not enqueued, just return self since it's a
# no-op from the point of view of the app.
output_dg = dg
# Save message for replay if we're called from within @st.memo or @st.singleton
caching.save_element_message(
delta_type,
element_proto,
invoked_dg_id=self.id,
used_dg_id=dg.id,
returned_dg_id=output_dg.id,
)
return _value_or_dg(return_value, output_dg)
def _block(
self,
block_proto: Block_pb2.Block = Block_pb2.Block(),
dg_type: type | None = None,
) -> DeltaGenerator:
# Operate on the active DeltaGenerator, in case we're in a `with` block.
dg = self._active_dg
# Prevent nested columns & expanders by checking all parents.
block_type = block_proto.WhichOneof("type")
# Convert the generator to a list, so we can use it multiple times.
parent_block_types = list(dg._parent_block_types)
if block_type == "column":
num_of_parent_columns = self._count_num_of_parent_columns(
parent_block_types
)
if (
self._root_container == RootContainer.SIDEBAR
and num_of_parent_columns > 0
):
raise StreamlitAPIException(
"Columns cannot be placed inside other columns in the sidebar. This is only possible in the main area of the app."
)
if num_of_parent_columns > 1:
raise StreamlitAPIException(
"Columns can only be placed inside other columns up to one level of nesting."
)
if block_type == "chat_message" and block_type in frozenset(parent_block_types):
raise StreamlitAPIException(
"Chat messages cannot nested inside other chat messages."
)
if block_type == "expandable" and block_type in frozenset(parent_block_types):
raise StreamlitAPIException(
"Expanders may not be nested inside other expanders."
)
if block_type == "popover" and block_type in frozenset(parent_block_types):
raise StreamlitAPIException(
"Popovers may not be nested inside other popovers."
)
if dg._root_container is None or dg._cursor is None:
return dg
msg = ForwardMsg_pb2.ForwardMsg()
msg.metadata.delta_path[:] = dg._cursor.delta_path
msg.delta.add_block.CopyFrom(block_proto)
# Normally we'd return a new DeltaGenerator that uses the locked cursor
# below. But in this case we want to return a DeltaGenerator that uses
# a brand new cursor for this new block we're creating.
block_cursor = cursor.RunningCursor(
root_container=dg._root_container,
parent_path=dg._cursor.parent_path + (dg._cursor.index,),
)
# `dg_type` param added for st.status container. It allows us to
# instantiate DeltaGenerator subclasses from the function.
if dg_type is None:
dg_type = DeltaGenerator
block_dg = cast(
DeltaGenerator,
dg_type(
root_container=dg._root_container,
cursor=block_cursor,
parent=dg,
block_type=block_type,
),
)
# Blocks inherit their parent form ids.
# NOTE: Container form ids aren't set in proto.
block_dg._form_data = FormData(current_form_id(dg))
# Must be called to increment this cursor's index.
dg._cursor.get_locked_cursor(add_rows_metadata=None)
_enqueue_message(msg)
caching.save_block_message(
block_proto,
invoked_dg_id=self.id,
used_dg_id=dg.id,
returned_dg_id=block_dg.id,
)
return block_dg
def _arrow_add_rows(
self: DG,
data: Data = None,
**kwargs: DataFrame
| npt.NDArray[Any]
| Iterable[Any]
| dict[Hashable, Any]
| None,
) -> DG | None:
"""Concatenate a dataframe to the bottom of the current one.
Parameters
----------
data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
Table to concat. Optional.
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
The named dataset to concat. Optional. You can only pass in 1
dataset (including the one in the data parameter).
Example
-------
>>> import streamlit as st
>>> import pandas as pd
>>> import numpy as np
>>>
>>> df1 = pd.DataFrame(
... np.random.randn(50, 20),
... columns=('col %d' % i for i in range(20)))
...
>>> my_table = st.table(df1)
>>>
>>> df2 = pd.DataFrame(
... np.random.randn(50, 20),
... columns=('col %d' % i for i in range(20)))
...
>>> my_table._arrow_add_rows(df2)
>>> # Now the table shown in the Streamlit app contains the data for
>>> # df1 followed by the data for df2.
You can do the same thing with plots. For example, if you want to add
more data to a line chart:
>>> # Assuming df1 and df2 from the example above still exist...
>>> my_chart = st.line_chart(df1)
>>> my_chart._arrow_add_rows(df2)
>>> # Now the chart shown in the Streamlit app contains the data for
>>> # df1 followed by the data for df2.
And for plots whose datasets are named, you can pass the data with a
keyword argument where the key is the name:
>>> my_chart = st._arrow_vega_lite_chart({
... 'mark': 'line',
... 'encoding': {'x': 'a', 'y': 'b'},
... 'datasets': {
... 'some_fancy_name': df1, # <-- named dataset
... },
... 'data': {'name': 'some_fancy_name'},
... }),
>>> my_chart._arrow_add_rows(some_fancy_name=df2) # <-- name used as keyword
"""
if self._root_container is None or self._cursor is None:
return self
if not self._cursor.is_locked:
raise StreamlitAPIException("Only existing elements can `add_rows`.")
# Accept syntax st._arrow_add_rows(df).
if data is not None and len(kwargs) == 0:
name = ""
# Accept syntax st._arrow_add_rows(foo=df).
elif len(kwargs) == 1:
name, data = kwargs.popitem()
# Raise error otherwise.
else:
raise StreamlitAPIException(
"Wrong number of arguments to add_rows()."
"Command requires exactly one dataset"
)
# When doing _arrow_add_rows on an element that does not already have data
# (for example, st.line_chart() without any args), call the original
# st._arrow_foo() element with new data instead of doing a _arrow_add_rows().
if (
self._cursor.props["delta_type"] in ARROW_DELTA_TYPES_THAT_MELT_DATAFRAMES
and self._cursor.props["add_rows_metadata"].last_index is None
):
# IMPORTANT: This assumes delta types and st method names always
# match!
# delta_type starts with "arrow_", but st_method_name doesn't use this prefix.
st_method_name = self._cursor.props["delta_type"].replace("arrow_", "")
st_method = getattr(self, st_method_name)
st_method(data, **kwargs)
return None
new_data, self._cursor.props["add_rows_metadata"] = _prep_data_for_add_rows(
data,
self._cursor.props["delta_type"],
self._cursor.props["add_rows_metadata"],
)
msg = ForwardMsg_pb2.ForwardMsg()
msg.metadata.delta_path[:] = self._cursor.delta_path
import streamlit.elements.arrow as arrow_proto
default_uuid = str(hash(self._get_delta_path_str()))
arrow_proto.marshall(msg.delta.arrow_add_rows.data, new_data, default_uuid)
if name:
msg.delta.arrow_add_rows.name = name
msg.delta.arrow_add_rows.has_name = True
_enqueue_message(msg)
return self
def register_widget_from_metadata(
    metadata: WidgetMetadata[T],
    ctx: ScriptRunContext | None,
    widget_func_name: str | None,
    element_type: ElementType,
) -> RegisterWidgetResult[T]:
    """Register a widget and return its value, using an already constructed
    `WidgetMetadata`.

    This is split out from `register_widget` to allow caching code to replay
    widgets by saving and reusing the completed metadata.

    See `register_widget` for details on what this returns.

    Raises
    ------
    DuplicateWidgetID
        If a widget with the same user key or the same widget id was already
        registered during this script run.
    """
    # Local import to avoid import cycle
    import streamlit.runtime.caching as caching

    if ctx is None:
        # Early-out if we don't have a script run context (which probably means
        # we're running as a "bare" Python script, and not via `streamlit run`).
        return RegisterWidgetResult.failure(deserializer=metadata.deserializer)

    widget_id = metadata.id
    user_key = user_key_from_widget_id(widget_id)

    # Ensure another widget with the same user key hasn't already been registered.
    if user_key is not None:
        if user_key not in ctx.widget_user_keys_this_run:
            ctx.widget_user_keys_this_run.add(user_key)
        else:
            raise DuplicateWidgetID(
                _build_duplicate_widget_message(
                    widget_func_name if widget_func_name is not None else element_type,
                    user_key,
                )
            )

    # Ensure another widget with the same id hasn't already been registered.
    new_widget = widget_id not in ctx.widget_ids_this_run
    if new_widget:
        ctx.widget_ids_this_run.add(widget_id)
    else:
        raise DuplicateWidgetID(
            _build_duplicate_widget_message(
                widget_func_name if widget_func_name is not None else element_type,
                user_key,
            )
        )

    # Save the widget metadata for cached result replay
    caching.save_widget_metadata(metadata)

    return ctx.session_state.register_widget(metadata, user_key)
The provided code snippet includes necessary dependencies for implementing the `replay_cached_messages` function. Write a Python function `def replay_cached_messages( result: CachedResult, cache_type: CacheType, cached_func: types.FunctionType ) -> None` to solve the following problem:
Replay the st element function calls that happened when executing a cache-decorated function. When a cache function is executed, we record the element and block messages produced, and use those to reproduce the DeltaGenerator calls, so the elements will appear in the web app even when execution of the function is skipped because the result was cached. To make this work, for each st function call we record an identifier for the DG it was effectively called on (see Note [DeltaGenerator method invocation]). We also record the identifier for each DG returned by an st function call, if it returns one. Then, for each recorded message, we get the current DG instance corresponding to the DG the message was originally called on, and enqueue the message using that, recording any new DGs produced in case a later st function call is on one of them.
Here is the function:
def replay_cached_messages(
    result: CachedResult, cache_type: CacheType, cached_func: types.FunctionType
) -> None:
    """Replay the st element function calls that happened when executing a
    cache-decorated function.

    When a cache function is executed, we record the element and block messages
    produced, and use those to reproduce the DeltaGenerator calls, so the
    elements will appear in the web app even when execution of the function is
    skipped because the result was cached.

    Each recorded message carries the id of the DG it was originally called on
    and the id of any DG it returned. We map those recorded ids onto this
    script run's live DeltaGenerator instances as we replay, so that later
    messages called on a previously-returned DG find the right instance.

    Raises
    ------
    CacheReplayClosureError
        If a message references a DG id that was never produced during
        replay, i.e. the element was drawn on a block created outside the
        cached function.
    """
    from streamlit.delta_generator import DeltaGenerator
    from streamlit.runtime.state.widgets import register_widget_from_metadata

    # Maps originally recorded dg ids to this script run's version of that dg.
    live_dgs: dict[str, DeltaGenerator] = {
        result.main_id: st._main,
        result.sidebar_id: st.sidebar,
    }

    ctx = get_script_run_ctx()
    try:
        for msg in result.messages:
            if isinstance(msg, ElementMsgData):
                # Widgets must be re-registered so their state is live this run.
                if msg.widget_metadata is not None:
                    register_widget_from_metadata(
                        msg.widget_metadata.metadata,
                        ctx,
                        None,
                        msg.delta_type,
                    )
                # Media payloads must be re-added to the media file manager.
                if msg.media_data is not None:
                    for media in msg.media_data:
                        runtime.get_instance().media_file_mgr.add(
                            media.media, media.mimetype, media.media_id
                        )
                target_dg = live_dgs[msg.id_of_dg_called_on]
                enqueue_result = target_dg._enqueue(msg.delta_type, msg.message)
                if isinstance(enqueue_result, DeltaGenerator):
                    live_dgs[msg.returned_dgs_id] = enqueue_result
            elif isinstance(msg, BlockMsgData):
                target_dg = live_dgs[msg.id_of_dg_called_on]
                live_dgs[msg.returned_dgs_id] = target_dg._block(msg.message)
    except KeyError:
        # A message referenced a dg we never recorded: its block was created
        # outside the cached function and may not exist during this replay.
        raise CacheReplayClosureError(cache_type, cached_func)
from __future__ import annotations
import contextlib
import hashlib
import threading
import types
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterator,
Protocol,
Union,
runtime_checkable,
)
from google.protobuf.message import Message
import streamlit as st
from streamlit import runtime, util
from streamlit.elements import NONWIDGET_ELEMENTS, WIDGETS
from streamlit.logger import get_logger
from streamlit.proto.Block_pb2 import Block
from streamlit.runtime.caching.cache_errors import (
CachedStFunctionWarning,
CacheReplayClosureError,
)
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.hashing import update_hash
from streamlit.runtime.scriptrunner.script_run_context import (
ScriptRunContext,
get_script_run_ctx,
)
from streamlit.runtime.state.common import WidgetMetadata
from streamlit.util import HASHLIB_KWARGS
class CacheType(enum.Enum):
    """Enumerates the kinds of function caches implemented here."""

    DATA = "DATA"
    RESOURCE = "RESOURCE"
def update_hash(
    val: Any,
    hasher,
    cache_type: CacheType,
    hash_source: Callable[..., Any] | None = None,
    hash_funcs: HashFuncsDict | None = None,
) -> None:
    """Feed the hash of ``val`` into ``hasher``.

    This is the main entrypoint to hashing.py.
    """
    # Stash the source callable on the current hash stack before hashing
    # (consumed elsewhere — outside this excerpt).
    hash_stacks.current.hash_source = hash_source
    _CacheFuncHasher(cache_type, hash_funcs).update(hasher, val)
# Extra kwargs for hashlib.new(): `usedforsecurity=False` (Python 3.9+)
# declares the digest non-cryptographic, which keeps MD5 usable on
# FIPS-restricted OpenSSL builds. Empty dict on older Pythons, where the
# kwarg does not exist.
HASHLIB_KWARGS: dict[str, Any] = (
    {"usedforsecurity": False} if sys.version_info >= (3, 9) else {}
)
The provided code snippet includes necessary dependencies for implementing the `_make_widget_key` function. Write a Python function `def _make_widget_key(widgets: list[tuple[str, Any]], cache_type: CacheType) -> str` to solve the following problem:
Generate a key for the given list of widgets used in a cache-decorated function. Keys are generated by hashing the IDs and values of the widgets in the given list.
Here is the function:
def _make_widget_key(widgets: list[tuple[str, Any]], cache_type: CacheType) -> str:
    """Hash the (widget id, widget value) pairs used by a cache-decorated
    function into a single cache-key string."""
    hasher = hashlib.new("md5", **HASHLIB_KWARGS)
    for widget_id_and_value in widgets:
        update_hash(widget_id_and_value, hasher, cache_type)
    return hasher.hexdigest()
from __future__ import annotations
from typing import Any, Final, Iterator, MutableMapping
from streamlit import logger as _logger
from streamlit import runtime
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.state.common import require_valid_user_key
from streamlit.runtime.state.safe_session_state import SafeSessionState
from streamlit.runtime.state.session_state import SessionState
from streamlit.type_util import Key
# Module-level logger.
_LOGGER: Final = _logger.get_logger(__name__)
# One-shot guard: the "running without `streamlit run`" warning in
# get_session_state() is logged at most once per process.
_state_use_warning_already_displayed: bool = False
class SafeSessionState:
    """Thread-safe wrapper around SessionState.

    When AppSession gets a re-run request, it can interrupt its existing
    ScriptRunner and spin up a new ScriptRunner to handle the request.
    When this happens, the existing ScriptRunner will continue executing
    its script until it reaches a yield point - but during this time, it
    must not mutate its SessionState.
    """

    # The wrapped state, plus the lock that serializes every access to it.
    _state: SessionState
    _lock: threading.RLock
    # Invoked before each user-triggered access; gives an interrupted
    # ScriptRunner a chance to stop before touching state.
    _yield_callback: Callable[[], None]

    def __init__(self, state: SessionState, yield_callback: Callable[[], None]):
        # Fields must be set using the object's setattr method to avoid
        # infinite recursion from trying to look up the fields we're setting.
        object.__setattr__(self, "_state", state)
        # TODO: we'd prefer this be a threading.Lock instead of RLock -
        # but `call_callbacks` first needs to be rewritten.
        object.__setattr__(self, "_lock", threading.RLock())
        object.__setattr__(self, "_yield_callback", yield_callback)

    def register_widget(
        self, metadata: WidgetMetadata[T], user_key: str | None
    ) -> RegisterWidgetResult[T]:
        """Thread-safe pass-through to SessionState.register_widget."""
        self._yield_callback()
        with self._lock:
            return self._state.register_widget(metadata, user_key)

    def on_script_will_rerun(self, latest_widget_states: WidgetStatesProto) -> None:
        """Prepare the wrapped state for a rerun (may run widget callbacks)."""
        self._yield_callback()
        with self._lock:
            # TODO: rewrite this to copy the callbacks list into a local
            # variable so that we don't need to hold our lock for the
            # duration. (This will also allow us to downgrade our RLock
            # to a Lock.)
            self._state.on_script_will_rerun(latest_widget_states)

    def on_script_finished(self, widget_ids_this_run: set[str]) -> None:
        with self._lock:
            self._state.on_script_finished(widget_ids_this_run)

    def maybe_check_serializable(self) -> None:
        with self._lock:
            self._state.maybe_check_serializable()

    def get_widget_states(self) -> list[WidgetStateProto]:
        """Return a list of serialized widget values for each widget with a value."""
        with self._lock:
            return self._state.get_widget_states()

    def is_new_state_value(self, user_key: str) -> bool:
        with self._lock:
            return self._state.is_new_state_value(user_key)

    # NOTE(review): `self._state.filtered_state` below is read without
    # parentheses, so SessionState.filtered_state is a property — this
    # method was likely decorated with @property in the original source
    # (decorator lost in extraction); confirm.
    def filtered_state(self) -> dict[str, Any]:
        """The combined session and widget state, excluding keyless widgets."""
        with self._lock:
            return self._state.filtered_state

    def __getitem__(self, key: str) -> Any:
        self._yield_callback()
        with self._lock:
            return self._state[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self._yield_callback()
        with self._lock:
            self._state[key] = value

    def __delitem__(self, key: str) -> None:
        self._yield_callback()
        with self._lock:
            del self._state[key]

    def __contains__(self, key: str) -> bool:
        self._yield_callback()
        with self._lock:
            return key in self._state

    def __getattr__(self, key: str) -> Any:
        # Attribute access falls through to item access so that
        # `session_state.foo` behaves like `session_state["foo"]`.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(f"{key} not found in session_state.")

    def __setattr__(self, key: str, value: Any) -> None:
        self[key] = value

    def __delattr__(self, key: str) -> None:
        try:
            del self[key]
        except KeyError:
            raise AttributeError(f"{key} not found in session_state.")

    def __repr__(self):
        """Presents itself as a simple dict of the underlying SessionState instance"""
        kv = ((k, self._state[k]) for k in self._state._keys())
        s = ", ".join(f"{k}: {v!r}" for k, v in kv)
        return f"{{{s}}}"

    # NOTE(review): this function yields, i.e. it is written as a context
    # manager body — a @contextlib.contextmanager decorator was likely
    # lost in extraction; confirm against the original source.
    def query_params(self) -> Iterator[QueryParams]:
        """Yield the wrapped state's query_params while holding the lock."""
        self._yield_callback()
        with self._lock:
            yield self._state.query_params
# NOTE(review): the fields below use dataclasses.field(default_factory=...),
# which only works under a @dataclass decorator — the decorator was likely
# lost in extraction; confirm against the original source.
class SessionState:
    """SessionState allows users to store values that persist between app
    reruns.

    Example
    -------
    >>> if "num_script_runs" not in st.session_state:
    ...     st.session_state.num_script_runs = 0
    >>> st.session_state.num_script_runs += 1
    >>> st.write(st.session_state.num_script_runs)  # writes 1

    The next time your script runs, the value of
    st.session_state.num_script_runs will be preserved.
    >>> st.session_state.num_script_runs += 1
    >>> st.write(st.session_state.num_script_runs)  # writes 2
    """

    # All the values from previous script runs, squished together to save memory
    _old_state: dict[str, Any] = field(default_factory=dict)

    # Values set in session state during the current script run, possibly for
    # setting a widget's value. Keyed by a user provided string.
    _new_session_state: dict[str, Any] = field(default_factory=dict)

    # Widget values from the frontend, usually one changing prompted the script rerun
    _new_widget_state: WStates = field(default_factory=WStates)

    # Keys used for widgets will be eagerly converted to the matching widget id
    _key_id_mapping: dict[str, str] = field(default_factory=dict)

    # query params are stored in session state because query params will be
    # tied with widget state at one point.
    query_params: QueryParams = field(default_factory=QueryParams)

    def __repr__(self):
        return util.repr_(self)

    # is it possible for a value to get through this without being deserialized?
    def _compact_state(self) -> None:
        """Copy all current session_state and widget_state values into our
        _old_state dict, and then clear our current session_state and
        widget_state.
        """
        for key_or_wid in self:
            try:
                self._old_state[key_or_wid] = self[key_or_wid]
            except KeyError:
                # handle key errors from widget state not having metadata gracefully
                # https://github.com/streamlit/streamlit/issues/7206
                pass
        self._new_session_state.clear()
        self._new_widget_state.clear()

    def clear(self) -> None:
        """Reset self completely, clearing all current and old values."""
        self._old_state.clear()
        self._new_session_state.clear()
        self._new_widget_state.clear()
        self._key_id_mapping.clear()

    # NOTE(review): accessed attribute-style (no parens) by
    # SafeSessionState.filtered_state — a @property decorator was likely
    # lost in extraction; confirm.
    def filtered_state(self) -> dict[str, Any]:
        """The combined session and widget state, excluding keyless widgets."""
        wid_key_map = self._reverse_key_wid_map

        state: dict[str, Any] = {}

        # We can't write `for k, v in self.items()` here because doing so will
        # run into a `KeyError` if widget metadata has been cleared (which
        # happens when the streamlit server restarted or the cache was cleared),
        # then we receive a widget's state from a browser.
        for k in self._keys():
            if not is_widget_id(k) and not _is_internal_key(k):
                state[k] = self[k]
            elif is_keyed_widget_id(k):
                try:
                    key = wid_key_map[k]
                    state[key] = self[k]
                except KeyError:
                    # Widget id no longer maps to a key, it is a not yet
                    # cleared value in old state for a reset widget
                    pass
        return state

    # NOTE(review): read without parens in filtered_state/__getitem__ —
    # likely a lost @property decorator as well; confirm.
    def _reverse_key_wid_map(self) -> dict[str, str]:
        """Return a mapping of widget_id : widget_key."""
        wid_key_map = {v: k for k, v in self._key_id_mapping.items()}
        return wid_key_map

    def _keys(self) -> set[str]:
        """All keys active in Session State, with widget keys converted
        to widget ids when one is known. (This includes autogenerated keys
        for widgets that don't have user_keys defined, and which aren't
        exposed to user code.)
        """
        old_keys = {self._get_widget_id(k) for k in self._old_state.keys()}
        new_widget_keys = set(self._new_widget_state.keys())
        new_session_state_keys = {
            self._get_widget_id(k) for k in self._new_session_state.keys()
        }
        return old_keys | new_widget_keys | new_session_state_keys

    def is_new_state_value(self, user_key: str) -> bool:
        """True if a value with the given key is in the current session state."""
        return user_key in self._new_session_state

    def __iter__(self) -> Iterator[Any]:
        """Return an iterator over the keys of the SessionState.
        This is a shortcut for `iter(self.keys())`
        """
        return iter(self._keys())

    def __len__(self) -> int:
        """Return the number of items in SessionState."""
        return len(self._keys())

    def __getitem__(self, key: str) -> Any:
        """Look up `key`, which may be a user key or a raw widget id."""
        wid_key_map = self._reverse_key_wid_map
        widget_id = self._get_widget_id(key)

        if widget_id in wid_key_map and widget_id == key:
            # the "key" is a raw widget id, so get its associated user key for lookup
            key = wid_key_map[widget_id]
        try:
            return self._getitem(widget_id, key)
        except KeyError:
            raise KeyError(_missing_key_error_message(key))

    def _getitem(self, widget_id: str | None, user_key: str | None) -> Any:
        """Get the value of an entry in Session State, using either the
        user-provided key or a widget id as appropriate for the internal dict
        being accessed.

        At least one of the arguments must have a value.
        """
        assert user_key is not None or widget_id is not None

        # Newest-first lookup: current-run session state, then current-run
        # widget state, then old state.
        if user_key is not None:
            try:
                return self._new_session_state[user_key]
            except KeyError:
                pass

        if widget_id is not None:
            try:
                return self._new_widget_state[widget_id]
            except KeyError:
                pass

        # Typically, there won't be both a widget id and an associated state key in
        # old state at the same time, so the order we check is arbitrary.
        # The exception is if session state is set and then a later run has
        # a widget created, so the widget id entry should be newer.
        # The opposite case shouldn't happen, because setting the value of a widget
        # through session state will result in the next widget state reflecting that
        # value.
        if widget_id is not None:
            try:
                return self._old_state[widget_id]
            except KeyError:
                pass

        if user_key is not None:
            try:
                return self._old_state[user_key]
            except KeyError:
                pass

        # We'll never get here
        raise KeyError

    def __setitem__(self, user_key: str, value: Any) -> None:
        """Set the value of the session_state entry with the given user_key.

        If the key corresponds to a widget or form that's been instantiated
        during the current script run, raise a StreamlitAPIException instead.
        """
        from streamlit.runtime.scriptrunner import get_script_run_ctx

        ctx = get_script_run_ctx()

        if ctx is not None:
            widget_id = self._key_id_mapping.get(user_key, None)
            widget_ids = ctx.widget_ids_this_run
            form_ids = ctx.form_ids_this_run

            if widget_id in widget_ids or user_key in form_ids:
                raise StreamlitAPIException(
                    f"`st.session_state.{user_key}` cannot be modified after the widget"
                    f" with key `{user_key}` is instantiated."
                )
        self._new_session_state[user_key] = value

    def __delitem__(self, key: str) -> None:
        """Delete `key` (and its widget id alias) from every internal dict."""
        widget_id = self._get_widget_id(key)

        if not (key in self or widget_id in self):
            raise KeyError(_missing_key_error_message(key))

        if key in self._new_session_state:
            del self._new_session_state[key]
        if key in self._old_state:
            del self._old_state[key]
        if key in self._key_id_mapping:
            del self._key_id_mapping[key]
        if widget_id in self._new_widget_state:
            del self._new_widget_state[widget_id]
        if widget_id in self._old_state:
            del self._old_state[widget_id]

    def set_widgets_from_proto(self, widget_states: WidgetStatesProto) -> None:
        """Set the value of all widgets represented in the given WidgetStatesProto."""
        for state in widget_states.widgets:
            self._new_widget_state.set_widget_from_proto(state)

    def on_script_will_rerun(self, latest_widget_states: WidgetStatesProto) -> None:
        """Called by ScriptRunner before its script re-runs.

        Update widget data and call callbacks on widgets whose value changed
        between the previous and current script runs.
        """
        # Clear any triggers that weren't reset because the script was disconnected
        self._reset_triggers()
        self._compact_state()
        self.set_widgets_from_proto(latest_widget_states)
        self._call_callbacks()

    def _call_callbacks(self) -> None:
        """Call any callback associated with each widget whose value
        changed between the previous and current script runs.
        """
        from streamlit.runtime.scriptrunner import RerunException

        changed_widget_ids = [
            wid for wid in self._new_widget_state if self._widget_changed(wid)
        ]
        for wid in changed_widget_ids:
            try:
                self._new_widget_state.call_callback(wid)
            except RerunException:
                st.warning("Calling st.rerun() within a callback is a no-op.")

    def _widget_changed(self, widget_id: str) -> bool:
        """True if the given widget's value changed between the previous
        script run and the current script run.
        """
        new_value = self._new_widget_state.get(widget_id)
        old_value = self._old_state.get(widget_id)
        changed: bool = new_value != old_value
        return changed

    def on_script_finished(self, widget_ids_this_run: set[str]) -> None:
        """Called by ScriptRunner after its script finishes running.
        Updates widgets to prepare for the next script run.

        Parameters
        ----------
        widget_ids_this_run: set[str]
            The IDs of the widgets that were accessed during the script
            run. Any widget state whose ID does *not* appear in this set
            is considered "stale" and will be removed.
        """
        self._reset_triggers()
        self._remove_stale_widgets(widget_ids_this_run)

    def _reset_triggers(self) -> None:
        """Set all trigger values in our state dictionary to False."""
        # Triggers (e.g. button presses) must not persist past a single run,
        # so both new and old state entries get reset to their unset values.
        for state_id in self._new_widget_state:
            metadata = self._new_widget_state.widget_metadata.get(state_id)
            if metadata is not None:
                if metadata.value_type == "trigger_value":
                    self._new_widget_state[state_id] = Value(False)
                elif metadata.value_type == "string_trigger_value":
                    self._new_widget_state[state_id] = Value(None)

        for state_id in self._old_state:
            metadata = self._new_widget_state.widget_metadata.get(state_id)
            if metadata is not None:
                if metadata.value_type == "trigger_value":
                    self._old_state[state_id] = False
                elif metadata.value_type == "string_trigger_value":
                    self._old_state[state_id] = None

    def _remove_stale_widgets(self, active_widget_ids: set[str]) -> None:
        """Remove widget state for widgets whose ids aren't in `active_widget_ids`."""
        self._new_widget_state.remove_stale_widgets(active_widget_ids)

        # Remove entries from _old_state corresponding to
        # widgets not in widget_ids.
        self._old_state = {
            k: v
            for k, v in self._old_state.items()
            if (k in active_widget_ids or not is_widget_id(k))
        }

    def _set_widget_metadata(self, widget_metadata: WidgetMetadata[Any]) -> None:
        """Set a widget's metadata."""
        widget_id = widget_metadata.id
        self._new_widget_state.widget_metadata[widget_id] = widget_metadata

    def get_widget_states(self) -> list[WidgetStateProto]:
        """Return a list of serialized widget values for each widget with a value."""
        return self._new_widget_state.as_widget_states()

    def _get_widget_id(self, k: str) -> str:
        """Turns a value that might be a widget id or a user provided key into
        an appropriate widget id.
        """
        return self._key_id_mapping.get(k, k)

    def _set_key_widget_mapping(self, widget_id: str, user_key: str) -> None:
        """Record that `user_key` refers to the widget with `widget_id`."""
        self._key_id_mapping[user_key] = widget_id

    def register_widget(
        self, metadata: WidgetMetadata[T], user_key: str | None
    ) -> RegisterWidgetResult[T]:
        """Register a widget with the SessionState.

        Returns
        -------
        RegisterWidgetResult[T]
            Contains the widget's current value, and a bool that will be True
            if the frontend needs to be updated with the current value.
        """
        widget_id = metadata.id

        self._set_widget_metadata(metadata)
        if user_key is not None:
            # If the widget has a user_key, update its user_key:widget_id mapping
            self._set_key_widget_mapping(widget_id, user_key)

        if widget_id not in self and (user_key is None or user_key not in self):
            # This is the first time the widget is registered, so we save its
            # value in widget state.
            deserializer = metadata.deserializer
            initial_widget_value = deepcopy(deserializer(None, metadata.id))
            self._new_widget_state.set_from_value(widget_id, initial_widget_value)

        # Get the current value of the widget for use as its return value.
        # We return a copy, so that reference types can't be accidentally
        # mutated by user code.
        widget_value = cast(T, self[widget_id])
        widget_value = deepcopy(widget_value)

        # widget_value_changed indicates to the caller that the widget's
        # current value is different from what is in the frontend.
        widget_value_changed = user_key is not None and self.is_new_state_value(
            user_key
        )

        return RegisterWidgetResult(widget_value, widget_value_changed)

    def __contains__(self, key: str) -> bool:
        # EAFP: membership is defined by whether __getitem__ succeeds.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def get_stats(self) -> list[CacheStat]:
        """Return a single CacheStat reporting this instance's memory use."""
        # Lazy-load vendored package to prevent import of numpy
        from streamlit.vendor.pympler.asizeof import asizeof

        stat = CacheStat("st_session_state", "", asizeof(self))
        return [stat]

    def _check_serializable(self) -> None:
        """Verify that everything added to session state can be serialized.
        We use pickleability as the metric for serializability, and test for
        pickleability by just trying it.
        """
        for k in self:
            try:
                pickle.dumps(self[k])
            except Exception as e:
                err_msg = f"""Cannot serialize the value (of type `{type(self[k])}`) of '{k}' in st.session_state.
Streamlit has been configured to use [pickle](https://docs.python.org/3/library/pickle.html) to
serialize session_state values. Please convert the value to a pickle-serializable type. To learn
more about this behavior, see [our docs](https://docs.streamlit.io/knowledge-base/using-streamlit/serializable-session-state). """
                raise UnserializableSessionStateError(err_msg) from e

    def maybe_check_serializable(self) -> None:
        """Verify that session state can be serialized, if the relevant config
        option is set.

        See `_check_serializable` for details."""
        if config.get_option("runner.enforceSerializableSessionState"):
            self._check_serializable()
The provided code snippet includes necessary dependencies for implementing the `get_session_state` function. Write a Python function `def get_session_state() -> SafeSessionState` to solve the following problem:
Get the SessionState object for the current session. Note that in streamlit scripts, this function should not be called directly. Instead, SessionState objects should be accessed via st.session_state.
Here is the function:
def get_session_state() -> SafeSessionState:
    """Get the SessionState object for the current session.

    Note that in streamlit scripts, this function should not be called
    directly. Instead, SessionState objects should be accessed via
    st.session_state.
    """
    global _state_use_warning_already_displayed
    from streamlit.runtime.scriptrunner import get_script_run_ctx

    ctx = get_script_run_ctx()
    if ctx is not None:
        return ctx.session_state

    # The script is running bare (no script run context): session state acts
    # as an always-empty dictionary, and we warn once per process.
    if not _state_use_warning_already_displayed:
        _state_use_warning_already_displayed = True
        if not runtime.exists():
            _LOGGER.warning(
                "Session state does not function when running a script without `streamlit run`"
            )
    return SafeSessionState(SessionState(), lambda: None)
from __future__ import annotations
from typing import Any, Final, Iterator, MutableMapping
from streamlit import logger as _logger
from streamlit import runtime
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.state.common import require_valid_user_key
from streamlit.runtime.state.safe_session_state import SafeSessionState
from streamlit.runtime.state.session_state import SessionState
from streamlit.type_util import Key
def _missing_attr_error_message(attr_name: str) -> str:
return (
f'st.session_state has no attribute "{attr_name}". Did you forget to initialize it? '
f"More info: https://docs.streamlit.io/library/advanced-features/session-state#initialization"
) | null |
from __future__ import annotations
import json
import pickle
from copy import deepcopy
from dataclasses import dataclass, field, replace
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterator,
KeysView,
List,
MutableMapping,
Union,
cast,
)
from typing_extensions import TypeAlias
import streamlit as st
from streamlit import config, util
from streamlit.errors import StreamlitAPIException, UnserializableSessionStateError
from streamlit.proto.WidgetStates_pb2 import WidgetState as WidgetStateProto
from streamlit.proto.WidgetStates_pb2 import WidgetStates as WidgetStatesProto
from streamlit.runtime.state.common import (
RegisterWidgetResult,
T,
WidgetMetadata,
is_keyed_widget_id,
is_widget_id,
)
from streamlit.runtime.state.query_params import QueryParams
from streamlit.runtime.stats import CacheStat, CacheStatsProvider, group_stats
from streamlit.type_util import ValueFieldName, is_array_value_field_name
def _missing_key_error_message(key: str) -> str:
return (
f'st.session_state has no key "{key}". Did you forget to initialize it? '
f"More info: https://docs.streamlit.io/library/advanced-features/session-state#initialization"
) | null |
from __future__ import annotations
import json
import pickle
from copy import deepcopy
from dataclasses import dataclass, field, replace
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterator,
KeysView,
List,
MutableMapping,
Union,
cast,
)
from typing_extensions import TypeAlias
import streamlit as st
from streamlit import config, util
from streamlit.errors import StreamlitAPIException, UnserializableSessionStateError
from streamlit.proto.WidgetStates_pb2 import WidgetState as WidgetStateProto
from streamlit.proto.WidgetStates_pb2 import WidgetStates as WidgetStatesProto
from streamlit.runtime.state.common import (
RegisterWidgetResult,
T,
WidgetMetadata,
is_keyed_widget_id,
is_widget_id,
)
from streamlit.runtime.state.query_params import QueryParams
from streamlit.runtime.stats import CacheStat, CacheStatsProvider, group_stats
from streamlit.type_util import ValueFieldName, is_array_value_field_name
STREAMLIT_INTERNAL_KEY_PREFIX: Final = "$$STREAMLIT_INTERNAL_KEY"
def _is_internal_key(key: str) -> bool:
return key.startswith(STREAMLIT_INTERNAL_KEY_PREFIX) | null |
from __future__ import annotations
import textwrap
from types import MappingProxyType
from typing import TYPE_CHECKING, Final, Mapping
from typing_extensions import TypeAlias
from streamlit.errors import DuplicateWidgetID
from streamlit.proto.WidgetStates_pb2 import WidgetState, WidgetStates
from streamlit.runtime.state.common import (
RegisterWidgetResult,
T,
WidgetArgs,
WidgetCallback,
WidgetDeserializer,
WidgetKwargs,
WidgetMetadata,
WidgetProto,
WidgetSerializer,
user_key_from_widget_id,
)
from streamlit.type_util import ValueFieldName
The provided code snippet includes necessary dependencies for implementing the `coalesce_widget_states` function. Write a Python function `def coalesce_widget_states( old_states: WidgetStates, new_states: WidgetStates ) -> WidgetStates` to solve the following problem:
Coalesce an older WidgetStates into a newer one, and return a new WidgetStates containing the result. For most widget values, we just take the latest version. However, any trigger_values (which are set by buttons) that are True in `old_states` will be set to True in the coalesced result, so that button presses don't go missing.
Here is the function:
def coalesce_widget_states(
    old_states: WidgetStates, new_states: WidgetStates
) -> WidgetStates:
    """Coalesce an older WidgetStates into a newer one, and return a new
    WidgetStates containing the result.

    For most widget values, we just take the latest version.

    However, any trigger_values (which are set by buttons) that are True in
    `old_states` will be set to True in the coalesced result, so that button
    presses don't go missing.
    """
    # Start from the new states; old trigger states may overwrite below.
    states_by_id: dict[str, WidgetState] = {
        wstate.id: wstate for wstate in new_states.widgets
    }
    # (oneof field name, value meaning "not triggered") pairs.
    trigger_value_types = [("trigger_value", False), ("string_trigger_value", None)]
    for old_state in old_states.widgets:
        for trigger_value_type, unset_value in trigger_value_types:
            if (
                # NOTE(review): for the "string_trigger_value" case this still
                # reads `old_state.trigger_value` (the bool oneof member), whose
                # protobuf default False compares unequal to None, so the check
                # passes whenever the string oneof is set at all. Likely benign
                # because WhichOneof only matches set fields, but confirm the
                # intent was not `old_state.string_trigger_value`.
                old_state.WhichOneof("value") == trigger_value_type
                and old_state.trigger_value != unset_value
            ):
                # Ensure the corresponding new_state is also a trigger;
                # otherwise, a widget that was previously a button but no longer is
                # could get a bad value.
                new_trigger_val = states_by_id.get(old_state.id)
                if (
                    new_trigger_val
                    and new_trigger_val.WhichOneof("value") == trigger_value_type
                ):
                    states_by_id[old_state.id] = old_state

    coalesced = WidgetStates()
    coalesced.widgets.extend(states_by_id.values())
    return coalesced
from __future__ import annotations
import hashlib
from dataclasses import dataclass, field
from datetime import date, datetime, time, timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Final,
Generic,
Sequence,
Tuple,
TypeVar,
Union,
)
from google.protobuf.message import Message
from typing_extensions import TypeAlias
from streamlit import config, util
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow
from streamlit.proto.Button_pb2 import Button
from streamlit.proto.CameraInput_pb2 import CameraInput
from streamlit.proto.ChatInput_pb2 import ChatInput
from streamlit.proto.Checkbox_pb2 import Checkbox
from streamlit.proto.ColorPicker_pb2 import ColorPicker
from streamlit.proto.Components_pb2 import ComponentInstance
from streamlit.proto.DateInput_pb2 import DateInput
from streamlit.proto.DownloadButton_pb2 import DownloadButton
from streamlit.proto.FileUploader_pb2 import FileUploader
from streamlit.proto.MultiSelect_pb2 import MultiSelect
from streamlit.proto.NumberInput_pb2 import NumberInput
from streamlit.proto.Radio_pb2 import Radio
from streamlit.proto.Selectbox_pb2 import Selectbox
from streamlit.proto.Slider_pb2 import Slider
from streamlit.proto.TextArea_pb2 import TextArea
from streamlit.proto.TextInput_pb2 import TextInput
from streamlit.proto.TimeInput_pb2 import TimeInput
from streamlit.type_util import ValueFieldName
from streamlit.util import HASHLIB_KWARGS
# Prefix marking widget ids produced by compute_widget_id(), making them
# easy to recognize among user-provided keys.
GENERATED_WIDGET_ID_PREFIX: Final = "$$WIDGET_ID"

# Types accepted as inputs to compute_widget_id(): values whose str() form
# is expected to be stable and deterministic across runs.
# PROTO_SCALAR_VALUE is defined outside this excerpt.
SAFE_VALUES = Union[
    date,
    time,
    datetime,
    timedelta,
    None,
    "NoValue",
    "ellipsis",
    Message,
    PROTO_SCALAR_VALUE,
]

# Extra kwargs for hashlib.new(): `usedforsecurity=False` (Python 3.9+)
# declares the digest non-cryptographic so MD5 stays usable on
# FIPS-restricted builds.
HASHLIB_KWARGS: dict[str, Any] = (
    {"usedforsecurity": False} if sys.version_info >= (3, 9) else {}
)
The provided code snippet includes necessary dependencies for implementing the `compute_widget_id` function. Write a Python function `def compute_widget_id( element_type: str, user_key: str | None = None, **kwargs: SAFE_VALUES | Sequence[SAFE_VALUES], ) -> str` to solve the following problem:
Compute the widget id for the given widget. This id is stable: a given set of inputs to this function will always produce the same widget id output. Only stable, deterministic values should be used to compute widget ids. Using nondeterministic values as inputs can cause the resulting widget id to change between runs. The widget id includes the user_key so widgets with identical arguments can use it to be distinct. The widget id includes an easily identified prefix, and the user_key as a suffix, to make it easy to identify it and know if a key maps to it.
Here is the function:
def compute_widget_id(
    element_type: str,
    user_key: str | None = None,
    **kwargs: SAFE_VALUES | Sequence[SAFE_VALUES],
) -> str:
    """Compute the widget id for the given widget.

    The id is stable: a given set of inputs always produces the same output,
    so only stable, deterministic values should be passed in. Nondeterministic
    inputs would cause the id to change between runs.

    The id includes the user_key so widgets with identical arguments can use
    it to be distinct, and carries an easily identified prefix plus the
    user_key as a suffix so a key can be matched back to its widget.
    """
    # Collect element type plus every (name, value) pair, then hash them in
    # order. dicts iterate in insertion order, so consistent call sites hash
    # consistently.
    pieces = [element_type]
    for arg_name, arg_value in kwargs.items():
        pieces.append(str(arg_name))
        pieces.append(str(arg_value))

    hasher = hashlib.new("md5", **HASHLIB_KWARGS)
    for piece in pieces:
        hasher.update(piece.encode("utf-8"))
    return f"{GENERATED_WIDGET_ID_PREFIX}-{hasher.hexdigest()}-{user_key}"
178,343 | from __future__ import annotations
import hashlib
from dataclasses import dataclass, field
from datetime import date, datetime, time, timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Final,
Generic,
Sequence,
Tuple,
TypeVar,
Union,
)
from google.protobuf.message import Message
from typing_extensions import TypeAlias
from streamlit import config, util
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow
from streamlit.proto.Button_pb2 import Button
from streamlit.proto.CameraInput_pb2 import CameraInput
from streamlit.proto.ChatInput_pb2 import ChatInput
from streamlit.proto.Checkbox_pb2 import Checkbox
from streamlit.proto.ColorPicker_pb2 import ColorPicker
from streamlit.proto.Components_pb2 import ComponentInstance
from streamlit.proto.DateInput_pb2 import DateInput
from streamlit.proto.DownloadButton_pb2 import DownloadButton
from streamlit.proto.FileUploader_pb2 import FileUploader
from streamlit.proto.MultiSelect_pb2 import MultiSelect
from streamlit.proto.NumberInput_pb2 import NumberInput
from streamlit.proto.Radio_pb2 import Radio
from streamlit.proto.Selectbox_pb2 import Selectbox
from streamlit.proto.Slider_pb2 import Slider
from streamlit.proto.TextArea_pb2 import TextArea
from streamlit.proto.TextInput_pb2 import TextInput
from streamlit.proto.TimeInput_pb2 import TimeInput
from streamlit.type_util import ValueFieldName
from streamlit.util import HASHLIB_KWARGS
def is_widget_id(key: str) -> bool:
    """Report whether ``key`` has the structure of a generated widget ID."""
    prefix = GENERATED_WIDGET_ID_PREFIX
    return key[: len(prefix)] == prefix
The provided code snippet includes necessary dependencies for implementing the `is_keyed_widget_id` function. Write a Python function `def is_keyed_widget_id(key: str) -> bool` to solve the following problem:
True if the given session_state key has the structure of a widget ID with a user_key.
Here is the function:
def is_keyed_widget_id(key: str) -> bool:
    """Report whether ``key`` is a widget ID that carries a user_key suffix."""
    if not is_widget_id(key):
        return False
    # Ids built without a user_key end in the literal suffix "-None".
    return not key.endswith("-None")
178,344 | from __future__ import annotations
import hashlib
from dataclasses import dataclass, field
from datetime import date, datetime, time, timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Final,
Generic,
Sequence,
Tuple,
TypeVar,
Union,
)
from google.protobuf.message import Message
from typing_extensions import TypeAlias
from streamlit import config, util
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow
from streamlit.proto.Button_pb2 import Button
from streamlit.proto.CameraInput_pb2 import CameraInput
from streamlit.proto.ChatInput_pb2 import ChatInput
from streamlit.proto.Checkbox_pb2 import Checkbox
from streamlit.proto.ColorPicker_pb2 import ColorPicker
from streamlit.proto.Components_pb2 import ComponentInstance
from streamlit.proto.DateInput_pb2 import DateInput
from streamlit.proto.DownloadButton_pb2 import DownloadButton
from streamlit.proto.FileUploader_pb2 import FileUploader
from streamlit.proto.MultiSelect_pb2 import MultiSelect
from streamlit.proto.NumberInput_pb2 import NumberInput
from streamlit.proto.Radio_pb2 import Radio
from streamlit.proto.Selectbox_pb2 import Selectbox
from streamlit.proto.Slider_pb2 import Slider
from streamlit.proto.TextArea_pb2 import TextArea
from streamlit.proto.TextInput_pb2 import TextInput
from streamlit.proto.TimeInput_pb2 import TimeInput
from streamlit.type_util import ValueFieldName
from streamlit.util import HASHLIB_KWARGS
GENERATED_WIDGET_ID_PREFIX: Final = "$$WIDGET_ID"
def is_widget_id(key: str) -> bool:
    """True if the given session_state key has the structure of a widget ID."""
    # Generated widget ids always start with the reserved prefix.
    return key.startswith(GENERATED_WIDGET_ID_PREFIX)
class StreamlitAPIException(MarkdownFormattedException):
    """Raised when user code interacts with the Streamlit API incorrectly.

    Any exception triggered by a malformed `st.foo` call should be a
    StreamlitAPIException (or a subclass of it). When these exceptions are
    shown on the frontend, Streamlit-internal frames are stripped from the
    stack trace so users aren't distracted by framework noise.
    """

    def __repr__(self) -> str:
        # Delegate to the shared helper so reprs stay uniform across classes.
        return util.repr_(self)
The provided code snippet includes necessary dependencies for implementing the `require_valid_user_key` function. Write a Python function `def require_valid_user_key(key: str) -> None` to solve the following problem:
Raise an Exception if the given user_key is invalid.
Here is the function:
def require_valid_user_key(key: str) -> None:
    """Raise StreamlitAPIException if ``key`` collides with the reserved widget-ID namespace."""
    if not is_widget_id(key):
        return
    raise StreamlitAPIException(
        f"Keys beginning with {GENERATED_WIDGET_ID_PREFIX} are reserved."
    )
178,345 | from __future__ import annotations
import hashlib
from dataclasses import dataclass, field
from datetime import date, datetime, time, timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Final,
Generic,
Sequence,
Tuple,
TypeVar,
Union,
)
from google.protobuf.message import Message
from typing_extensions import TypeAlias
from streamlit import config, util
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow
from streamlit.proto.Button_pb2 import Button
from streamlit.proto.CameraInput_pb2 import CameraInput
from streamlit.proto.ChatInput_pb2 import ChatInput
from streamlit.proto.Checkbox_pb2 import Checkbox
from streamlit.proto.ColorPicker_pb2 import ColorPicker
from streamlit.proto.Components_pb2 import ComponentInstance
from streamlit.proto.DateInput_pb2 import DateInput
from streamlit.proto.DownloadButton_pb2 import DownloadButton
from streamlit.proto.FileUploader_pb2 import FileUploader
from streamlit.proto.MultiSelect_pb2 import MultiSelect
from streamlit.proto.NumberInput_pb2 import NumberInput
from streamlit.proto.Radio_pb2 import Radio
from streamlit.proto.Selectbox_pb2 import Selectbox
from streamlit.proto.Slider_pb2 import Slider
from streamlit.proto.TextArea_pb2 import TextArea
from streamlit.proto.TextInput_pb2 import TextInput
from streamlit.proto.TimeInput_pb2 import TimeInput
from streamlit.type_util import ValueFieldName
from streamlit.util import HASHLIB_KWARGS
TESTING_KEY = "$$STREAMLIT_INTERNAL_KEY_TESTING"
class ScriptRunContext:
    """A context object that contains data for a "script run" - that is,
    data that's scoped to a single ScriptRunner execution (and therefore also
    scoped to a single connected "session").
    ScriptRunContext is used internally by virtually every `st.foo()` function.
    It is accessed only from the script thread that's created by ScriptRunner,
    or from app-created helper threads that have been "attached" to the
    ScriptRunContext via `add_script_run_ctx`.
    Streamlit code typically retrieves the active ScriptRunContext via the
    `get_script_run_ctx` function.
    """
    session_id: str
    # Callback that forwards a ForwardMsg to the session; called by enqueue().
    _enqueue: Callable[[ForwardMsg], None]
    query_string: str
    session_state: SafeSessionState
    uploaded_file_mgr: UploadedFileManager
    main_script_path: str
    page_script_hash: str
    user_info: UserInfo
    gather_usage_stats: bool = False
    command_tracking_deactivated: bool = False
    tracked_commands: list[Command] = field(default_factory=list)
    tracked_commands_counter: Counter[str] = field(default_factory=collections.Counter)
    # Guards st.set_page_config(): flipped off by enqueue() once a
    # page_config message, or any delta after script start, has been sent.
    _set_page_config_allowed: bool = True
    _has_script_started: bool = False
    # Per-run bookkeeping; all of the following are cleared by reset().
    widget_ids_this_run: set[str] = field(default_factory=set)
    widget_user_keys_this_run: set[str] = field(default_factory=set)
    form_ids_this_run: set[str] = field(default_factory=set)
    cursors: dict[int, "streamlit.cursor.RunningCursor"] = field(default_factory=dict)
    script_requests: ScriptRequests | None = None
    # TODO(willhuang1997): Remove this variable when experimental query params are removed
    _experimental_query_params_used = False
    _production_query_params_used = False
    def reset(self, query_string: str = "", page_script_hash: str = "") -> None:
        # Clear all per-run state so the context can be reused for a rerun.
        self.cursors = {}
        self.widget_ids_this_run = set()
        self.widget_user_keys_this_run = set()
        self.form_ids_this_run = set()
        self.query_string = query_string
        self.page_script_hash = page_script_hash
        # Permit set_page_config when the ScriptRunContext is reused on a rerun
        self._set_page_config_allowed = True
        self._has_script_started = False
        self.command_tracking_deactivated: bool = False
        self.tracked_commands = []
        self.tracked_commands_counter = collections.Counter()
        # Re-seed session query params from the raw query string; the
        # *_with_no_forward_msg calls avoid emitting a ForwardMsg per change.
        parsed_query_params = parse.parse_qs(query_string, keep_blank_values=True)
        with self.session_state.query_params() as qp:
            qp.clear_with_no_forward_msg()
            for key, val in parsed_query_params.items():
                if len(val) == 0:
                    qp.set_with_no_forward_msg(key, val="")
                elif len(val) == 1:
                    qp.set_with_no_forward_msg(key, val=val[-1])
                else:
                    qp.set_with_no_forward_msg(key, val)
    def on_script_start(self) -> None:
        # Marks that the script body has begun executing; see enqueue().
        self._has_script_started = True
    def enqueue(self, msg: ForwardMsg) -> None:
        """Enqueue a ForwardMsg for this context's session.

        Raises StreamlitAPIException if the message is a page_config change
        after set_page_config is no longer allowed for this run.
        """
        if msg.HasField("page_config_changed") and not self._set_page_config_allowed:
            raise StreamlitAPIException(
                "`set_page_config()` can only be called once per app page, "
                + "and must be called as the first Streamlit command in your script.\n\n"
                + "For more information refer to the [docs]"
                + "(https://docs.streamlit.io/library/api-reference/utilities/st.set_page_config)."
            )
        # We want to disallow set_page config if one of the following occurs:
        # - set_page_config was called on this message
        # - The script has already started and a different st call occurs (a delta)
        if msg.HasField("page_config_changed") or (
            msg.HasField("delta") and self._has_script_started
        ):
            self._set_page_config_allowed = False
        # Pass the message up to our associated ScriptRunner.
        self._enqueue(msg)
    def ensure_single_query_api_used(self):
        # Mixing the experimental and production query-param APIs is an error.
        if self._experimental_query_params_used and self._production_query_params_used:
            raise StreamlitAPIException(
                "Using `st.query_params` together with either `st.experimental_get_query_params` "
                + "or `st.experimental_set_query_params` is not supported. Please convert your app "
                + "to only use `st.query_params`"
            )
    def mark_experimental_query_params_used(self):
        self._experimental_query_params_used = True
        self.ensure_single_query_api_used()
    def mark_production_query_params_used(self):
        self._production_query_params_used = True
        self.ensure_single_query_api_used()
def save_for_app_testing(ctx: ScriptRunContext, k: str, v: Any):
    """Stash ``v`` under ``k`` in session state when app-test mode is enabled."""
    if not config.get_option("global.appTest"):
        return
    try:
        bucket = ctx.session_state[TESTING_KEY]
    except KeyError:
        # First value recorded this run: create the testing bucket.
        ctx.session_state[TESTING_KEY] = {k: v}
    else:
        bucket[k] = v
178,346 | from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Iterable, Iterator, MutableMapping
from urllib import parse
from streamlit.constants import EMBED_QUERY_PARAMS_KEYS
from streamlit.errors import StreamlitAPIException
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
def missing_key_error_message(key: str) -> str:
    """Build the error text shown when st.query_params lacks ``key``."""
    template = 'st.query_params has no key "{}". Did you forget to initialize it?'
    return template.format(key)
178,347 | from __future__ import annotations
import contextlib
import hashlib
import mimetypes
import os.path
from typing import Final, NamedTuple
from streamlit.logger import get_logger
from streamlit.runtime.media_file_storage import (
MediaFileKind,
MediaFileStorage,
MediaFileStorageError,
)
from streamlit.runtime.stats import CacheStat, CacheStatsProvider, group_stats
from streamlit.util import HASHLIB_KWARGS
HASHLIB_KWARGS: dict[str, Any] = (
{"usedforsecurity": False} if sys.version_info >= (3, 9) else {}
)
The provided code snippet includes necessary dependencies for implementing the `_calculate_file_id` function. Write a Python function `def _calculate_file_id(data: bytes, mimetype: str, filename: str | None = None) -> str` to solve the following problem:
Hash data, mimetype, and an optional filename to generate a stable file ID. Parameters ---------- data Content of in-memory file in bytes. Other types will throw TypeError. mimetype Any string. Will be converted to bytes and used to compute a hash. filename Any string. Will be converted to bytes and used to compute a hash.
Here is the function:
def _calculate_file_id(data: bytes, mimetype: str, filename: str | None = None) -> str:
    """Derive a stable file ID from content, mimetype, and optional filename.

    Parameters
    ----------
    data
        Raw file contents in bytes; other types raise TypeError in hashlib.
    mimetype
        Mime type string, folded into the hash.
    filename
        Optional name, folded into the hash only when provided.
    """
    hasher = hashlib.new("sha224", **HASHLIB_KWARGS)
    pieces = [data, mimetype.encode()]
    if filename is not None:
        pieces.append(filename.encode())
    for piece in pieces:
        hasher.update(piece)
    return hasher.hexdigest()
178,348 | from __future__ import annotations
import contextlib
import hashlib
import mimetypes
import os.path
from typing import Final, NamedTuple
from streamlit.logger import get_logger
from streamlit.runtime.media_file_storage import (
MediaFileKind,
MediaFileStorage,
MediaFileStorageError,
)
from streamlit.runtime.stats import CacheStat, CacheStatsProvider, group_stats
from streamlit.util import HASHLIB_KWARGS
# Extensions we want to use for these mimetypes regardless of what the
# platform's mimetypes registry would pick.
PREFERRED_MIMETYPE_EXTENSION_MAP: Final = {
    "audio/wav": ".wav",
    "text/vtt": ".vtt",
}


def get_extension_for_mimetype(mimetype: str) -> str:
    """Return a file extension (with dot) for ``mimetype``, or "" if unknown.

    Our preferred overrides win over the stdlib ``mimetypes`` guess.
    """
    preferred = PREFERRED_MIMETYPE_EXTENSION_MAP.get(mimetype)
    if preferred is not None:
        return preferred
    guessed = mimetypes.guess_extension(mimetype, strict=False)
    return "" if guessed is None else guessed
178,349 | from __future__ import annotations
import os
import threading
from copy import deepcopy
from typing import (
Any,
Final,
ItemsView,
Iterator,
KeysView,
Mapping,
NoReturn,
ValuesView,
)
from blinker import Signal
import streamlit as st
import streamlit.watcher.path_watcher
from streamlit import file_util, runtime
from streamlit.logger import get_logger
def _missing_attr_error_message(attr_name: str) -> str:
return (
f'st.secrets has no attribute "{attr_name}". '
f"Did you forget to add it to secrets.toml or the app settings on Streamlit Cloud? "
f"More info: https://docs.streamlit.io/streamlit-cloud/get-started/deploy-an-app/connect-to-data-sources/secrets-management"
) | null |
178,350 | from __future__ import annotations
import os
import threading
from copy import deepcopy
from typing import (
Any,
Final,
ItemsView,
Iterator,
KeysView,
Mapping,
NoReturn,
ValuesView,
)
from blinker import Signal
import streamlit as st
import streamlit.watcher.path_watcher
from streamlit import file_util, runtime
from streamlit.logger import get_logger
def _missing_key_error_message(key: str) -> str:
return (
f'st.secrets has no key "{key}". '
f"Did you forget to add it to secrets.toml or the app settings on Streamlit Cloud? "
f"More info: https://docs.streamlit.io/streamlit-cloud/get-started/deploy-an-app/connect-to-data-sources/secrets-management"
) | null |
178,351 | from __future__ import annotations
from typing import Any
from streamlit.proto.Delta_pb2 import Delta
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
The provided code snippet includes necessary dependencies for implementing the `_is_composable_message` function. Write a Python function `def _is_composable_message(msg: ForwardMsg) -> bool` to solve the following problem:
True if the ForwardMsg is potentially composable with other ForwardMsgs.
Here is the function:
def _is_composable_message(msg: ForwardMsg) -> bool:
    """True if the ForwardMsg is potentially composable with other ForwardMsgs."""
    # Only delta messages can ever be composed.
    if not msg.HasField("delta"):
        return False
    # add_rows operations can raise while being applied, and the message
    # queue has no good way to surface those errors — so never compose them.
    return msg.delta.WhichOneof("type") not in ("add_rows", "arrow_add_rows")
178,352 | from __future__ import annotations
from typing import Any
from streamlit.proto.Delta_pb2 import Delta
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
The provided code snippet includes necessary dependencies for implementing the `_maybe_compose_deltas` function. Write a Python function `def _maybe_compose_deltas(old_delta: Delta, new_delta: Delta) -> Delta | None` to solve the following problem:
Combines new_delta onto old_delta if possible. If the combination takes place, the function returns a new Delta that should replace old_delta in the queue. If the new_delta is incompatible with old_delta, the function returns None. In this case, the new_delta should just be appended to the queue as normal.
Here is the function:
def _maybe_compose_deltas(old_delta: Delta, new_delta: Delta) -> Delta | None:
    """Return the Delta that should replace ``old_delta``, or None to append.

    When ``new_delta`` can supersede ``old_delta`` in the queue, the
    replacement Delta is returned. Otherwise None is returned and the caller
    should append ``new_delta`` to the queue as usual.
    """
    if old_delta.WhichOneof("type") == "add_block":
        # add_block deltas are never replaced, because blocks can have other
        # dependent deltas later in the queue. For example:
        #
        #   placeholder = st.empty()
        #   placeholder.columns(1)
        #   placeholder.empty()
        #
        # "placeholder.columns(1)" creates a parent container at delta_path
        # (0, 0) and a column child at (0, 0, 0). If the final
        # "placeholder.empty()" delta were composed with the parent container
        # delta, the frontend would error trying to add the column child to
        # what is now an element rather than a block.
        return None

    # A fresh element or block fully replaces whatever was there before.
    if new_delta.WhichOneof("type") in ("new_element", "add_block"):
        return new_delta

    return None
178,353 | from __future__ import annotations
import re
from typing import Any, Callable
The provided code snippet includes necessary dependencies for implementing the `to_upper_camel_case` function. Write a Python function `def to_upper_camel_case(snake_case_str: str) -> str` to solve the following problem:
Converts snake_case to UpperCamelCase. Example ------- foo_bar -> FooBar
Here is the function:
def to_upper_camel_case(snake_case_str: str) -> str:
    """Convert snake_case to UpperCamelCase.

    Example
    -------
    foo_bar -> FooBar
    """
    return "".join(part.title() for part in snake_case_str.split("_"))
178,354 | from __future__ import annotations
import re
from typing import Any, Callable
The provided code snippet includes necessary dependencies for implementing the `to_lower_camel_case` function. Write a Python function `def to_lower_camel_case(snake_case_str: str) -> str` to solve the following problem:
Converts snake_case to lowerCamelCase. Example ------- foo_bar -> fooBar fooBar -> fooBar (strings without underscores are returned unchanged)
Here is the function:
def to_lower_camel_case(snake_case_str: str) -> str:
    """Converts snake_case to lowerCamelCase.

    Strings without underscores are returned unchanged.

    Example
    -------
    foo_bar -> fooBar
    fooBar -> fooBar
    """
    # NOTE: the previous docstring example claimed fooBar -> foobar, but a
    # string with no underscores has always been returned as-is.
    words = snake_case_str.split("_")
    if len(words) == 1:
        return snake_case_str
    # Keep the first word untouched; title-case each subsequent word.
    return words[0] + "".join(word.title() for word in words[1:])
178,355 | from __future__ import annotations
import re
from typing import Any, Callable
The provided code snippet includes necessary dependencies for implementing the `convert_dict_keys` function. Write a Python function `def convert_dict_keys( func: Callable[[str], str], in_dict: dict[Any, Any] ) -> dict[Any, Any]` to solve the following problem:
Apply a conversion function to all keys in a dict. Parameters ---------- func : callable The function to apply. Takes a str and returns a str. in_dict : dict The dictionary to convert. If some value in this dict is itself a dict, it also gets recursively converted. Returns ------- dict A new dict with all the contents of `in_dict`, but with the keys converted by `func`.
Here is the function:
def convert_dict_keys(
    func: Callable[[str], str], in_dict: dict[Any, Any]
) -> dict[Any, Any]:
    """Return a copy of ``in_dict`` with every key passed through ``func``.

    Parameters
    ----------
    func : callable
        Key-conversion function; takes a str and returns a str.
    in_dict : dict
        Dictionary to convert. Values that are plain dicts (exactly ``dict``,
        not subclasses) are converted recursively; other values are carried
        over untouched.

    Returns
    -------
    dict
        A new dict mirroring ``in_dict`` with converted keys.
    """
    return {
        func(key): convert_dict_keys(func, value) if type(value) is dict else value
        for key, value in in_dict.items()
    }
178,356 | from __future__ import annotations
import dataclasses
import functools
import hashlib
import os
import subprocess
import sys
from typing import Any, Callable, Final, Iterable, Mapping, TypeVar
from streamlit import env_util
The provided code snippet includes necessary dependencies for implementing the `memoize` function. Write a Python function `def memoize(func: Callable[..., Any]) -> Callable[..., Any]` to solve the following problem:
Decorator to memoize the result of a no-args func.
Here is the function:
def memoize(func: Callable[..., Any]) -> Callable[..., Any]:
    """Decorator to memoize the result of a no-args func.

    The wrapped function runs at most once per distinct call signature; its
    result is cached and returned on every later call.
    """
    # Prefer the stdlib cache over a hand-rolled result holder: it is
    # thread-safe, preserves the wrapped function's metadata via wraps, and
    # behaves identically for the no-args case this decorator is used for.
    return functools.lru_cache(maxsize=None)(func)
178,357 | from __future__ import annotations
import dataclasses
import functools
import hashlib
import os
import subprocess
import sys
from typing import Any, Callable, Final, Iterable, Mapping, TypeVar
from streamlit import env_util
The provided code snippet includes necessary dependencies for implementing the `repr_` function. Write a Python function `def repr_(self: Any) -> str` to solve the following problem:
A clean repr for a class, excluding both values that are likely defaults, and those explicitly default for dataclasses.
Here is the function:
def repr_(self: Any) -> str:
    """A clean repr for a class, excluding both values that are likely defaults,
    and those explicitly default for dataclasses.
    """
    classname = self.__class__.__name__
    # Most of the falsey values, but excluding 0 and 0.0, since those often
    # have semantic meaning within streamlit.
    defaults: list[Any] = [None, "", False, [], set(), dict()]

    def _is_default(value: Any) -> bool:
        # A plain `value in defaults` check would treat 0 as a default
        # (0 == False), hiding attributes the comment above says to keep.
        # Comparing types first filters only genuine defaults.
        return any(type(value) is type(d) and value == d for d in defaults)

    if dataclasses.is_dataclass(self):
        fields_vals = (
            (f.name, getattr(self, f.name))
            for f in dataclasses.fields(self)
            if f.repr
            and getattr(self, f.name) != f.default
            and not _is_default(getattr(self, f.name))
        )
    else:
        fields_vals = (
            (f, v) for (f, v) in self.__dict__.items() if not _is_default(v)
        )
    field_reprs = ", ".join(f"{field}={value!r}" for field, value in fields_vals)
    return f"{classname}({field_reprs})"
178,358 | from __future__ import annotations
import dataclasses
import functools
import hashlib
import os
import subprocess
import sys
from typing import Any, Callable, Final, Iterable, Mapping, TypeVar
from streamlit import env_util
FLOAT_EQUALITY_EPSILON: Final[float] = 0.000000000005
_Value = TypeVar("_Value")
The provided code snippet includes necessary dependencies for implementing the `index_` function. Write a Python function `def index_(iterable: Iterable[_Value], x: _Value) -> int` to solve the following problem:
Return zero-based index of the first item whose value is equal to x. Raises a ValueError if there is no such item. We need a custom implementation instead of the built-in list .index() to be compatible with NumPy array and Pandas Series. Parameters ---------- iterable : list, tuple, numpy.ndarray, pandas.Series x : Any Returns ------- int
Here is the function:
def index_(iterable: Iterable[_Value], x: _Value) -> int:
    """Return zero-based index of the first item whose value is equal to x.

    A custom implementation (rather than list.index) so that NumPy arrays
    and Pandas Series work too. Float pairs are compared with a small
    epsilon tolerance.

    Parameters
    ----------
    iterable : list, tuple, numpy.ndarray, pandas.Series
    x : Any

    Returns
    -------
    int

    Raises
    ------
    ValueError
        If no item compares equal to x.
    """
    for position, item in enumerate(iterable):
        if isinstance(item, float) and isinstance(x, float):
            # Tolerant comparison covers exact equality as well.
            if abs(x - item) < FLOAT_EQUALITY_EPSILON:
                return position
        elif x == item:
            return position
    raise ValueError(f"{str(x)} is not in iterable")
178,359 | from __future__ import annotations
import copy
import hashlib
import json
from typing import TYPE_CHECKING, Any, Collection, Dict, Final, Iterable, Union, cast
from typing_extensions import TypeAlias
import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
from streamlit import config, type_util
from streamlit.color_util import Color, IntColorTuple, is_color_like, to_int_color_tuple
from streamlit.errors import StreamlitAPIException
from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.util import HASHLIB_KWARGS
Data: TypeAlias = Union[
"DataFrame",
"Styler",
Iterable[Any],
Dict[Any, Any],
None,
]
_DEFAULT_MAP: Final[dict[str, Any]] = dict(deck_gl_json_chart.EMPTY_MAP)
_DEFAULT_LAT_COL_NAMES: Final = {"lat", "latitude", "LAT", "LATITUDE"}
_DEFAULT_LON_COL_NAMES: Final = {"lon", "longitude", "LON", "LONGITUDE"}
_DEFAULT_COLOR: Final = (200, 30, 0, 160)
_DEFAULT_SIZE: Final = 100
def _get_lat_or_lon_col_name(
data: DataFrame,
human_readable_name: str,
col_name_from_user: str | None,
default_col_names: set[str],
) -> str:
"""Returns the column name to be used for latitude or longitude."""
if isinstance(col_name_from_user, str) and col_name_from_user in data.columns:
col_name = col_name_from_user
else:
# Try one of the default col_names:
candidate_col_name = None
for c in default_col_names:
if c in data.columns:
candidate_col_name = c
break
if candidate_col_name is None:
formatted_allowed_col_name = ", ".join(map(repr, sorted(default_col_names)))
formmated_col_names = ", ".join(map(repr, list(data.columns)))
raise StreamlitAPIException(
f"Map data must contain a {human_readable_name} column named: "
f"{formatted_allowed_col_name}. Existing columns: {formmated_col_names}"
)
else:
col_name = candidate_col_name
# Check that the column is well-formed.
# IMPLEMENTATION NOTE: We can't use isnull().values.any() because .values can return
# ExtensionArrays, which don't have a .any() method.
# (Read about ExtensionArrays here: # https://pandas.pydata.org/community/blog/extension-arrays.html)
# However, after a performance test I found the solution below runs basically as
# fast as .values.any().
if any(data[col_name].isnull().array):
raise StreamlitAPIException(
f"Column {col_name} is not allowed to contain null values, such "
"as NaN, NaT, or None."
)
return col_name
def _get_value_and_col_name(
data: DataFrame,
value_or_name: Any,
default_value: Any,
) -> tuple[Any, str | None]:
"""Take a value_or_name passed in by the Streamlit developer and return a PyDeck
argument and column name for that property.
This is used for the size and color properties of the chart.
Example:
- If the user passes size=None, this returns the default size value and no column.
- If the user passes size=42, this returns 42 and no column.
- If the user passes size="my_col_123", this returns "@@=my_col_123" and "my_col_123".
"""
pydeck_arg: str | float
if isinstance(value_or_name, str) and value_or_name in data.columns:
col_name = value_or_name
pydeck_arg = f"@@={col_name}"
else:
col_name = None
if value_or_name is None:
pydeck_arg = default_value
else:
pydeck_arg = value_or_name
return pydeck_arg, col_name
def _convert_color_arg_or_column(
    data: DataFrame,
    color_arg: str | Color,
    color_col_name: str | None,
) -> None | str | IntColorTuple:
    """Convert a color argument (and, if given, a color column) to PyDeck form.

    For example:
    - color_arg "#fff" becomes (255, 255, 255, 255).
    - a column name converts every cell of that column to a color tuple.

    NOTE: mutates ``data`` in place when a color column is converted.
    """
    if color_col_name is not None:
        column = data[color_col_name]
        # Convert the color column to the right format.
        if len(column) > 0 and is_color_like(column.iat[0]):
            # .loc[] avoids a SettingWithCopyWarning in some cases.
            data.loc[:, color_col_name] = data.loc[:, color_col_name].map(
                to_int_color_tuple
            )
        else:
            raise StreamlitAPIException(
                f'Column "{color_col_name}" does not appear to contain valid colors.'
            )
        # Guaranteed to be a str because of _get_value_and_col_name.
        assert isinstance(color_arg, str)
        return color_arg

    if color_arg is not None:
        return to_int_color_tuple(color_arg)

    return None
def _get_viewport_details(
data: DataFrame, lat_col_name: str, lon_col_name: str, zoom: int | None
) -> tuple[int, float, float]:
"""Auto-set viewport when not fully specified by user."""
min_lat = data[lat_col_name].min()
max_lat = data[lat_col_name].max()
min_lon = data[lon_col_name].min()
max_lon = data[lon_col_name].max()
center_lat = (max_lat + min_lat) / 2.0
center_lon = (max_lon + min_lon) / 2.0
range_lon = abs(max_lon - min_lon)
range_lat = abs(max_lat - min_lat)
if zoom is None:
if range_lon > range_lat:
longitude_distance = range_lon
else:
longitude_distance = range_lat
zoom = _get_zoom_level(longitude_distance)
return zoom, center_lat, center_lon
import json
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.
    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)
    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """
    def __repr__(self) -> str:
        """Return a standardized repr via streamlit.util.repr_."""
        return util.repr_(self)
def to_deckgl_json(
    data: Data,
    lat: str | None,
    lon: str | None,
    size: None | str | float,
    color: None | str | Collection[float],
    map_style: str | None,
    zoom: int | None,
) -> str:
    """Serialize st.map-style inputs into a PyDeck JSON spec string.

    Resolves latitude/longitude column names, resolves the size and color
    arguments, converts colors, computes the viewport, and embeds the
    records into a single ScatterplotLayer on top of _DEFAULT_MAP.

    Returns the JSON-encoded PyDeck spec; for None or empty data, returns
    the default (empty) map spec.

    Raises StreamlitAPIException when map_style is set without a configured
    Mapbox token, or when the color column does not contain valid colors.
    """
    if data is None:
        return json.dumps(_DEFAULT_MAP)
    # TODO(harahu): iterables don't have the empty attribute. This is either
    # a bug, or the documented data type is too broad. One or the other
    # should be addressed
    if hasattr(data, "empty") and data.empty:
        return json.dumps(_DEFAULT_MAP)
    df = type_util.convert_anything_to_df(data)
    lat_col_name = _get_lat_or_lon_col_name(df, "latitude", lat, _DEFAULT_LAT_COL_NAMES)
    lon_col_name = _get_lat_or_lon_col_name(
        df, "longitude", lon, _DEFAULT_LON_COL_NAMES
    )
    size_arg, size_col_name = _get_value_and_col_name(df, size, _DEFAULT_SIZE)
    color_arg, color_col_name = _get_value_and_col_name(df, color, _DEFAULT_COLOR)
    # Drop columns we're not using.
    # (Sort for tests)
    used_columns = sorted(
        [
            c
            for c in {lat_col_name, lon_col_name, size_col_name, color_col_name}
            if c is not None
        ]
    )
    df = df[used_columns]
    # NOTE: this mutates df's color column in place when a color column is used.
    color_arg = _convert_color_arg_or_column(df, color_arg, color_col_name)
    zoom, center_lat, center_lon = _get_viewport_details(
        df, lat_col_name, lon_col_name, zoom
    )
    # Build the final spec from a deep copy so _DEFAULT_MAP stays pristine.
    default = copy.deepcopy(_DEFAULT_MAP)
    default["initialViewState"]["latitude"] = center_lat
    default["initialViewState"]["longitude"] = center_lon
    default["initialViewState"]["zoom"] = zoom
    default["layers"] = [
        {
            "@@type": "ScatterplotLayer",
            "getPosition": f"@@=[{lon_col_name}, {lat_col_name}]",
            "getRadius": size_arg,
            "radiusMinPixels": 3,
            "radiusUnits": "meters",
            "getFillColor": color_arg,
            "data": df.to_dict("records"),
        }
    ]
    if map_style:
        if not config.get_option("mapbox.token"):
            raise StreamlitAPIException(
                "You need a Mapbox token in order to select a map type. "
                "Refer to the docs for st.map for more information."
            )
        default["mapStyle"] = map_style
    return json.dumps(default)
178,360 | from __future__ import annotations
import copy
import hashlib
import json
from typing import TYPE_CHECKING, Any, Collection, Dict, Final, Iterable, Union, cast
from typing_extensions import TypeAlias
import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
from streamlit import config, type_util
from streamlit.color_util import Color, IntColorTuple, is_color_like, to_int_color_tuple
from streamlit.errors import StreamlitAPIException
from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.util import HASHLIB_KWARGS
import json
HASHLIB_KWARGS: dict[str, Any] = (
{"usedforsecurity": False} if sys.version_info >= (3, 9) else {}
)
def marshall(
    pydeck_proto: DeckGlJsonChartProto,
    pydeck_json: str,
    use_container_width: bool,
) -> None:
    """Fill a DeckGlJsonChart proto with the chart JSON and a content id.

    Parameters
    ----------
    pydeck_proto : DeckGlJsonChartProto
        The proto to populate (mutated in place).
    pydeck_json : str
        The PyDeck chart spec, already serialized to JSON.
    use_container_width : bool
        Whether the frontend should stretch the chart to the container width.
    """
    json_bytes = pydeck_json.encode("utf-8")
    # MD5 here is a content fingerprint, not a security measure
    # (HASHLIB_KWARGS passes usedforsecurity=False where supported).
    # Renamed the local from `id` to avoid shadowing the builtin.
    chart_id = hashlib.md5(json_bytes, **HASHLIB_KWARGS).hexdigest()

    pydeck_proto.json = pydeck_json
    pydeck_proto.use_container_width = use_container_width
    pydeck_proto.id = chart_id
178,361 | from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, cast
from streamlit.proto.Json_pb2 import Json as JsonProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.state import QueryParamsProxy, SessionStateProxy
from streamlit.user_info import UserInfoProxy
The provided code snippet includes necessary dependencies for implementing the `_ensure_serialization` function. Write a Python function `def _ensure_serialization(o: object) -> str | list[Any]` to solve the following problem:
A repr function for json.dumps default arg, which tries to serialize sets as lists
Here is the function:
def _ensure_serialization(o: object) -> str | list[Any]:
"""A repr function for json.dumps default arg, which tries to serialize sets as lists"""
if isinstance(o, set):
return list(o)
return repr(o) | A repr function for json.dumps default arg, which tries to serialize sets as lists |
178,362 | from __future__ import annotations
from enum import Enum, EnumMeta
from typing import TYPE_CHECKING, Any, Hashable, Iterable, Sequence, cast, overload
import streamlit
from streamlit import config, runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
from streamlit.runtime.state.common import RegisterWidgetResult
from streamlit.type_util import T
def is_in_form(dg: DeltaGenerator) -> bool:
class StreamlitAPIException(MarkdownFormattedException):
def __repr__(self) -> str:
class DeltaGenerator(
AlertMixin,
BalloonsMixin,
BokehMixin,
ButtonMixin,
CameraInputMixin,
ChatMixin,
CheckboxMixin,
CodeMixin,
ColorPickerMixin,
EmptyMixin,
ExceptionMixin,
FileUploaderMixin,
FormMixin,
GraphvizMixin,
HeadingMixin,
HelpMixin,
IframeMixin,
ImageMixin,
LayoutsMixin,
MarkdownMixin,
MapMixin,
MediaMixin,
MetricMixin,
MultiSelectMixin,
NumberInputMixin,
PlotlyMixin,
ProgressMixin,
PydeckMixin,
PyplotMixin,
RadioMixin,
SelectboxMixin,
SelectSliderMixin,
SliderMixin,
SnowMixin,
JsonMixin,
TextMixin,
TextWidgetsMixin,
TimeWidgetsMixin,
ToastMixin,
WriteMixin,
ArrowMixin,
ArrowAltairMixin,
ArrowVegaLiteMixin,
DataEditorMixin,
):
def __init__(
self,
root_container: int | None = RootContainer.MAIN,
cursor: Cursor | None = None,
parent: DeltaGenerator | None = None,
block_type: str | None = None,
) -> None:
def __repr__(self) -> str:
def __enter__(self) -> None:
def __exit__(
self,
type: Any,
value: Any,
traceback: Any,
) -> Literal[False]:
def _active_dg(self) -> DeltaGenerator:
def _main_dg(self) -> DeltaGenerator:
def __getattr__(self, name: str) -> Callable[..., NoReturn]:
def wrapper(*args: Any, **kwargs: Any) -> NoReturn:
def __deepcopy__(self, _memo):
def _parent_block_types(self) -> ParentBlockTypes:
def _count_num_of_parent_columns(self, parent_block_types: ParentBlockTypes) -> int:
def _cursor(self) -> Cursor | None:
def _is_top_level(self) -> bool:
def id(self) -> str:
def _get_delta_path_str(self) -> str:
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: None,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> DeltaGenerator:
def _enqueue( # type: ignore[misc]
self,
delta_type: str,
element_proto: Message,
return_value: type[NoValue],
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> None:
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: Value,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> Value:
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: None = None,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> DeltaGenerator:
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: type[NoValue] | Value | None = None,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> DeltaGenerator | Value | None:
def _enqueue(
self,
delta_type: str,
element_proto: Message,
return_value: type[NoValue] | Value | None = None,
add_rows_metadata: AddRowsMetadata | None = None,
element_width: int | None = None,
element_height: int | None = None,
) -> DeltaGenerator | Value | None:
def _block(
self,
block_proto: Block_pb2.Block = Block_pb2.Block(),
dg_type: type | None = None,
) -> DeltaGenerator:
def _arrow_add_rows(
self: DG,
data: Data = None,
**kwargs: DataFrame
| npt.NDArray[Any]
| Iterable[Any]
| dict[Hashable, Any]
| None,
) -> DG | None:
def check_callback_rules(dg: DeltaGenerator, on_change: WidgetCallback | None) -> None:
    """Raise if a widget inside a form (other than the submit button) has a callback."""
    if not runtime.exists():
        return
    if not is_in_form(dg):
        return
    if on_change is None:
        return
    raise StreamlitAPIException(
        "With forms, callbacks can only be defined on the `st.form_submit_button`."
        " Defining callbacks on other widgets inside a form is not allowed."
    )
178,363 | from __future__ import annotations
from enum import Enum, EnumMeta
from typing import TYPE_CHECKING, Any, Hashable, Iterable, Sequence, cast, overload
import streamlit
from streamlit import config, runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
from streamlit.runtime.state.common import RegisterWidgetResult
from streamlit.type_util import T
_shown_default_value_warning: bool = False
SESSION_STATE_WRITES_NOT_ALLOWED_ERROR_TEXT = """
Values for st.button, st.download_button, st.file_uploader, st.data_editor,
st.chat_input, and st.form cannot be set using st.session_state.
"""
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.
    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)
    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """
    def __repr__(self) -> str:
        """Return a standardized repr via streamlit.util.repr_."""
        return util.repr_(self)
def check_session_state_rules(
    default_value: Any, key: str | None, writes_allowed: bool = True
) -> None:
    """Validate how a widget's key interacts with st.session_state.

    Raises when the widget type forbids session-state writes; otherwise
    warns (at most once per process) when a widget has both a default
    value and a session-state value.
    """
    global _shown_default_value_warning

    if key is None or not runtime.exists():
        return

    session_state = get_session_state()
    if not session_state.is_new_state_value(key):
        return

    if not writes_allowed:
        raise StreamlitAPIException(SESSION_STATE_WRITES_NOT_ALLOWED_ERROR_TEXT)

    should_warn = (
        default_value is not None
        and not _shown_default_value_warning
        and not config.get_option("global.disableWidgetStateDuplicationWarning")
    )
    if should_warn:
        streamlit.warning(
            f'The widget with key "{key}" was created with a default value but'
            " also had its value set via the Session State API."
        )
        # Only warn once to avoid spamming the app on every rerun.
        _shown_default_value_warning = True
178,364 | from __future__ import annotations
from enum import Enum, EnumMeta
from typing import TYPE_CHECKING, Any, Hashable, Iterable, Sequence, cast, overload
import streamlit
from streamlit import config, runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
from streamlit.runtime.state.common import RegisterWidgetResult
from streamlit.type_util import T
The provided code snippet includes necessary dependencies for implementing the `get_label_visibility_proto_value` function. Write a Python function `def get_label_visibility_proto_value( label_visibility_string: type_util.LabelVisibility, ) -> LabelVisibilityMessage.LabelVisibilityOptions.ValueType` to solve the following problem:
Returns one of the LabelVisibilityMessage enum constants based on the string value.
Here is the function:
def get_label_visibility_proto_value(
    label_visibility_string: type_util.LabelVisibility,
) -> LabelVisibilityMessage.LabelVisibilityOptions.ValueType:
    """Map a label-visibility string to its LabelVisibilityMessage enum constant."""
    options = LabelVisibilityMessage.LabelVisibilityOptions
    mapping = {
        "visible": options.VISIBLE,
        "hidden": options.HIDDEN,
        "collapsed": options.COLLAPSED,
    }
    # .get + explicit None check (rather than KeyError) so the error message
    # and exception type match the original if/elif chain exactly.
    value = mapping.get(label_visibility_string)
    if value is None:
        raise ValueError(f"Unknown label visibility value: {label_visibility_string}")
    return value
178,365 | from __future__ import annotations
from enum import Enum, EnumMeta
from typing import TYPE_CHECKING, Any, Hashable, Iterable, Sequence, cast, overload
import streamlit
from streamlit import config, runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
from streamlit.runtime.state.common import RegisterWidgetResult
from streamlit.type_util import T
class RegisterWidgetResult(Generic[T_co]):
def failure(
cls, deserializer: WidgetDeserializer[T_co]
) -> RegisterWidgetResult[T_co]:
def maybe_coerce_enum(
register_widget_result: RegisterWidgetResult[Enum],
options: type[Enum],
opt_sequence: Sequence[Any],
) -> RegisterWidgetResult[Enum]:
... | null |
178,366 | from __future__ import annotations
from enum import Enum, EnumMeta
from typing import TYPE_CHECKING, Any, Hashable, Iterable, Sequence, cast, overload
import streamlit
from streamlit import config, runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
from streamlit.runtime.state.common import RegisterWidgetResult
from streamlit.type_util import T
T = TypeVar("T")
class RegisterWidgetResult(Generic[T_co]):
"""Result returned by the `register_widget` family of functions/methods.
Should be usable by widget code to determine what value to return, and
whether to update the UI.
Parameters
----------
value : T_co
The widget's current value, or, in cases where the true widget value
could not be determined, an appropriate fallback value.
This value should be returned by the widget call.
value_changed : bool
True if the widget's value is different from the value most recently
returned from the frontend.
Implies an update to the frontend is needed.
"""
value: T_co
value_changed: bool
def failure(
cls, deserializer: WidgetDeserializer[T_co]
) -> RegisterWidgetResult[T_co]:
"""The canonical way to construct a RegisterWidgetResult in cases
where the true widget value could not be determined.
"""
return cls(value=deserializer(None, ""), value_changed=False)
def maybe_coerce_enum(
register_widget_result: RegisterWidgetResult[T],
options: type_util.OptionSequence[T],
opt_sequence: Sequence[T],
) -> RegisterWidgetResult[T]:
... | null |
178,367 | from __future__ import annotations
from enum import Enum, EnumMeta
from typing import TYPE_CHECKING, Any, Hashable, Iterable, Sequence, cast, overload
import streamlit
from streamlit import config, runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
from streamlit.runtime.state.common import RegisterWidgetResult
from streamlit.type_util import T
def _extract_common_class_from_iter(iterable: Iterable[Any]) -> Any:
"""Return the common class of all elements in a iterable if they share one.
Otherwise, return None."""
try:
inner_iter = iter(iterable)
first_class = type(next(inner_iter))
except StopIteration:
return None
if all(type(item) is first_class for item in inner_iter):
return first_class
return None
class RegisterWidgetResult(Generic[T_co]):
"""Result returned by the `register_widget` family of functions/methods.
Should be usable by widget code to determine what value to return, and
whether to update the UI.
Parameters
----------
value : T_co
The widget's current value, or, in cases where the true widget value
could not be determined, an appropriate fallback value.
This value should be returned by the widget call.
value_changed : bool
True if the widget's value is different from the value most recently
returned from the frontend.
Implies an update to the frontend is needed.
"""
value: T_co
value_changed: bool
def failure(
cls, deserializer: WidgetDeserializer[T_co]
) -> RegisterWidgetResult[T_co]:
"""The canonical way to construct a RegisterWidgetResult in cases
where the true widget value could not be determined.
"""
return cls(value=deserializer(None, ""), value_changed=False)
The provided code snippet includes necessary dependencies for implementing the `maybe_coerce_enum` function. Write a Python function `def maybe_coerce_enum(register_widget_result, options, opt_sequence)` to solve the following problem:
Maybe coerce a RegisterWidgetResult holding an Enum member value to RegisterWidgetResult[option] if option is an EnumType; otherwise return the original RegisterWidgetResult unchanged.
Here is the function:
def maybe_coerce_enum(register_widget_result, options, opt_sequence):
    """Coerce an Enum-valued RegisterWidgetResult to the caller's Enum class.

    When options is an EnumType (or all of opt_sequence shares a single
    class), returns a new RegisterWidgetResult whose value is coerced to
    that class; otherwise returns the original result unchanged.
    """
    value = register_widget_result.value
    if not isinstance(value, Enum):
        # Nothing to coerce for non-Enum values.
        return register_widget_result

    target: EnumMeta | None = (
        options
        if isinstance(options, EnumMeta)
        else _extract_common_class_from_iter(opt_sequence)
    )
    if target is None:
        return register_widget_result

    return RegisterWidgetResult(
        type_util.coerce_enum(value, target),
        register_widget_result.value_changed,
    )
178,368 | from __future__ import annotations
from enum import Enum, EnumMeta
from typing import TYPE_CHECKING, Any, Hashable, Iterable, Sequence, cast, overload
import streamlit
from streamlit import config, runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
from streamlit.runtime.state.common import RegisterWidgetResult
from streamlit.type_util import T
T = TypeVar("T")
class RegisterWidgetResult(Generic[T_co]):
"""Result returned by the `register_widget` family of functions/methods.
Should be usable by widget code to determine what value to return, and
whether to update the UI.
Parameters
----------
value : T_co
The widget's current value, or, in cases where the true widget value
could not be determined, an appropriate fallback value.
This value should be returned by the widget call.
value_changed : bool
True if the widget's value is different from the value most recently
returned from the frontend.
Implies an update to the frontend is needed.
"""
value: T_co
value_changed: bool
def failure(
cls, deserializer: WidgetDeserializer[T_co]
) -> RegisterWidgetResult[T_co]:
"""The canonical way to construct a RegisterWidgetResult in cases
where the true widget value could not be determined.
"""
return cls(value=deserializer(None, ""), value_changed=False)
def maybe_coerce_enum_sequence(
register_widget_result: RegisterWidgetResult[list[T]],
options: type_util.OptionSequence[T],
opt_sequence: Sequence[T],
) -> RegisterWidgetResult[list[T]]:
... | null |
178,369 | from __future__ import annotations
from enum import Enum, EnumMeta
from typing import TYPE_CHECKING, Any, Hashable, Iterable, Sequence, cast, overload
import streamlit
from streamlit import config, runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
from streamlit.runtime.state.common import RegisterWidgetResult
from streamlit.type_util import T
T = TypeVar("T")
class RegisterWidgetResult(Generic[T_co]):
def failure(
cls, deserializer: WidgetDeserializer[T_co]
) -> RegisterWidgetResult[T_co]:
def maybe_coerce_enum_sequence(
register_widget_result: RegisterWidgetResult[tuple[T, T]],
options: type_util.OptionSequence[T],
opt_sequence: Sequence[T],
) -> RegisterWidgetResult[tuple[T, T]]:
... | null |
178,370 | from __future__ import annotations
from enum import Enum, EnumMeta
from typing import TYPE_CHECKING, Any, Hashable, Iterable, Sequence, cast, overload
import streamlit
from streamlit import config, runtime, type_util
from streamlit.elements.form import is_in_form
from streamlit.errors import StreamlitAPIException
from streamlit.proto.LabelVisibilityMessage_pb2 import LabelVisibilityMessage
from streamlit.runtime.state import WidgetCallback, get_session_state
from streamlit.runtime.state.common import RegisterWidgetResult
from streamlit.type_util import T
def _extract_common_class_from_iter(iterable: Iterable[Any]) -> Any:
"""Return the common class of all elements in a iterable if they share one.
Otherwise, return None."""
try:
inner_iter = iter(iterable)
first_class = type(next(inner_iter))
except StopIteration:
return None
if all(type(item) is first_class for item in inner_iter):
return first_class
return None
class RegisterWidgetResult(Generic[T_co]):
"""Result returned by the `register_widget` family of functions/methods.
Should be usable by widget code to determine what value to return, and
whether to update the UI.
Parameters
----------
value : T_co
The widget's current value, or, in cases where the true widget value
could not be determined, an appropriate fallback value.
This value should be returned by the widget call.
value_changed : bool
True if the widget's value is different from the value most recently
returned from the frontend.
Implies an update to the frontend is needed.
"""
value: T_co
value_changed: bool
def failure(
cls, deserializer: WidgetDeserializer[T_co]
) -> RegisterWidgetResult[T_co]:
"""The canonical way to construct a RegisterWidgetResult in cases
where the true widget value could not be determined.
"""
return cls(value=deserializer(None, ""), value_changed=False)
The provided code snippet includes necessary dependencies for implementing the `maybe_coerce_enum_sequence` function. Write a Python function `def maybe_coerce_enum_sequence(register_widget_result, options, opt_sequence)` to solve the following problem:
Maybe coerce a RegisterWidgetResult holding a sequence of Enum members to RegisterWidgetResult[Sequence[option]] if option is an EnumType; otherwise return the original RegisterWidgetResult unchanged.
Here is the function:
def maybe_coerce_enum_sequence(register_widget_result, options, opt_sequence):
    """Coerce a sequence of Enum members in a RegisterWidgetResult.

    When every value is an Enum and a single target class can be
    determined (options itself, or the common class of opt_sequence),
    returns a new RegisterWidgetResult with each value coerced; otherwise
    returns the original result unchanged.
    """
    values = register_widget_result.value
    if not all(isinstance(v, Enum) for v in values):
        return register_widget_result

    # Determine which Enum class to coerce to.
    target: EnumMeta | None
    if isinstance(options, EnumMeta):
        target = options
    else:
        target = _extract_common_class_from_iter(opt_sequence)
    if target is None:
        return register_widget_result

    # Preserve the container type (e.g. list vs tuple) of the original value.
    coerced = type(values)(type_util.coerce_enum(v, target) for v in values)
    return RegisterWidgetResult(coerced, register_widget_result.value_changed)
178,371 | from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Literal, cast
import streamlit.elements.lib.dicttools as dicttools
from streamlit.elements import arrow
from streamlit.elements.arrow import Data
from streamlit.errors import StreamlitAPIException
from streamlit.proto.ArrowVegaLiteChart_pb2 import (
ArrowVegaLiteChart as ArrowVegaLiteChartProto,
)
from streamlit.runtime.metrics_util import gather_metrics
_CHANNELS = {
"x",
"y",
"x2",
"y2",
"xError",
"xError2",
"yError",
"yError2",
"longitude",
"latitude",
"color",
"opacity",
"fillOpacity",
"strokeOpacity",
"strokeWidth",
"size",
"shape",
"text",
"tooltip",
"href",
"key",
"order",
"detail",
"facet",
"row",
"column",
}
import json
Data: TypeAlias = Union[
"DataFrame",
"Series",
"Styler",
"Index",
"pa.Table",
"ndarray",
Iterable,
Dict[str, List[Any]],
None,
]
The provided code snippet includes necessary dependencies for implementing the `marshall` function. Write a Python function `def marshall( proto: ArrowVegaLiteChartProto, data: Data = None, spec: dict[str, Any] | None = None, use_container_width: bool = False, theme: None | Literal["streamlit"] = "streamlit", **kwargs, )` to solve the following problem:
Construct a Vega-Lite chart object. See DeltaGenerator.vega_lite_chart for docs.
Here is the function:
def marshall(
    proto: ArrowVegaLiteChartProto,
    data: Data = None,
    spec: dict[str, Any] | None = None,
    use_container_width: bool = False,
    theme: None | Literal["streamlit"] = "streamlit",
    **kwargs,
):
    """Construct a Vega-Lite chart object.

    See DeltaGenerator.vega_lite_chart for docs.

    Accepts the spec in several shapes (spec passed positionally via
    `data`, kwargs merged into the spec, datasets/data embedded in the
    spec itself) and fills `proto` in place: the JSON spec, any named
    datasets, the top-level data, the container-width flag, and the theme.
    """
    # Support passing data inside spec['datasets'] and spec['data'].
    # (The data gets pulled out of the spec dict later on.)
    if isinstance(data, dict) and spec is None:
        spec = data
        data = None
    # Support passing no spec arg, but filling it with kwargs.
    # Example:
    #   marshall(proto, baz='boz')
    if spec is None:
        spec = dict()
    else:
        # Clone the spec dict, since we may be mutating it.
        spec = dict(spec)
    # Support passing in kwargs. Example:
    #   marshall(proto, {foo: 'bar'}, baz='boz')
    if len(kwargs):
        # Merge spec with unflattened kwargs, where kwargs take precedence.
        # This only works for string keys, but kwarg keys are strings anyways.
        spec = dict(spec, **dicttools.unflatten(kwargs, _CHANNELS))
    if len(spec) == 0:
        raise ValueError("Vega-Lite charts require a non-empty spec dict.")
    if "autosize" not in spec:
        # Default to fitting the chart into its container, padding included.
        spec["autosize"] = {"type": "fit", "contains": "padding"}
    # Pull data out of spec dict when it's in a 'datasets' key:
    #   marshall(proto, {datasets: {foo: df1, bar: df2}, ...})
    if "datasets" in spec:
        for k, v in spec["datasets"].items():
            dataset = proto.datasets.add()
            dataset.name = str(k)
            dataset.has_name = True
            arrow.marshall(dataset.data, v)
        del spec["datasets"]
    # Pull data out of spec dict when it's in a top-level 'data' key:
    #   marshall(proto, {data: df})
    #   marshall(proto, {data: {values: df, ...}})
    #   marshall(proto, {data: {url: 'url'}})
    #   marshall(proto, {data: {name: 'foo'}})
    if "data" in spec:
        data_spec = spec["data"]
        if isinstance(data_spec, dict):
            if "values" in data_spec:
                data = data_spec["values"]
                del spec["data"]
        else:
            data = data_spec
            del spec["data"]
    proto.spec = json.dumps(spec)
    proto.use_container_width = use_container_width
    proto.theme = theme or ""
    if data is not None:
        arrow.marshall(proto.data, data)
178,372 | from __future__ import annotations
import io
import re
from pathlib import Path
from typing import TYPE_CHECKING, Dict, Final, Union, cast
from typing_extensions import TypeAlias
import streamlit as st
from streamlit import runtime, type_util, url_util
from streamlit.elements.lib.subtitle_utils import process_subtitle_data
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Audio_pb2 import Audio as AudioProto
from streamlit.proto.Video_pb2 import Video as VideoProto
from streamlit.runtime import caching
from streamlit.runtime.metrics_util import gather_metrics
MediaData: TypeAlias = Union[
str, bytes, io.BytesIO, io.RawIOBase, io.BufferedReader, "npt.NDArray[Any]", None
]
SubtitleData: TypeAlias = Union[
str, Path, bytes, io.BytesIO, Dict[str, Union[str, Path, bytes, io.BytesIO]], None
]
def _reshape_youtube_url(url: str) -> str | None:
    """Turn any recognized YouTube embed/watch link into an embed URL.

    Returns None when the URL is not recognized as a YouTube link.

    Parameters
    ----------
    url : str

    Example
    -------
    >>> print(_reshape_youtube_url('https://youtu.be/_T8LGqJtuGc'))

    .. output::
        https://www.youtube.com/embed/_T8LGqJtuGc
    """
    match = re.match(YOUTUBE_RE, url)
    if match is None:
        return None
    # YOUTUBE_RE has several alternative capture groups for the video id;
    # exactly one of them is populated for a given URL shape.
    video_id = (
        match.group("video_id_1")
        or match.group("video_id_2")
        or match.group("video_id_3")
    )
    return f"https://www.youtube.com/embed/{video_id}"
def _marshall_av_media(
coordinates: str,
proto: AudioProto | VideoProto,
data: MediaData,
mimetype: str,
) -> None:
"""Fill audio or video proto based on contents of data.
Given a string, check if it's a url; if so, send it out without modification.
Otherwise assume strings are filenames and let any OS errors raise.
Load data either from file or through bytes-processing methods into a
MediaFile object. Pack proto with generated Tornado-based URL.
(When running in "raw" mode, we won't actually load data into the
MediaFileManager, and we'll return an empty URL.)
"""
# Audio and Video methods have already checked if this is a URL by this point.
if data is None:
# Allow empty values so media players can be shown without media.
return
data_or_filename: bytes | str
if isinstance(data, (str, bytes)):
# Pass strings and bytes through unchanged
data_or_filename = data
elif isinstance(data, io.BytesIO):
data.seek(0)
data_or_filename = data.getvalue()
elif isinstance(data, io.RawIOBase) or isinstance(data, io.BufferedReader):
data.seek(0)
read_data = data.read()
if read_data is None:
return
else:
data_or_filename = read_data
elif type_util.is_type(data, "numpy.ndarray"):
data_or_filename = data.tobytes()
else:
raise RuntimeError("Invalid binary data format: %s" % type(data))
if runtime.exists():
file_url = runtime.get_instance().media_file_mgr.add(
data_or_filename, mimetype, coordinates
)
caching.save_media_data(data_or_filename, mimetype, coordinates)
else:
# When running in "raw mode", we can't access the MediaFileManager.
file_url = ""
proto.url = file_url
def process_subtitle_data(
    coordinates: str,
    data: str | bytes | Path | io.BytesIO,
    label: str,
) -> str:
    """Normalize subtitle input and register it with the MediaFileManager.

    Parameters
    ----------
    coordinates : str
        Delta-path coordinates used to scope the media file.
    data : str, bytes, Path, or io.BytesIO
        Subtitle content or a path to it; normalized by the _handle_* helpers.
    label : str
        The subtitle track label; hashed to derive a stable file name.

    Returns
    -------
    str
        The URL of the registered subtitle file, or "" when no runtime exists.

    Raises
    ------
    TypeError
        If data is not one of the supported types.
    """
    # Determine the type of data and process accordingly
    if isinstance(data, (str, Path)):
        subtitle_data = _handle_string_or_path_data(data)
    elif isinstance(data, io.BytesIO):
        subtitle_data = _handle_stream_data(data)
    elif isinstance(data, bytes):
        subtitle_data = _handle_bytes_data(data)
    else:
        raise TypeError(f"Invalid binary data format for subtitle: {type(data)}.")

    if not runtime.exists():
        # When running in "raw mode", we can't access the MediaFileManager.
        return ""

    # Derive a stable, label-based filename (md5 is a fingerprint here,
    # not a security measure).
    filename = hashlib.md5(label.encode()).hexdigest()

    # Save the processed data and return the file URL.
    # BUG FIX: `filename` was computed but never used — the file was always
    # registered with the placeholder-less f-string f"(unknown).vtt".
    # Register it under the derived name instead.
    file_url = runtime.get_instance().media_file_mgr.add(
        path_or_data=subtitle_data,
        mimetype="text/vtt",
        coordinates=coordinates,
        file_name=f"{filename}.vtt",
    )
    caching.save_media_data(subtitle_data, "text/vtt", coordinates)
    return file_url
class StreamlitAPIException(MarkdownFormattedException):
    """Raised when user code misuses the Streamlit API.

    Any exception caused by a malformed ``st.foo`` call from user code
    should be a StreamlitAPIException (or a subclass of it). When such an
    exception is rendered on the frontend, Streamlit-internal frames are
    stripped from the traceback so users only see their own code, not
    Streamlit internals.
    """

    def __repr__(self) -> str:
        # Delegate to the shared helper so all Streamlit objects repr alike.
        return util.repr_(self)
The provided code snippet includes necessary dependencies for implementing the `marshall_video` function. Write a Python function `def marshall_video( coordinates: str, proto: VideoProto, data: MediaData, mimetype: str = "video/mp4", start_time: int = 0, subtitles: SubtitleData = None, end_time: int | None = None, loop: bool = False, ) -> None` to solve the following problem:
Marshalls a video proto, using url processors as needed. Parameters ---------- coordinates : str proto : the proto to fill. Must have a string field called "data". data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data or a string with a URL pointing to the video to load. Includes support for YouTube URLs. If passing the raw data, this must include headers and any other bytes required in the actual file. mimetype : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time : int The time from which this element should start playing. (default: 0) subtitles: str, dict, or io.BytesIO Optional subtitle data for the video, supporting several input types: * None (default): No subtitles. * A string: File path to a subtitle file in '.vtt' or '.srt' formats, or the raw content of subtitles conforming to these formats. If providing raw content, the string must adhere to the WebVTT or SRT format specifications. * A dictionary: Pairs of labels and file paths or raw subtitle content in '.vtt' or '.srt' formats. Enables multiple subtitle tracks. The label will be shown in the video player. Example: {'English': 'path/to/english.vtt', 'French': 'path/to/french.srt'} * io.BytesIO: A BytesIO stream that contains valid '.vtt' or '.srt' formatted subtitle data. When provided, subtitles are displayed by default. For multiple tracks, the first one is displayed by default. Not supported for YouTube videos. end_time: int The time at which this element should stop playing loop: bool Whether the video should loop playback.
Here is the function:
def marshall_video(
    coordinates: str,
    proto: VideoProto,
    data: MediaData,
    mimetype: str = "video/mp4",
    start_time: int = 0,
    subtitles: SubtitleData = None,
    end_time: int | None = None,
    loop: bool = False,
) -> None:
    """Marshalls a video proto, using url processors as needed.

    Parameters
    ----------
    coordinates : str
        Unique identifier for the element's position in the app; used as
        the MediaFileManager key.
    proto : the proto to fill. Must have a string field called "data".
    data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open().
        Raw video data or a string with a URL pointing to the video to load.
        Includes support for YouTube URLs. If passing the raw data, this must
        include headers and any other bytes required in the actual file.
    mimetype : str
        The mime type for the video file. Defaults to 'video/mp4'.
        See https://tools.ietf.org/html/rfc4281 for more info.
    start_time : int
        The time from which this element should start playing. (default: 0)
    subtitles : str, dict, or io.BytesIO
        Optional subtitle data for the video, supporting several input types:
        * None (default): No subtitles.
        * A string: File path to a subtitle file in '.vtt' or '.srt' formats,
          or the raw content of subtitles conforming to these formats.
        * A dictionary: Pairs of labels and file paths or raw subtitle
          content in '.vtt' or '.srt' formats, enabling multiple tracks.
          Example: {'English': 'english.vtt', 'French': 'french.srt'}
        * io.BytesIO: A stream of valid '.vtt' or '.srt' formatted data.
        When provided, subtitles are displayed by default; for multiple
        tracks, the first is displayed. Not supported for YouTube videos.
    end_time : int
        The time at which this element should stop playing.
    loop : bool
        Whether the video should loop playback.

    Raises
    ------
    StreamlitAPIException
        If the start/end times are inconsistent, if subtitles are given for
        a YouTube URL, or if subtitle data is of an unsupported type or
        fails to process.
    """
    # Validate the playback window before mutating the proto.
    if start_time < 0 or (end_time is not None and end_time <= start_time):
        raise StreamlitAPIException("Invalid start_time and end_time combination.")

    proto.start_time = start_time
    if end_time is not None:
        proto.end_time = end_time
    proto.loop = loop

    # "type" distinguishes between YouTube and non-YouTube links
    proto.type = VideoProto.Type.NATIVE

    if isinstance(data, str) and url_util.is_url(
        data, allowed_schemas=("http", "https", "data")
    ):
        # URL input: YouTube links are rewritten to an embeddable form and
        # rendered in an iframe; any other URL passes through unchanged.
        if youtube_url := _reshape_youtube_url(data):
            proto.url = youtube_url
            proto.type = VideoProto.Type.YOUTUBE_IFRAME
            if subtitles:
                raise StreamlitAPIException(
                    "Subtitles are not supported for YouTube videos."
                )
        else:
            proto.url = data
    else:
        # Raw/binary data: register with the MediaFileManager.
        _marshall_av_media(coordinates, proto, data, mimetype)

    if subtitles:
        subtitle_items: list[tuple[str, str | Path | bytes | io.BytesIO]] = []
        # Single subtitle
        if isinstance(subtitles, (str, bytes, io.BytesIO, Path)):
            subtitle_items.append(("default", subtitles))
        # Multiple subtitles
        elif isinstance(subtitles, dict):
            subtitle_items.extend(subtitles.items())
        else:
            raise StreamlitAPIException(
                f"Unsupported data type for subtitles: {type(subtitles)}. "
                f"Only str (file paths) and dict are supported."
            )
        for label, subtitle_data in subtitle_items:
            sub = proto.subtitles.add()
            sub.label = label or ""
            # Coordinates used in media_file_manager to identify the place of
            # element, in case of subtitle, we use same video coordinates
            # with suffix.
            # It is not aligned with common coordinates format, but in
            # media_file_manager we use it just as unique identifier, so it is fine.
            subtitle_coordinates = f"{coordinates}[subtitle{label}]"
            try:
                sub.url = process_subtitle_data(
                    subtitle_coordinates, subtitle_data, label
                )
            except (TypeError, ValueError) as original_err:
                raise StreamlitAPIException(
                    f"Failed to process the provided subtitle: {label}"
                ) from original_err
178,373 | from __future__ import annotations
import io
import re
from pathlib import Path
from typing import TYPE_CHECKING, Dict, Final, Union, cast
from typing_extensions import TypeAlias
import streamlit as st
from streamlit import runtime, type_util, url_util
from streamlit.elements.lib.subtitle_utils import process_subtitle_data
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Audio_pb2 import Audio as AudioProto
from streamlit.proto.Video_pb2 import Video as VideoProto
from streamlit.runtime import caching
from streamlit.runtime.metrics_util import gather_metrics
# Media payloads accepted by st.audio/st.video: a URL or filename string, raw
# bytes, an in-memory or on-disk binary stream, a numpy sample array, or None
# (None renders an empty player).
MediaData: TypeAlias = Union[
    str, bytes, io.BytesIO, io.RawIOBase, io.BufferedReader, "npt.NDArray[Any]", None
]
def _marshall_av_media(
coordinates: str,
proto: AudioProto | VideoProto,
data: MediaData,
mimetype: str,
) -> None:
"""Fill audio or video proto based on contents of data.
Given a string, check if it's a url; if so, send it out without modification.
Otherwise assume strings are filenames and let any OS errors raise.
Load data either from file or through bytes-processing methods into a
MediaFile object. Pack proto with generated Tornado-based URL.
(When running in "raw" mode, we won't actually load data into the
MediaFileManager, and we'll return an empty URL.)
"""
# Audio and Video methods have already checked if this is a URL by this point.
if data is None:
# Allow empty values so media players can be shown without media.
return
data_or_filename: bytes | str
if isinstance(data, (str, bytes)):
# Pass strings and bytes through unchanged
data_or_filename = data
elif isinstance(data, io.BytesIO):
data.seek(0)
data_or_filename = data.getvalue()
elif isinstance(data, io.RawIOBase) or isinstance(data, io.BufferedReader):
data.seek(0)
read_data = data.read()
if read_data is None:
return
else:
data_or_filename = read_data
elif type_util.is_type(data, "numpy.ndarray"):
data_or_filename = data.tobytes()
else:
raise RuntimeError("Invalid binary data format: %s" % type(data))
if runtime.exists():
file_url = runtime.get_instance().media_file_mgr.add(
data_or_filename, mimetype, coordinates
)
caching.save_media_data(data_or_filename, mimetype, coordinates)
else:
# When running in "raw mode", we can't access the MediaFileManager.
file_url = ""
proto.url = file_url
def _maybe_convert_to_wav_bytes(data: MediaData, sample_rate: int | None) -> MediaData:
    """Render numpy-array audio as WAV bytes; return every other input as-is."""
    needs_conversion = (
        type_util.is_type(data, "numpy.ndarray") and sample_rate is not None
    )
    if needs_conversion:
        return _make_wav(cast("npt.NDArray[Any]", data), sample_rate)
    return data
The provided code snippet includes necessary dependencies for implementing the `marshall_audio` function. Write a Python function `def marshall_audio( coordinates: str, proto: AudioProto, data: MediaData, mimetype: str = "audio/wav", start_time: int = 0, sample_rate: int | None = None, end_time: int | None = None, loop: bool = False, ) -> None` to solve the following problem:
Marshalls an audio proto, using data and url processors as needed. Parameters ---------- coordinates : str proto : The proto to fill. Must have a string field called "url". data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open() Raw audio data or a string with a URL pointing to the file to load. If passing the raw data, this must include headers and any other bytes required in the actual file. mimetype : str The mime type for the audio file. Defaults to "audio/wav". See https://tools.ietf.org/html/rfc4281 for more info. start_time : int The time from which this element should start playing. (default: 0) sample_rate: int or None Optional param to provide sample_rate in case of numpy array end_time: int The time at which this element should stop playing loop: bool Whether the audio should loop playback.
Here is the function:
def marshall_audio(
    coordinates: str,
    proto: AudioProto,
    data: MediaData,
    mimetype: str = "audio/wav",
    start_time: int = 0,
    sample_rate: int | None = None,
    end_time: int | None = None,
    loop: bool = False,
) -> None:
    """Marshalls an audio proto, using data and url processors as needed.

    Parameters
    ----------
    coordinates : str
        Unique identifier for the element's position in the app.
    proto : The proto to fill. Must have a string field called "url".
    data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open()
        Raw audio data or a string with a URL pointing to the file to load.
        If passing the raw data, this must include headers and any other
        bytes required in the actual file.
    mimetype : str
        The mime type for the audio file. Defaults to "audio/wav".
        See https://tools.ietf.org/html/rfc4281 for more info.
    start_time : int
        The time from which this element should start playing. (default: 0)
    sample_rate : int or None
        Optional sample rate, required to interpret a numpy array as audio.
    end_time : int
        The time at which this element should stop playing.
    loop : bool
        Whether the audio should loop playback.
    """
    proto.start_time = start_time
    if end_time is not None:
        proto.end_time = end_time
    proto.loop = loop

    is_remote = isinstance(data, str) and url_util.is_url(
        data, allowed_schemas=("http", "https", "data")
    )
    if is_remote:
        # Remote audio: just point the frontend at the URL.
        proto.url = data
    else:
        # Local/raw audio: convert numpy input to WAV, then register the
        # bytes with the MediaFileManager.
        converted = _maybe_convert_to_wav_bytes(data, sample_rate)
        _marshall_av_media(coordinates, proto, converted, mimetype)
178,374 | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Union, cast
from typing_extensions import TypeAlias
from streamlit import type_util
from streamlit.elements.lib.column_config_utils import (
INDEX_IDENTIFIER,
ColumnConfigMappingInput,
apply_data_specific_configs,
marshall_column_config,
process_config_mapping,
update_column_config,
)
from streamlit.elements.lib.pandas_styler_utils import marshall_styler
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.runtime.metrics_util import gather_metrics
# Tabular inputs accepted by Arrow-backed elements: pandas objects (DataFrame,
# Series, Styler, Index), pyarrow tables, numpy arrays, generic iterables,
# column dicts, or None.
Data: TypeAlias = Union[
    "DataFrame",
    "Series",
    "Styler",
    "Index",
    "pa.Table",
    "ndarray",
    Iterable,
    Dict[str, List[Any]],
    None,
]
def marshall_styler(proto: ArrowProto, styler: Styler, default_uuid: str) -> None:
    """Marshall pandas.Styler into an Arrow proto.

    Parameters
    ----------
    proto : proto.Arrow
        Output. The protobuf for Streamlit Arrow proto.
    styler : pandas.Styler
        Helps style a DataFrame or Series according to the data with HTML and CSS.
    default_uuid : str
        If pandas.Styler uuid is not provided, this value will be used.

    Raises
    ------
    StreamlitAPIException
        If the Styler's dataframe exceeds pandas' configured
        "styler.render.max_elements" cell limit.
    """
    # Lazy import keeps pandas off the module import path.
    import pandas as pd

    styler_data_df: pd.DataFrame = styler.data
    # Pandas refuses to render Stylers beyond this cell count; surface a
    # friendly, actionable error instead of letting it fail downstream.
    if styler_data_df.size > int(pd.options.styler.render.max_elements):
        raise StreamlitAPIException(
            f"The dataframe has `{styler_data_df.size}` cells, but the maximum number "
            "of cells allowed to be rendered by Pandas Styler is configured to "
            f"`{pd.options.styler.render.max_elements}`. To allow more cells to be "
            'styled, you can change the `"styler.render.max_elements"` config. For example: '
            f'`pd.set_option("styler.render.max_elements", {styler_data_df.size})`'
        )
    # pandas.Styler uuid should be set before _compute is called.
    _marshall_uuid(proto, styler, default_uuid)
    # We're using protected members of pandas.Styler to get styles,
    # which is not ideal and could break if the interface changes.
    styler._compute()
    pandas_styles = styler._translate(False, False)
    # Marshall the computed pieces into the proto in display order.
    _marshall_caption(proto, styler)
    _marshall_styles(proto, styler, pandas_styles)
    _marshall_display_values(proto, styler_data_df, pandas_styles)
The provided code snippet includes necessary dependencies for implementing the `marshall` function. Write a Python function `def marshall(proto: ArrowProto, data: Data, default_uuid: str | None = None) -> None` to solve the following problem:
Marshall pandas.DataFrame into an Arrow proto. Parameters ---------- proto : proto.Arrow Output. The protobuf for Streamlit Arrow proto. data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.DataFrame, Iterable, dict, or None Something that is or can be converted to a dataframe. default_uuid : str | None If pandas.Styler UUID is not provided, this value will be used. This attribute is optional and only used for pandas.Styler, other elements (e.g. charts) can ignore it.
Here is the function:
def marshall(proto: ArrowProto, data: Data, default_uuid: str | None = None) -> None:
    """Serialize dataframe-like ``data`` into an Arrow proto.

    Parameters
    ----------
    proto : proto.Arrow
        Output. The protobuf for Streamlit Arrow proto.
    data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray,
        pyspark.sql.DataFrame, snowflake.snowpark.DataFrame, Iterable, dict,
        or None
        Something that is or can be converted to a dataframe.
    default_uuid : str | None
        Fallback UUID used when marshalling a pandas.Styler that has none of
        its own; ignored for every other data type.
    """
    # Lazy import keeps pyarrow off the module import path.
    import pyarrow as pa

    if type_util.is_pandas_styler(data):
        # Callers pass a string UUID exactly when data is a Styler.
        assert isinstance(
            default_uuid, str
        ), "Default UUID must be a string for Styler data."
        marshall_styler(proto, data, default_uuid)

    if isinstance(data, pa.Table):
        # Already an Arrow table: serialize it directly.
        proto.data = type_util.pyarrow_table_to_bytes(data)
        return

    # Everything else goes through a dataframe conversion first.
    frame = type_util.convert_anything_to_df(data)
    proto.data = type_util.data_frame_to_bytes(frame)
178,375 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    # Header label; None means "use the column name".
    label: str | None
    # Display width preset; None auto-sizes to the cell contents.
    width: ColumnWidth | None
    # Tooltip shown when hovering over the column header.
    help: str | None
    # Hide the column entirely when True.
    hidden: bool | None
    # Disable editing of this column in st.data_editor.
    disabled: bool | None
    # Require a non-None value before an edited cell can be submitted.
    required: bool | None
    # Default cell value for rows added by the user.
    default: str | bool | int | float | None
    # Horizontal alignment of cell contents.
    alignment: Literal["left", "center", "right"] | None
    # Column-type-specific configuration payload (one of the *ColumnConfig
    # TypedDicts declared alongside this class), or None for the default type.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `Column` function. Write a Python function `def Column( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, disabled: bool | None = None, required: bool | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a generic column in ``st.dataframe`` or ``st.data_editor``. The type of the column will be automatically inferred from the data type. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. To change the type of the column and enable type-specific configuration options, use one of the column types in the ``st.column_config`` namespace, e.g. ``st.column_config.NumberColumn``. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "widgets": ["st.selectbox", "st.number_input", "st.text_area", "st.button"], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "widgets": st.column_config.Column( >>> "Streamlit Widgets", >>> help="Streamlit **widget** commands 🎈", >>> width="medium", >>> required=True, >>> ) >>> }, >>> hide_index=True, >>> num_rows="dynamic", >>> ) .. output:: https://doc-column.streamlit.app/ height: 300px
Here is the function:
def Column(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
) -> ColumnConfig:
    """Configure a generic column in ``st.dataframe`` or ``st.data_editor``.

    The column's type is inferred automatically from the data. Use this inside
    the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.
    To pin a specific column type (and unlock its type-specific options), use
    one of the typed variants in the ``st.column_config`` namespace instead,
    e.g. ``st.column_config.NumberColumn``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. If None (default), the column is
        sized to fit its cell contents.
    help: str or None
        An optional tooltip shown when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an
        edited cell can only be submitted if it has a value other than None.
        Defaults to False.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "widgets": ["st.selectbox", "st.number_input", "st.text_area", "st.button"],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "widgets": st.column_config.Column(
    >>>             "Streamlit Widgets",
    >>>             help="Streamlit **widget** commands 🎈",
    >>>             width="medium",
    >>>             required=True,
    >>>         )
    >>>     },
    >>>     hide_index=True,
    >>>     num_rows="dynamic",
    >>> )

    .. output::
        https://doc-column.streamlit.app/
        height: 300px
    """
    # ColumnConfig is a TypedDict, so a plain dict literal is equivalent to
    # invoking its constructor.
    config: ColumnConfig = {
        "label": label,
        "width": width,
        "help": help,
        "disabled": disabled,
        "required": required,
    }
    return config
178,376 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class NumberColumnConfig(TypedDict):
    """Type-specific configuration payload for a number column
    (used as the ``type_config`` entry of a ``ColumnConfig``)."""

    # Discriminator identifying this payload as a number column.
    type: Literal["number"]
    # printf-style display format string, e.g. "$%.2f".
    format: NotRequired[str | None]
    # Minimum value accepted by the editor; None means unbounded.
    min_value: NotRequired[int | float | None]
    # Maximum value accepted by the editor; None means unbounded.
    max_value: NotRequired[int | float | None]
    # Stepping interval / input precision; None uses the type default.
    step: NotRequired[int | float | None]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    # Header label; None means "use the column name".
    label: str | None
    # Display width preset; None auto-sizes to the cell contents.
    width: ColumnWidth | None
    # Tooltip shown when hovering over the column header.
    help: str | None
    # Hide the column entirely when True.
    hidden: bool | None
    # Disable editing of this column in st.data_editor.
    disabled: bool | None
    # Require a non-None value before an edited cell can be submitted.
    required: bool | None
    # Default cell value for rows added by the user.
    default: str | bool | int | float | None
    # Horizontal alignment of cell contents.
    alignment: Literal["left", "center", "right"] | None
    # Column-type-specific configuration payload (one of the *ColumnConfig
    # TypedDicts declared alongside this class), or None for the default type.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `NumberColumn` function. Write a Python function `def NumberColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, disabled: bool | None = None, required: bool | None = None, default: int | float | None = None, format: str | None = None, min_value: int | float | None = None, max_value: int | float | None = None, step: int | float | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a number column in ``st.dataframe`` or ``st.data_editor``. This is the default column type for integer and float values. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled with a numeric input widget. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. default: int, float, or None Specifies the default value in this column when a new row is added by the user. format : str or None A printf-style format string controlling how numbers are displayed. This does not impact the return value. Valid formatters: %d %e %f %g %i %u. You can also add prefixes and suffixes, e.g. ``"$ %.2f"`` to show a dollar prefix. min_value : int, float, or None The minimum value that can be entered. If None (default), there will be no minimum. max_value : int, float, or None The maximum value that can be entered. If None (default), there will be no maximum. step: int, float, or None The stepping interval. Specifies the precision of numbers that can be entered. If None (default), uses 1 for integers and unrestricted precision for floats. 
Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "price": [20, 950, 250, 500], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "price": st.column_config.NumberColumn( >>> "Price (in USD)", >>> help="The price of the product in USD", >>> min_value=0, >>> max_value=1000, >>> step=1, >>> format="$%d", >>> ) >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-number-column.streamlit.app/ height: 300px
Here is the function:
def NumberColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    default: int | float | None = None,
    format: str | None = None,
    min_value: int | float | None = None,
    max_value: int | float | None = None,
    step: int | float | None = None,
) -> ColumnConfig:
    """Configure a number column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for integer and float values. This command needs to
    be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.
    When used with ``st.data_editor``, editing will be enabled with a numeric input widget.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    format : str or None
        A printf-style format string controlling how numbers are displayed.
        This does not impact the return value. Valid formatters: %d %e %f %g %i %u.
        You can also add prefixes and suffixes, e.g. ``"$ %.2f"`` to show a dollar prefix.
    min_value : int, float, or None
        The minimum value that can be entered.
        If None (default), there will be no minimum.
    max_value : int, float, or None
        The maximum value that can be entered.
        If None (default), there will be no maximum.
    step: int, float, or None
        The stepping interval. Specifies the precision of numbers that can be entered.
        If None (default), uses 1 for integers and unrestricted precision for floats.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "price": [20, 950, 250, 500],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "price": st.column_config.NumberColumn(
    >>>             "Price (in USD)",
    >>>             help="The price of the product in USD",
    >>>             min_value=0,
    >>>             max_value=1000,
    >>>             step=1,
    >>>             format="$%d",
    >>>         )
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-number-column.streamlit.app/
        height: 300px
    """
    # Assemble the number-specific options first, then wrap them in the
    # shared column configuration dict.
    number_config = NumberColumnConfig(
        type="number",
        min_value=min_value,
        max_value=max_value,
        format=format,
        step=step,
    )
    return ColumnConfig(
        type_config=number_config,
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        default=default,
    )
Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "price": [20, 950, 250, 500], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "price": st.column_config.NumberColumn( >>> "Price (in USD)", >>> help="The price of the product in USD", >>> min_value=0, >>> max_value=1000, >>> step=1, >>> format="$%d", >>> ) >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-number-column.streamlit.app/ height: 300px |
178,377 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class TextColumnConfig(TypedDict):
    """Type-specific options for a text column.

    The ``type`` key discriminates this variant inside the ``type_config``
    union of ``ColumnConfig``.
    """

    type: Literal["text"]
    max_chars: NotRequired[int | None]  # max characters accepted when editing; None = unlimited
    validate: NotRequired[str | None]  # JS-flavor regex edited values are validated against
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    # ``total=False``: every key is optional; omitted keys use the defaults
    # described in the docstring above.
    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # NOTE(review): not listed in the docstring above — presumably the
    # horizontal alignment of cell contents; confirm before documenting publicly.
    alignment: Literal["left", "center", "right"] | None
    # Discriminated union of the per-type option dicts; each variant's
    # ``type`` key identifies the column type.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `TextColumn` function. Write a Python function `def TextColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, disabled: bool | None = None, required: bool | None = None, default: str | None = None, max_chars: int | None = None, validate: str | None = None, ) -> ColumnConfig` to solve the following problem:
r"""Configure a text column in ``st.dataframe`` or ``st.data_editor``. This is the default column type for string values. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled with a text input widget. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. default: str or None Specifies the default value in this column when a new row is added by the user. max_chars: int or None The maximum number of characters that can be entered. If None (default), there will be no maximum. validate: str or None A regular expression (JS flavor, e.g. ``"^[a-z]+$"``) that edited values are validated against. If the input is invalid, it will not be submitted. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "widgets": ["st.selectbox", "st.number_input", "st.text_area", "st.button"], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "widgets": st.column_config.TextColumn( >>> "Widgets", >>> help="Streamlit **widget** commands 🎈", >>> default="st.", >>> max_chars=50, >>> validate="^st\.[a-z_]+$", >>> ) >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-text-column.streamlit.app/ height: 300px
Here is the function:
def TextColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    default: str | None = None,
    max_chars: int | None = None,
    validate: str | None = None,
) -> ColumnConfig:
    r"""Configure a text column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for string values. This command needs to be used in the
    ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with
    ``st.data_editor``, editing will be enabled with a text input widget.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str or None
        Specifies the default value in this column when a new row is added by the user.
    max_chars: int or None
        The maximum number of characters that can be entered. If None (default),
        there will be no maximum.
    validate: str or None
        A regular expression (JS flavor, e.g. ``"^[a-z]+$"``) that edited values are validated against.
        If the input is invalid, it will not be submitted.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "widgets": ["st.selectbox", "st.number_input", "st.text_area", "st.button"],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "widgets": st.column_config.TextColumn(
    >>>             "Widgets",
    >>>             help="Streamlit **widget** commands 🎈",
    >>>             default="st.",
    >>>             max_chars=50,
    >>>             validate="^st\.[a-z_]+$",
    >>>         )
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-text-column.streamlit.app/
        height: 300px
    """
    # Build the text-specific options separately for readability, then embed
    # them in the shared column configuration dict.
    text_config = TextColumnConfig(
        type="text",
        max_chars=max_chars,
        validate=validate,
    )
    return ColumnConfig(
        type_config=text_config,
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        default=default,
    )
178,378 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class LinkColumnConfig(TypedDict):
    """Type-specific options for a link column.

    The ``type`` key discriminates this variant inside the ``type_config``
    union of ``ColumnConfig``.
    """

    type: Literal["link"]
    max_chars: NotRequired[int | None]  # max characters accepted when editing; None = unlimited
    validate: NotRequired[str | None]  # JS-flavor regex edited values are validated against
    display_text: NotRequired[str | None]  # literal text or regex with capture group; None shows the URL
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    # ``total=False``: every key is optional; omitted keys use the defaults
    # described in the docstring above.
    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # NOTE(review): not listed in the docstring above — presumably the
    # horizontal alignment of cell contents; confirm before documenting publicly.
    alignment: Literal["left", "center", "right"] | None
    # Discriminated union of the per-type option dicts; each variant's
    # ``type`` key identifies the column type.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `LinkColumn` function. Write a Python function `def LinkColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, disabled: bool | None = None, required: bool | None = None, default: str | None = None, max_chars: int | None = None, validate: str | None = None, display_text: str | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a link column in ``st.dataframe`` or ``st.data_editor``. The cell values need to be string and will be shown as clickable links. This command needs to be used in the column_config parameter of ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled with a text input widget. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. default: str or None Specifies the default value in this column when a new row is added by the user. max_chars: int or None The maximum number of characters that can be entered. If None (default), there will be no maximum. validate: str or None A regular expression (JS flavor, e.g. ``"^https://.+$"``) that edited values are validated against. If the input is invalid, it will not be submitted. display_text: str or None The text that is displayed in the cell. Can be one of: * ``None`` (default) to display the URL itself. * A string that is displayed in every cell, e.g. ``"Open link"``. * A regular expression (JS flavor, detected by usage of parentheses) to extract a part of the URL via a capture group, e.g. ``"https://(.*?)\.example\.com"`` to extract the display text "foo" from the URL "\https://foo.example.com". 
For more complex cases, you may use `Pandas Styler's format \ <https://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.format.html>`_ function on the underlying dataframe. Note that this makes the app slow, doesn't work with editable columns, and might be removed in the future. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "apps": [ >>> "https://roadmap.streamlit.app", >>> "https://extras.streamlit.app", >>> "https://issues.streamlit.app", >>> "https://30days.streamlit.app", >>> ], >>> "creator": [ >>> "https://github.com/streamlit", >>> "https://github.com/arnaudmiribel", >>> "https://github.com/streamlit", >>> "https://github.com/streamlit", >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "apps": st.column_config.LinkColumn( >>> "Trending apps", >>> help="The top trending Streamlit apps", >>> validate="^https://[a-z]+\\.streamlit\\.app$", >>> max_chars=100, >>> display_text="https://(.*?)\\.streamlit\\.app" >>> ), >>> "creator": st.column_config.LinkColumn( >>> "App Creator", display_text="Open profile" >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-link-column.streamlit.app/ height: 300px
Here is the function:
def LinkColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    default: str | None = None,
    max_chars: int | None = None,
    validate: str | None = None,
    display_text: str | None = None,
) -> ColumnConfig:
    """Configure a link column in ``st.dataframe`` or ``st.data_editor``.

    The cell values need to be string and will be shown as clickable links.
    This command needs to be used in the column_config parameter of ``st.dataframe``
    or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled
    with a text input widget.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str or None
        Specifies the default value in this column when a new row is added by the user.
    max_chars: int or None
        The maximum number of characters that can be entered. If None (default),
        there will be no maximum.
    validate: str or None
        A regular expression (JS flavor, e.g. ``"^https://.+$"``) that edited values are validated against.
        If the input is invalid, it will not be submitted.
    display_text: str or None
        The text that is displayed in the cell. Can be one of:

        * ``None`` (default) to display the URL itself.

        * A string that is displayed in every cell, e.g. ``"Open link"``.

        * A regular expression (JS flavor, detected by usage of parentheses)
          to extract a part of the URL via a capture group, e.g. ``"https://(.*?)\\.example\\.com"``
          to extract the display text "foo" from the URL "\\https://foo.example.com".

        For more complex cases, you may use `Pandas Styler's format \\
        <https://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.format.html>`_
        function on the underlying dataframe. Note that this makes the app slow,
        doesn't work with editable columns, and might be removed in the future.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "apps": [
    >>>             "https://roadmap.streamlit.app",
    >>>             "https://extras.streamlit.app",
    >>>             "https://issues.streamlit.app",
    >>>             "https://30days.streamlit.app",
    >>>         ],
    >>>         "creator": [
    >>>             "https://github.com/streamlit",
    >>>             "https://github.com/arnaudmiribel",
    >>>             "https://github.com/streamlit",
    >>>             "https://github.com/streamlit",
    >>>         ],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "apps": st.column_config.LinkColumn(
    >>>             "Trending apps",
    >>>             help="The top trending Streamlit apps",
    >>>             validate="^https://[a-z]+\\.streamlit\\.app$",
    >>>             max_chars=100,
    >>>             display_text="https://(.*?)\\.streamlit\\.app"
    >>>         ),
    >>>         "creator": st.column_config.LinkColumn(
    >>>             "App Creator", display_text="Open profile"
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-link-column.streamlit.app/
        height: 300px
    """
    # Gather the link-specific options before wrapping them in the shared
    # column configuration dict.
    link_config = LinkColumnConfig(
        type="link",
        max_chars=max_chars,
        validate=validate,
        display_text=display_text,
    )
    return ColumnConfig(
        type_config=link_config,
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        default=default,
    )
For more complex cases, you may use `Pandas Styler's format \ <https://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.format.html>`_ function on the underlying dataframe. Note that this makes the app slow, doesn't work with editable columns, and might be removed in the future. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "apps": [ >>> "https://roadmap.streamlit.app", >>> "https://extras.streamlit.app", >>> "https://issues.streamlit.app", >>> "https://30days.streamlit.app", >>> ], >>> "creator": [ >>> "https://github.com/streamlit", >>> "https://github.com/arnaudmiribel", >>> "https://github.com/streamlit", >>> "https://github.com/streamlit", >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "apps": st.column_config.LinkColumn( >>> "Trending apps", >>> help="The top trending Streamlit apps", >>> validate="^https://[a-z]+\\.streamlit\\.app$", >>> max_chars=100, >>> display_text="https://(.*?)\\.streamlit\\.app" >>> ), >>> "creator": st.column_config.LinkColumn( >>> "App Creator", display_text="Open profile" >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-link-column.streamlit.app/ height: 300px |
178,379 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class CheckboxColumnConfig(TypedDict):
    """Type-specific options for a checkbox column.

    Checkbox columns have no extra options, so only the discriminating
    ``type`` key is present.
    """

    type: Literal["checkbox"]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    # ``total=False``: every key is optional; omitted keys use the defaults
    # described in the docstring above.
    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # NOTE(review): not listed in the docstring above — presumably the
    # horizontal alignment of cell contents; confirm before documenting publicly.
    alignment: Literal["left", "center", "right"] | None
    # Discriminated union of the per-type option dicts; each variant's
    # ``type`` key identifies the column type.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `CheckboxColumn` function. Write a Python function `def CheckboxColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, disabled: bool | None = None, required: bool | None = None, default: bool | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a checkbox column in ``st.dataframe`` or ``st.data_editor``. This is the default column type for boolean values. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled with a checkbox widget. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. default: bool or None Specifies the default value in this column when a new row is added by the user. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "widgets": ["st.selectbox", "st.number_input", "st.text_area", "st.button"], >>> "favorite": [True, False, False, True], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "favorite": st.column_config.CheckboxColumn( >>> "Your favorite?", >>> help="Select your **favorite** widgets", >>> default=False, >>> ) >>> }, >>> disabled=["widgets"], >>> hide_index=True, >>> ) .. output:: https://doc-checkbox-column.streamlit.app/ height: 300px
Here is the function:
def CheckboxColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    default: bool | None = None,
) -> ColumnConfig:
    """Configure a checkbox column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for boolean values. This command needs to be used in
    the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.
    When used with ``st.data_editor``, editing will be enabled with a checkbox widget.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: bool or None
        Specifies the default value in this column when a new row is added by the user.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "widgets": ["st.selectbox", "st.number_input", "st.text_area", "st.button"],
    >>>         "favorite": [True, False, False, True],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "favorite": st.column_config.CheckboxColumn(
    >>>             "Your favorite?",
    >>>             help="Select your **favorite** widgets",
    >>>             default=False,
    >>>         )
    >>>     },
    >>>     disabled=["widgets"],
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-checkbox-column.streamlit.app/
        height: 300px
    """
    # Checkbox columns carry no type-specific options beyond the ``type``
    # discriminator itself.
    return ColumnConfig(
        type_config=CheckboxColumnConfig(type="checkbox"),
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        default=default,
    )
178,380 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class SelectboxColumnConfig(TypedDict):
    # Discriminator tag identifying this type-config as a selectbox column.
    type: Literal["selectbox"]
    # Choices offered while editing; None defers to the column's own
    # categorical values (see SelectboxColumn's ``options`` parameter).
    options: NotRequired[list[str | int | float] | None]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.
    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """
    # All keys are optional (total=False); the st.column_config.* factory
    # functions fill in only the entries relevant to each column type.
    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # NOTE(review): ``alignment`` is not covered by the docstring above —
    # presumably the horizontal alignment of cell contents; confirm upstream.
    alignment: Literal["left", "center", "right"] | None
    # Type-specific options: one of the *ColumnConfig TypedDicts produced by
    # the factory functions (e.g. SelectboxColumn sets SelectboxColumnConfig).
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `SelectboxColumn` function. Write a Python function `def SelectboxColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, disabled: bool | None = None, required: bool | None = None, default: str | int | float | None = None, options: Iterable[str | int | float] | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a selectbox column in ``st.dataframe`` or ``st.data_editor``. This is the default column type for Pandas categorical values. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled with a selectbox widget. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. default: str, int, float, bool, or None Specifies the default value in this column when a new row is added by the user. options: Iterable of str or None The options that can be selected during editing. If None (default), this will be inferred from the underlying dataframe column if its dtype is "category" (`see Pandas docs on categorical data <https://pandas.pydata.org/docs/user_guide/categorical.html>`_). Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "category": [ >>> "📊 Data Exploration", >>> "📈 Data Visualization", >>> "🤖 LLM", >>> "📊 Data Exploration", >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "category": st.column_config.SelectboxColumn( >>> "App Category", >>> help="The category of the app", >>> width="medium", >>> options=[ >>> "📊 Data Exploration", >>> "📈 Data Visualization", >>> "🤖 LLM", >>> ], >>> required=True, >>> ) >>> }, >>> hide_index=True, >>> ) .. 
output:: https://doc-selectbox-column.streamlit.app/ height: 300px
Here is the function:
def SelectboxColumn(
label: str | None = None,
*,
width: ColumnWidth | None = None,
help: str | None = None,
disabled: bool | None = None,
required: bool | None = None,
default: str | int | float | None = None,
options: Iterable[str | int | float] | None = None,
) -> ColumnConfig:
"""Configure a selectbox column in ``st.dataframe`` or ``st.data_editor``.
This is the default column type for Pandas categorical values. This command needs to
be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.
When used with ``st.data_editor``, editing will be enabled with a selectbox widget.
Parameters
----------
label: str or None
The label shown at the top of the column. If None (default),
the column name is used.
width: "small", "medium", "large", or None
The display width of the column. Can be one of "small", "medium", or "large".
If None (default), the column will be sized to fit the cell contents.
help: str or None
An optional tooltip that gets displayed when hovering over the column label.
disabled: bool or None
Whether editing should be disabled for this column. Defaults to False.
required: bool or None
Whether edited cells in the column need to have a value. If True, an edited cell
can only be submitted if it has a value other than None. Defaults to False.
default: str, int, float, bool, or None
Specifies the default value in this column when a new row is added by the user.
options: Iterable of str or None
The options that can be selected during editing. If None (default), this will be
inferred from the underlying dataframe column if its dtype is "category"
(`see Pandas docs on categorical data <https://pandas.pydata.org/docs/user_guide/categorical.html>`_).
Examples
--------
>>> import pandas as pd
>>> import streamlit as st
>>>
>>> data_df = pd.DataFrame(
>>> {
>>> "category": [
>>> "📊 Data Exploration",
>>> "📈 Data Visualization",
>>> "🤖 LLM",
>>> "📊 Data Exploration",
>>> ],
>>> }
>>> )
>>>
>>> st.data_editor(
>>> data_df,
>>> column_config={
>>> "category": st.column_config.SelectboxColumn(
>>> "App Category",
>>> help="The category of the app",
>>> width="medium",
>>> options=[
>>> "📊 Data Exploration",
>>> "📈 Data Visualization",
>>> "🤖 LLM",
>>> ],
>>> required=True,
>>> )
>>> },
>>> hide_index=True,
>>> )
.. output::
https://doc-selectbox-column.streamlit.app/
height: 300px
"""
return ColumnConfig(
label=label,
width=width,
help=help,
disabled=disabled,
required=required,
default=default,
type_config=SelectboxColumnConfig(
type="selectbox", options=list(options) if options is not None else None
),
) | Configure a selectbox column in ``st.dataframe`` or ``st.data_editor``. This is the default column type for Pandas categorical values. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled with a selectbox widget. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. default: str, int, float, bool, or None Specifies the default value in this column when a new row is added by the user. options: Iterable of str or None The options that can be selected during editing. If None (default), this will be inferred from the underlying dataframe column if its dtype is "category" (`see Pandas docs on categorical data <https://pandas.pydata.org/docs/user_guide/categorical.html>`_). Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "category": [ >>> "📊 Data Exploration", >>> "📈 Data Visualization", >>> "🤖 LLM", >>> "📊 Data Exploration", >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "category": st.column_config.SelectboxColumn( >>> "App Category", >>> help="The category of the app", >>> width="medium", >>> options=[ >>> "📊 Data Exploration", >>> "📈 Data Visualization", >>> "🤖 LLM", >>> ], >>> required=True, >>> ) >>> }, >>> hide_index=True, >>> ) .. 
output:: https://doc-selectbox-column.streamlit.app/ height: 300px |
178,381 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class BarChartColumnConfig(TypedDict):
    # Discriminator tag identifying this type-config as a bar-chart column.
    type: Literal["bar_chart"]
    # Shared y-axis bounds applied to every cell; None lets each cell use
    # the min/max of its own data (see BarChartColumn).
    y_min: NotRequired[int | float | None]
    y_max: NotRequired[int | float | None]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.
    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """
    # All keys are optional (total=False); the st.column_config.* factory
    # functions fill in only the entries relevant to each column type.
    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # NOTE(review): ``alignment`` is not covered by the docstring above —
    # presumably the horizontal alignment of cell contents; confirm upstream.
    alignment: Literal["left", "center", "right"] | None
    # Type-specific options: one of the *ColumnConfig TypedDicts produced by
    # the factory functions (e.g. BarChartColumn sets BarChartColumnConfig).
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `BarChartColumn` function. Write a Python function `def BarChartColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, y_min: int | float | None = None, y_max: int | float | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a bar chart column in ``st.dataframe`` or ``st.data_editor``. Cells need to contain a list of numbers. Chart columns are not editable at the moment. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. y_min: int, float, or None The minimum value on the y-axis for all cells in the column. If None (default), every cell will use the minimum of its data. y_max: int, float, or None The maximum value on the y-axis for all cells in the column. If None (default), every cell will use the maximum of its data. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "sales": [ >>> [0, 4, 26, 80, 100, 40], >>> [80, 20, 80, 35, 40, 100], >>> [10, 20, 80, 80, 70, 0], >>> [10, 100, 20, 100, 30, 100], >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "sales": st.column_config.BarChartColumn( >>> "Sales (last 6 months)", >>> help="The sales volume in the last 6 months", >>> y_min=0, >>> y_max=100, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-barchart-column.streamlit.app/ height: 300px
Here is the function:
def BarChartColumn(
label: str | None = None,
*,
width: ColumnWidth | None = None,
help: str | None = None,
y_min: int | float | None = None,
y_max: int | float | None = None,
) -> ColumnConfig:
"""Configure a bar chart column in ``st.dataframe`` or ``st.data_editor``.
Cells need to contain a list of numbers. Chart columns are not editable
at the moment. This command needs to be used in the ``column_config`` parameter
of ``st.dataframe`` or ``st.data_editor``.
Parameters
----------
label: str or None
The label shown at the top of the column. If None (default),
the column name is used.
width: "small", "medium", "large", or None
The display width of the column. Can be one of "small", "medium", or "large".
If None (default), the column will be sized to fit the cell contents.
help: str or None
An optional tooltip that gets displayed when hovering over the column label.
y_min: int, float, or None
The minimum value on the y-axis for all cells in the column.
If None (default), every cell will use the minimum of its data.
y_max: int, float, or None
The maximum value on the y-axis for all cells in the column. If None (default),
every cell will use the maximum of its data.
Examples
--------
>>> import pandas as pd
>>> import streamlit as st
>>>
>>> data_df = pd.DataFrame(
>>> {
>>> "sales": [
>>> [0, 4, 26, 80, 100, 40],
>>> [80, 20, 80, 35, 40, 100],
>>> [10, 20, 80, 80, 70, 0],
>>> [10, 100, 20, 100, 30, 100],
>>> ],
>>> }
>>> )
>>>
>>> st.data_editor(
>>> data_df,
>>> column_config={
>>> "sales": st.column_config.BarChartColumn(
>>> "Sales (last 6 months)",
>>> help="The sales volume in the last 6 months",
>>> y_min=0,
>>> y_max=100,
>>> ),
>>> },
>>> hide_index=True,
>>> )
.. output::
https://doc-barchart-column.streamlit.app/
height: 300px
"""
return ColumnConfig(
label=label,
width=width,
help=help,
type_config=BarChartColumnConfig(type="bar_chart", y_min=y_min, y_max=y_max),
) | Configure a bar chart column in ``st.dataframe`` or ``st.data_editor``. Cells need to contain a list of numbers. Chart columns are not editable at the moment. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. y_min: int, float, or None The minimum value on the y-axis for all cells in the column. If None (default), every cell will use the minimum of its data. y_max: int, float, or None The maximum value on the y-axis for all cells in the column. If None (default), every cell will use the maximum of its data. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "sales": [ >>> [0, 4, 26, 80, 100, 40], >>> [80, 20, 80, 35, 40, 100], >>> [10, 20, 80, 80, 70, 0], >>> [10, 100, 20, 100, 30, 100], >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "sales": st.column_config.BarChartColumn( >>> "Sales (last 6 months)", >>> help="The sales volume in the last 6 months", >>> y_min=0, >>> y_max=100, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-barchart-column.streamlit.app/ height: 300px |
178,382 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class LineChartColumnConfig(TypedDict):
    # Discriminator tag identifying this type-config as a line-chart column.
    type: Literal["line_chart"]
    # Shared y-axis bounds applied to every cell; None lets each cell use
    # the min/max of its own data (see LineChartColumn).
    y_min: NotRequired[int | float | None]
    y_max: NotRequired[int | float | None]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.
    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """
    # All keys are optional (total=False); the st.column_config.* factory
    # functions fill in only the entries relevant to each column type.
    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # NOTE(review): ``alignment`` is not covered by the docstring above —
    # presumably the horizontal alignment of cell contents; confirm upstream.
    alignment: Literal["left", "center", "right"] | None
    # Type-specific options: one of the *ColumnConfig TypedDicts produced by
    # the factory functions (e.g. LineChartColumn sets LineChartColumnConfig).
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `LineChartColumn` function. Write a Python function `def LineChartColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, y_min: int | float | None = None, y_max: int | float | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a line chart column in ``st.dataframe`` or ``st.data_editor``. Cells need to contain a list of numbers. Chart columns are not editable at the moment. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. y_min: int, float, or None The minimum value on the y-axis for all cells in the column. If None (default), every cell will use the minimum of its data. y_max: int, float, or None The maximum value on the y-axis for all cells in the column. If None (default), every cell will use the maximum of its data. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "sales": [ >>> [0, 4, 26, 80, 100, 40], >>> [80, 20, 80, 35, 40, 100], >>> [10, 20, 80, 80, 70, 0], >>> [10, 100, 20, 100, 30, 100], >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "sales": st.column_config.LineChartColumn( >>> "Sales (last 6 months)", >>> width="medium", >>> help="The sales volume in the last 6 months", >>> y_min=0, >>> y_max=100, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-linechart-column.streamlit.app/ height: 300px
Here is the function:
def LineChartColumn(
label: str | None = None,
*,
width: ColumnWidth | None = None,
help: str | None = None,
y_min: int | float | None = None,
y_max: int | float | None = None,
) -> ColumnConfig:
"""Configure a line chart column in ``st.dataframe`` or ``st.data_editor``.
Cells need to contain a list of numbers. Chart columns are not editable
at the moment. This command needs to be used in the ``column_config`` parameter
of ``st.dataframe`` or ``st.data_editor``.
Parameters
----------
label: str or None
The label shown at the top of the column. If None (default),
the column name is used.
width: "small", "medium", "large", or None
The display width of the column. Can be one of "small", "medium", or "large".
If None (default), the column will be sized to fit the cell contents.
help: str or None
An optional tooltip that gets displayed when hovering over the column label.
y_min: int, float, or None
The minimum value on the y-axis for all cells in the column.
If None (default), every cell will use the minimum of its data.
y_max: int, float, or None
The maximum value on the y-axis for all cells in the column. If None (default),
every cell will use the maximum of its data.
Examples
--------
>>> import pandas as pd
>>> import streamlit as st
>>>
>>> data_df = pd.DataFrame(
>>> {
>>> "sales": [
>>> [0, 4, 26, 80, 100, 40],
>>> [80, 20, 80, 35, 40, 100],
>>> [10, 20, 80, 80, 70, 0],
>>> [10, 100, 20, 100, 30, 100],
>>> ],
>>> }
>>> )
>>>
>>> st.data_editor(
>>> data_df,
>>> column_config={
>>> "sales": st.column_config.LineChartColumn(
>>> "Sales (last 6 months)",
>>> width="medium",
>>> help="The sales volume in the last 6 months",
>>> y_min=0,
>>> y_max=100,
>>> ),
>>> },
>>> hide_index=True,
>>> )
.. output::
https://doc-linechart-column.streamlit.app/
height: 300px
"""
return ColumnConfig(
label=label,
width=width,
help=help,
type_config=LineChartColumnConfig(type="line_chart", y_min=y_min, y_max=y_max),
) | Configure a line chart column in ``st.dataframe`` or ``st.data_editor``. Cells need to contain a list of numbers. Chart columns are not editable at the moment. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. y_min: int, float, or None The minimum value on the y-axis for all cells in the column. If None (default), every cell will use the minimum of its data. y_max: int, float, or None The maximum value on the y-axis for all cells in the column. If None (default), every cell will use the maximum of its data. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "sales": [ >>> [0, 4, 26, 80, 100, 40], >>> [80, 20, 80, 35, 40, 100], >>> [10, 20, 80, 80, 70, 0], >>> [10, 100, 20, 100, 30, 100], >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "sales": st.column_config.LineChartColumn( >>> "Sales (last 6 months)", >>> width="medium", >>> help="The sales volume in the last 6 months", >>> y_min=0, >>> y_max=100, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-linechart-column.streamlit.app/ height: 300px |
178,383 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class AreaChartColumnConfig(TypedDict):
    # Discriminator tag identifying this type-config as an area-chart column.
    type: Literal["area_chart"]
    # Shared y-axis bounds applied to every cell; None lets each cell use
    # the min/max of its own data (see AreaChartColumn).
    y_min: NotRequired[int | float | None]
    y_max: NotRequired[int | float | None]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.
    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """
    # All keys are optional (total=False); the st.column_config.* factory
    # functions fill in only the entries relevant to each column type.
    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # NOTE(review): ``alignment`` is not covered by the docstring above —
    # presumably the horizontal alignment of cell contents; confirm upstream.
    alignment: Literal["left", "center", "right"] | None
    # Type-specific options: one of the *ColumnConfig TypedDicts produced by
    # the factory functions (e.g. AreaChartColumn sets AreaChartColumnConfig).
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `AreaChartColumn` function. Write a Python function `def AreaChartColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, y_min: int | float | None = None, y_max: int | float | None = None, ) -> ColumnConfig` to solve the following problem:
Configure an area chart column in ``st.dataframe`` or ``st.data_editor``. Cells need to contain a list of numbers. Chart columns are not editable at the moment. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. y_min: int, float, or None The minimum value on the y-axis for all cells in the column. If None (default), every cell will use the minimum of its data. y_max: int, float, or None The maximum value on the y-axis for all cells in the column. If None (default), every cell will use the maximum of its data. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "sales": [ >>> [0, 4, 26, 80, 100, 40], >>> [80, 20, 80, 35, 40, 100], >>> [10, 20, 80, 80, 70, 0], >>> [10, 100, 20, 100, 30, 100], >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "sales": st.column_config.AreaChartColumn( >>> "Sales (last 6 months)", >>> width="medium", >>> help="The sales volume in the last 6 months", >>> y_min=0, >>> y_max=100, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-areachart-column.streamlit.app/ height: 300px
Here is the function:
def AreaChartColumn(
label: str | None = None,
*,
width: ColumnWidth | None = None,
help: str | None = None,
y_min: int | float | None = None,
y_max: int | float | None = None,
) -> ColumnConfig:
"""Configure an area chart column in ``st.dataframe`` or ``st.data_editor``.
Cells need to contain a list of numbers. Chart columns are not editable
at the moment. This command needs to be used in the ``column_config`` parameter
of ``st.dataframe`` or ``st.data_editor``.
Parameters
----------
label: str or None
The label shown at the top of the column. If None (default),
the column name is used.
width: "small", "medium", "large", or None
The display width of the column. Can be one of "small", "medium", or "large".
If None (default), the column will be sized to fit the cell contents.
help: str or None
An optional tooltip that gets displayed when hovering over the column label.
y_min: int, float, or None
The minimum value on the y-axis for all cells in the column.
If None (default), every cell will use the minimum of its data.
y_max: int, float, or None
The maximum value on the y-axis for all cells in the column. If None (default),
every cell will use the maximum of its data.
Examples
--------
>>> import pandas as pd
>>> import streamlit as st
>>>
>>> data_df = pd.DataFrame(
>>> {
>>> "sales": [
>>> [0, 4, 26, 80, 100, 40],
>>> [80, 20, 80, 35, 40, 100],
>>> [10, 20, 80, 80, 70, 0],
>>> [10, 100, 20, 100, 30, 100],
>>> ],
>>> }
>>> )
>>>
>>> st.data_editor(
>>> data_df,
>>> column_config={
>>> "sales": st.column_config.AreaChartColumn(
>>> "Sales (last 6 months)",
>>> width="medium",
>>> help="The sales volume in the last 6 months",
>>> y_min=0,
>>> y_max=100,
>>> ),
>>> },
>>> hide_index=True,
>>> )
.. output::
https://doc-areachart-column.streamlit.app/
height: 300px
"""
return ColumnConfig(
label=label,
width=width,
help=help,
type_config=AreaChartColumnConfig(type="area_chart", y_min=y_min, y_max=y_max),
) | Configure an area chart column in ``st.dataframe`` or ``st.data_editor``. Cells need to contain a list of numbers. Chart columns are not editable at the moment. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. y_min: int, float, or None The minimum value on the y-axis for all cells in the column. If None (default), every cell will use the minimum of its data. y_max: int, float, or None The maximum value on the y-axis for all cells in the column. If None (default), every cell will use the maximum of its data. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "sales": [ >>> [0, 4, 26, 80, 100, 40], >>> [80, 20, 80, 35, 40, 100], >>> [10, 20, 80, 80, 70, 0], >>> [10, 100, 20, 100, 30, 100], >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "sales": st.column_config.AreaChartColumn( >>> "Sales (last 6 months)", >>> width="medium", >>> help="The sales volume in the last 6 months", >>> y_min=0, >>> y_max=100, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-areachart-column.streamlit.app/ height: 300px |
178,384 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class ImageColumnConfig(TypedDict):
    # Type-config payload for st.column_config.ImageColumn. The "type"
    # discriminator selects the image renderer for the column's cells.
    type: Literal["image"]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    alignment: "left", "center", "right", or None
        Horizontal alignment of the cell contents.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # Horizontal alignment of the cell contents within the column.
    alignment: Literal["left", "center", "right"] | None
    # Discriminated union over all supported column-type payloads; each member
    # carries a unique Literal "type" tag. None leaves the type to be inferred
    # from the underlying data.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `ImageColumn` function. Write a Python function `def ImageColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, )` to solve the following problem:
Configure an image column in ``st.dataframe`` or ``st.data_editor``. The cell values need to be one of: * A URL to fetch the image from. This can also be a relative URL of an image deployed via `static file serving <https://docs.streamlit.io/library/advanced-features/static-file-serving>`_. Note that you can NOT use an arbitrary local image if it is not available through a public URL. * A data URL containing an SVG XML like ``data:image/svg+xml;utf8,<svg xmlns=...</svg>``. * A data URL containing a Base64 encoded image like ``data:image/png;base64,iVBO...``. Image columns are not editable at the moment. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. 
Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "apps": [ >>> "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/5435b8cb-6c6c-490b-9608-799b543655d3/Home_Page.png", >>> "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/ef9a7627-13f2-47e5-8f65-3f69bb38a5c2/Home_Page.png", >>> "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/31b99099-8eae-4ff8-aa89-042895ed3843/Home_Page.png", >>> "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/6a399b09-241e-4ae7-a31f-7640dc1d181e/Home_Page.png", >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "apps": st.column_config.ImageColumn( >>> "Preview Image", help="Streamlit app preview screenshots" >>> ) >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-image-column.streamlit.app/ height: 300px
Here is the function:
def ImageColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
):
    """Configure an image column in ``st.dataframe`` or ``st.data_editor``.

    Use the result in the ``column_config`` parameter of ``st.dataframe`` or
    ``st.data_editor``. Cell values must be one of:

    * A URL the image can be fetched from (including a relative URL of an
      image deployed via static file serving). Arbitrary local images that
      are not reachable through a public URL can NOT be used.
    * A data URL containing an SVG XML like
      ``data:image/svg+xml;utf8,<svg xmlns=...</svg>``.
    * A data URL containing a Base64 encoded image like
      ``data:image/png;base64,iVBO...``.

    Image columns are not editable at the moment.

    Parameters
    ----------
    label: str or None
        Column header text. If None (default), the column name is shown.
    width: "small", "medium", "large", or None
        Display width of the column. If None (default), the column is sized
        to fit the cell contents.
    help: str or None
        Tooltip shown when hovering over the column label.

    Returns
    -------
    ColumnConfig
        A column configuration dict whose ``type_config`` selects the image
        renderer.
    """
    # Image cells are read-only, so only display-related options are exposed.
    image_type = ImageColumnConfig(type="image")
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        type_config=image_type,
    )
Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "apps": [ >>> "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/5435b8cb-6c6c-490b-9608-799b543655d3/Home_Page.png", >>> "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/ef9a7627-13f2-47e5-8f65-3f69bb38a5c2/Home_Page.png", >>> "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/31b99099-8eae-4ff8-aa89-042895ed3843/Home_Page.png", >>> "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/6a399b09-241e-4ae7-a31f-7640dc1d181e/Home_Page.png", >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "apps": st.column_config.ImageColumn( >>> "Preview Image", help="Streamlit app preview screenshots" >>> ) >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-image-column.streamlit.app/ height: 300px |
# --- record 178385 ---
from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class ListColumnConfig(TypedDict):
    # Type-config payload for st.column_config.ListColumn. The "type"
    # discriminator selects the list renderer for the column's cells.
    type: Literal["list"]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    alignment: "left", "center", "right", or None
        Horizontal alignment of the cell contents.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # Horizontal alignment of the cell contents within the column.
    alignment: Literal["left", "center", "right"] | None
    # Discriminated union over all supported column-type payloads; each member
    # carries a unique Literal "type" tag. None leaves the type to be inferred
    # from the underlying data.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `ListColumn` function. Write a Python function `def ListColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, )` to solve the following problem:
Configure a list column in ``st.dataframe`` or ``st.data_editor``. This is the default column type for list-like values. List columns are not editable at the moment. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "sales": [ >>> [0, 4, 26, 80, 100, 40], >>> [80, 20, 80, 35, 40, 100], >>> [10, 20, 80, 80, 70, 0], >>> [10, 100, 20, 100, 30, 100], >>> ], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "sales": st.column_config.ListColumn( >>> "Sales (last 6 months)", >>> help="The sales volume in the last 6 months", >>> width="medium", >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-list-column.streamlit.app/ height: 300px
Here is the function:
def ListColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
):
    """Configure a list column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for list-like values. List columns are
    not editable at the moment. Use the result in the ``column_config``
    parameter of ``st.dataframe`` or ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        Column header text. If None (default), the column name is shown.
    width: "small", "medium", "large", or None
        Display width of the column. If None (default), the column is sized
        to fit the cell contents.
    help: str or None
        Tooltip shown when hovering over the column label.

    Returns
    -------
    ColumnConfig
        A column configuration dict whose ``type_config`` selects the list
        renderer.
    """
    # List cells are read-only, so only display-related options are exposed.
    list_type = ListColumnConfig(type="list")
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        type_config=list_type,
    )
# --- record 178386 ---
from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class DatetimeColumnConfig(TypedDict):
    # Type-config payload for st.column_config.DatetimeColumn.
    type: Literal["datetime"]
    # momentJS display format string; None falls back to the frontend default.
    format: NotRequired[str | None]
    # Bounds are serialized as ISO-8601 strings (DatetimeColumn calls
    # .isoformat() on the datetime values before storing them here).
    min_value: NotRequired[str | None]
    max_value: NotRequired[str | None]
    # Stepping interval in seconds (timedeltas are converted by the caller).
    step: NotRequired[int | float | None]
    # Timezone for the column; None means it is inferred from the data.
    timezone: NotRequired[str | None]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    alignment: "left", "center", "right", or None
        Horizontal alignment of the cell contents.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # Horizontal alignment of the cell contents within the column.
    alignment: Literal["left", "center", "right"] | None
    # Discriminated union over all supported column-type payloads; each member
    # carries a unique Literal "type" tag. None leaves the type to be inferred
    # from the underlying data.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `DatetimeColumn` function. Write a Python function `def DatetimeColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, disabled: bool | None = None, required: bool | None = None, default: datetime.datetime | None = None, format: str | None = None, min_value: datetime.datetime | None = None, max_value: datetime.datetime | None = None, step: int | float | datetime.timedelta | None = None, timezone: str | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a datetime column in ``st.dataframe`` or ``st.data_editor``. This is the default column type for datetime values. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled with a datetime picker widget. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. default: datetime.datetime or None Specifies the default value in this column when a new row is added by the user. format: str or None A momentJS format string controlling how datetimes are displayed. See `momentJS docs <https://momentjs.com/docs/#/displaying/format/>`_ for available formats. If None (default), uses ``YYYY-MM-DD HH:mm:ss``. min_value: datetime.datetime or None The minimum datetime that can be entered. If None (default), there will be no minimum. max_value: datetime.datetime or None The maximum datetime that can be entered. If None (default), there will be no maximum. step: int, float, datetime.timedelta, or None The stepping interval in seconds. If None (default), the step will be 1 second. timezone: str or None The timezone of this column. If None (default), the timezone is inferred from the underlying data. 
Examples -------- >>> from datetime import datetime >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "appointment": [ >>> datetime(2024, 2, 5, 12, 30), >>> datetime(2023, 11, 10, 18, 0), >>> datetime(2024, 3, 11, 20, 10), >>> datetime(2023, 9, 12, 3, 0), >>> ] >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "appointment": st.column_config.DatetimeColumn( >>> "Appointment", >>> min_value=datetime(2023, 6, 1), >>> max_value=datetime(2025, 1, 1), >>> format="D MMM YYYY, h:mm a", >>> step=60, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-datetime-column.streamlit.app/ height: 300px
Here is the function:
def DatetimeColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    default: datetime.datetime | None = None,
    format: str | None = None,
    min_value: datetime.datetime | None = None,
    max_value: datetime.datetime | None = None,
    step: int | float | datetime.timedelta | None = None,
    timezone: str | None = None,
) -> ColumnConfig:
    """Configure a datetime column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for datetime values. Use the result in
    the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.
    When used with ``st.data_editor``, editing is enabled with a datetime
    picker widget.

    Parameters
    ----------
    label: str or None
        Column header text. If None (default), the column name is shown.
    width: "small", "medium", "large", or None
        Display width of the column. If None (default), the column is sized
        to fit the cell contents.
    help: str or None
        Tooltip shown when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells need a value. If True, an edited cell can only
        be submitted if it has a value other than None. Defaults to False.
    default: datetime.datetime or None
        Default value for this column when the user adds a new row.
    format: str or None
        A momentJS format string controlling how datetimes are displayed.
        If None (default), ``YYYY-MM-DD HH:mm:ss`` is used.
    min_value: datetime.datetime or None
        Minimum datetime that can be entered; None (default) means no minimum.
    max_value: datetime.datetime or None
        Maximum datetime that can be entered; None (default) means no maximum.
    step: int, float, datetime.timedelta, or None
        Stepping interval in seconds. If None (default), the step is 1 second.
    timezone: str or None
        Timezone of this column. If None (default), the timezone is inferred
        from the underlying data.

    Returns
    -------
    ColumnConfig
        A column configuration dict whose ``type_config`` selects the
        datetime renderer/editor.
    """

    def _iso(value: datetime.datetime | None) -> str | None:
        # Datetimes cross the serialization boundary as ISO-8601 strings.
        return None if value is None else value.isoformat()

    # Normalize the step to a plain number of seconds.
    if isinstance(step, datetime.timedelta):
        step_in_seconds: int | float | None = step.total_seconds()
    else:
        step_in_seconds = step

    datetime_type = DatetimeColumnConfig(
        type="datetime",
        format=format,
        min_value=_iso(min_value),
        max_value=_iso(max_value),
        step=step_in_seconds,
        timezone=timezone,
    )
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        default=_iso(default),
        type_config=datetime_type,
    )
Examples -------- >>> from datetime import datetime >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "appointment": [ >>> datetime(2024, 2, 5, 12, 30), >>> datetime(2023, 11, 10, 18, 0), >>> datetime(2024, 3, 11, 20, 10), >>> datetime(2023, 9, 12, 3, 0), >>> ] >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "appointment": st.column_config.DatetimeColumn( >>> "Appointment", >>> min_value=datetime(2023, 6, 1), >>> max_value=datetime(2025, 1, 1), >>> format="D MMM YYYY, h:mm a", >>> step=60, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-datetime-column.streamlit.app/ height: 300px |
# --- record 178387 ---
from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class TimeColumnConfig(TypedDict):
    # Type-config payload for st.column_config.TimeColumn.
    type: Literal["time"]
    # momentJS display format string; None falls back to the frontend default.
    format: NotRequired[str | None]
    # Bounds as strings — presumably ISO-formatted times, mirroring the
    # isoformat() serialization used by DatetimeColumn; TODO confirm against
    # TimeColumn's implementation.
    min_value: NotRequired[str | None]
    max_value: NotRequired[str | None]
    # Stepping interval in seconds.
    step: NotRequired[int | float | None]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.
    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.
    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.
    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.
    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.
    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the user.
    hidden: bool or None
        Whether to hide the column. Defaults to False.
    alignment: "left", "center", "right", or None
        Horizontal alignment of the cell contents.
    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # Horizontal alignment of the cell contents within the column.
    alignment: Literal["left", "center", "right"] | None
    # Discriminated union over all supported column-type payloads; each member
    # carries a unique Literal "type" tag. None leaves the type to be inferred
    # from the underlying data.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `TimeColumn` function. Write a Python function `def TimeColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, disabled: bool | None = None, required: bool | None = None, default: datetime.time | None = None, format: str | None = None, min_value: datetime.time | None = None, max_value: datetime.time | None = None, step: int | float | datetime.timedelta | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a time column in ``st.dataframe`` or ``st.data_editor``. This is the default column type for time values. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled with a time picker widget. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. default: datetime.time or None Specifies the default value in this column when a new row is added by the user. format: str or None A momentJS format string controlling how times are displayed. See `momentJS docs <https://momentjs.com/docs/#/displaying/format/>`_ for available formats. If None (default), uses ``HH:mm:ss``. min_value: datetime.time or None The minimum time that can be entered. If None (default), there will be no minimum. max_value: datetime.time or None The maximum time that can be entered. If None (default), there will be no maximum. step: int, float, datetime.timedelta, or None The stepping interval in seconds. If None (default), the step will be 1 second. 
Examples -------- >>> from datetime import time >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "appointment": [ >>> time(12, 30), >>> time(18, 0), >>> time(9, 10), >>> time(16, 25), >>> ] >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "appointment": st.column_config.TimeColumn( >>> "Appointment", >>> min_value=time(8, 0, 0), >>> max_value=time(19, 0, 0), >>> format="hh:mm a", >>> step=60, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-time-column.streamlit.app/ height: 300px
Here is the function:
def TimeColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    default: datetime.time | None = None,
    format: str | None = None,
    min_value: datetime.time | None = None,
    max_value: datetime.time | None = None,
    step: int | float | datetime.timedelta | None = None,
) -> ColumnConfig:
    """Configure a time column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for time values. This command needs to be used in
    the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When
    used with ``st.data_editor``, editing will be enabled with a time picker widget.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.

    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.

    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.

    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.

    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited
        cell can only be submitted if it has a value other than None. Defaults to
        False.

    default: datetime.time or None
        Specifies the default value in this column when a new row is added by the
        user.

    format: str or None
        A momentJS format string controlling how times are displayed. See
        `momentJS docs <https://momentjs.com/docs/#/displaying/format/>`_ for
        available formats. If None (default), uses ``HH:mm:ss``.

    min_value: datetime.time or None
        The minimum time that can be entered.
        If None (default), there will be no minimum.

    max_value: datetime.time or None
        The maximum time that can be entered.
        If None (default), there will be no maximum.

    step: int, float, datetime.timedelta, or None
        The stepping interval in seconds. If None (default), the step will be
        1 second.

    Examples
    --------
    >>> from datetime import time
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "appointment": [
    >>>             time(12, 30),
    >>>             time(18, 0),
    >>>             time(9, 10),
    >>>             time(16, 25),
    >>>         ]
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "appointment": st.column_config.TimeColumn(
    >>>             "Appointment",
    >>>             min_value=time(8, 0, 0),
    >>>             max_value=time(19, 0, 0),
    >>>             format="hh:mm a",
    >>>             step=60,
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-time-column.streamlit.app/
        height: 300px
    """
    # Serialize time values to ISO-8601 strings for the frontend protocol.
    default_iso = default.isoformat() if default is not None else None
    min_iso = min_value.isoformat() if min_value is not None else None
    max_iso = max_value.isoformat() if max_value is not None else None

    # Normalize a timedelta step into plain seconds; ints/floats pass through.
    if isinstance(step, datetime.timedelta):
        step_seconds: int | float | None = step.total_seconds()
    else:
        step_seconds = step

    time_config = TimeColumnConfig(
        type="time",
        format=format,
        min_value=min_iso,
        max_value=max_iso,
        step=step_seconds,
    )
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        default=default_iso,
        type_config=time_config,
    )
Examples -------- >>> from datetime import time >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "appointment": [ >>> time(12, 30), >>> time(18, 0), >>> time(9, 10), >>> time(16, 25), >>> ] >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "appointment": st.column_config.TimeColumn( >>> "Appointment", >>> min_value=time(8, 0, 0), >>> max_value=time(19, 0, 0), >>> format="hh:mm a", >>> step=60, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-time-column.streamlit.app/ height: 300px |
178,388 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class DateColumnConfig(TypedDict):
    """Type-specific options for a date column (see ``DateColumn``)."""

    type: Literal["date"]
    # momentJS display format; None lets the frontend apply its default.
    format: NotRequired[str | None]
    # ISO-8601 date strings bounding user input; None means unbounded
    # (DateColumn serializes datetime.date via .isoformat()).
    min_value: NotRequired[str | None]
    max_value: NotRequired[str | None]
    # Stepping interval in days; None means a 1-day step.
    step: NotRequired[int | None]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.

    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.

    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.

    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.

    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited
        cell can only be submitted if it has a value other than None. Defaults to
        False.

    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the
        user.

    hidden: bool or None
        Whether to hide the column. Defaults to False.

    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # Horizontal cell alignment; not covered by the docstring parameters above.
    alignment: Literal["left", "center", "right"] | None
    # Type-specific options produced by the st.column_config.* factory functions.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `DateColumn` function. Write a Python function `def DateColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, disabled: bool | None = None, required: bool | None = None, default: datetime.date | None = None, format: str | None = None, min_value: datetime.date | None = None, max_value: datetime.date | None = None, step: int | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a date column in ``st.dataframe`` or ``st.data_editor``. This is the default column type for date values. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled with a date picker widget. Parameters ---------- label: str or None The label shown at the top of the column. If None (default), the column name is used. width: "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help: str or None An optional tooltip that gets displayed when hovering over the column label. disabled: bool or None Whether editing should be disabled for this column. Defaults to False. required: bool or None Whether edited cells in the column need to have a value. If True, an edited cell can only be submitted if it has a value other than None. Defaults to False. default: datetime.date or None Specifies the default value in this column when a new row is added by the user. format: str or None A momentJS format string controlling how times are displayed. See `momentJS docs <https://momentjs.com/docs/#/displaying/format/>`_ for available formats. If None (default), uses ``YYYY-MM-DD``. min_value: datetime.date or None The minimum date that can be entered. If None (default), there will be no minimum. max_value: datetime.date or None The maximum date that can be entered. If None (default), there will be no maximum. step: int or None The stepping interval in days. If None (default), the step will be 1 day. 
Examples -------- >>> from datetime import date >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "birthday": [ >>> date(1980, 1, 1), >>> date(1990, 5, 3), >>> date(1974, 5, 19), >>> date(2001, 8, 17), >>> ] >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "birthday": st.column_config.DateColumn( >>> "Birthday", >>> min_value=date(1900, 1, 1), >>> max_value=date(2005, 1, 1), >>> format="DD.MM.YYYY", >>> step=1, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-date-column.streamlit.app/ height: 300px
Here is the function:
def DateColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    default: datetime.date | None = None,
    format: str | None = None,
    min_value: datetime.date | None = None,
    max_value: datetime.date | None = None,
    step: int | None = None,
) -> ColumnConfig:
    """Configure a date column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for date values. This command needs to be used in
    the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When
    used with ``st.data_editor``, editing will be enabled with a date picker widget.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.

    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.

    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.

    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.

    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited
        cell can only be submitted if it has a value other than None. Defaults to
        False.

    default: datetime.date or None
        Specifies the default value in this column when a new row is added by the
        user.

    format: str or None
        A momentJS format string controlling how dates are displayed. See
        `momentJS docs <https://momentjs.com/docs/#/displaying/format/>`_ for
        available formats. If None (default), uses ``YYYY-MM-DD``.

    min_value: datetime.date or None
        The minimum date that can be entered.
        If None (default), there will be no minimum.

    max_value: datetime.date or None
        The maximum date that can be entered.
        If None (default), there will be no maximum.

    step: int or None
        The stepping interval in days. If None (default), the step will be 1 day.

    Examples
    --------
    >>> from datetime import date
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "birthday": [
    >>>             date(1980, 1, 1),
    >>>             date(1990, 5, 3),
    >>>             date(1974, 5, 19),
    >>>             date(2001, 8, 17),
    >>>         ]
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "birthday": st.column_config.DateColumn(
    >>>             "Birthday",
    >>>             min_value=date(1900, 1, 1),
    >>>             max_value=date(2005, 1, 1),
    >>>             format="DD.MM.YYYY",
    >>>             step=1,
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-date-column.streamlit.app/
        height: 300px
    """
    # Dates are serialized as ISO-8601 strings for the frontend protocol.
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        default=None if default is None else default.isoformat(),
        type_config=DateColumnConfig(
            type="date",
            format=format,
            min_value=None if min_value is None else min_value.isoformat(),
            max_value=None if max_value is None else max_value.isoformat(),
            step=step,
        ),
    )
Examples -------- >>> from datetime import date >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "birthday": [ >>> date(1980, 1, 1), >>> date(1990, 5, 3), >>> date(1974, 5, 19), >>> date(2001, 8, 17), >>> ] >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "birthday": st.column_config.DateColumn( >>> "Birthday", >>> min_value=date(1900, 1, 1), >>> max_value=date(2005, 1, 1), >>> format="DD.MM.YYYY", >>> step=1, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-date-column.streamlit.app/ height: 300px |
178,389 | from __future__ import annotations
import datetime
from typing import Iterable, Literal, TypedDict
from typing_extensions import NotRequired, TypeAlias
from streamlit.runtime.metrics_util import gather_metrics
ColumnWidth: TypeAlias = Literal["small", "medium", "large"]
class ProgressColumnConfig(TypedDict):
    """Type-specific options for a progress-bar column (see ``ProgressColumn``)."""

    type: Literal["progress"]
    # printf-style format string for the displayed number; None uses the default.
    format: NotRequired[str | None]
    # Value range of the progress bar; None lets the frontend pick defaults.
    min_value: NotRequired[int | float | None]
    max_value: NotRequired[int | float | None]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.

    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.

    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.

    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.

    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited
        cell can only be submitted if it has a value other than None. Defaults to
        False.

    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the
        user.

    hidden: bool or None
        Whether to hide the column. Defaults to False.

    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # Horizontal cell alignment; not covered by the docstring parameters above.
    alignment: Literal["left", "center", "right"] | None
    # Type-specific options produced by the st.column_config.* factory functions.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
The provided code snippet includes necessary dependencies for implementing the `ProgressColumn` function. Write a Python function `def ProgressColumn( label: str | None = None, *, width: ColumnWidth | None = None, help: str | None = None, format: str | None = None, min_value: int | float | None = None, max_value: int | float | None = None, ) -> ColumnConfig` to solve the following problem:
Configure a progress column in ``st.dataframe`` or ``st.data_editor``. Cells need to contain a number. Progress columns are not editable at the moment. This command needs to be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. Parameters ---------- label : str or None The label shown at the top of the column. If None (default), the column name is used. width : "small", "medium", "large", or None The display width of the column. Can be one of "small", "medium", or "large". If None (default), the column will be sized to fit the cell contents. help : str or None An optional tooltip that gets displayed when hovering over the column label. format : str or None A printf-style format string controlling how numbers are displayed. Valid formatters: %d %e %f %g %i %u. You can also add prefixes and suffixes, e.g. ``"$ %.2f"`` to show a dollar prefix. min_value : int, float, or None The minimum value of the progress bar. If None (default), will be 0. max_value : int, float, or None The minimum value of the progress bar. If None (default), will be 100 for integer values and 1 for float values. Examples -------- >>> import pandas as pd >>> import streamlit as st >>> >>> data_df = pd.DataFrame( >>> { >>> "sales": [200, 550, 1000, 80], >>> } >>> ) >>> >>> st.data_editor( >>> data_df, >>> column_config={ >>> "sales": st.column_config.ProgressColumn( >>> "Sales volume", >>> help="The sales volume in USD", >>> format="$%f", >>> min_value=0, >>> max_value=1000, >>> ), >>> }, >>> hide_index=True, >>> ) .. output:: https://doc-progress-column.streamlit.app/ height: 300px
Here is the function:
def ProgressColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    format: str | None = None,
    min_value: int | float | None = None,
    max_value: int | float | None = None,
) -> ColumnConfig:
    """Configure a progress column in ``st.dataframe`` or ``st.data_editor``.

    Cells need to contain a number. Progress columns are not editable at the moment.
    This command needs to be used in the ``column_config`` parameter of
    ``st.dataframe`` or ``st.data_editor``.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If None (default),
        the column name is used.

    width : "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.

    help : str or None
        An optional tooltip that gets displayed when hovering over the column label.

    format : str or None
        A printf-style format string controlling how numbers are displayed.
        Valid formatters: %d %e %f %g %i %u. You can also add prefixes and suffixes,
        e.g. ``"$ %.2f"`` to show a dollar prefix.

    min_value : int, float, or None
        The minimum value of the progress bar.
        If None (default), will be 0.

    max_value : int, float, or None
        The maximum value of the progress bar. If None (default), will be 100 for
        integer values and 1 for float values.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "sales": [200, 550, 1000, 80],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "sales": st.column_config.ProgressColumn(
    >>>             "Sales volume",
    >>>             help="The sales volume in USD",
    >>>             format="$%f",
    >>>             min_value=0,
    >>>             max_value=1000,
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-progress-column.streamlit.app/
        height: 300px
    """
    # No disabled/required/default options: progress columns are read-only.
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        type_config=ProgressColumnConfig(
            type="progress",
            format=format,
            min_value=min_value,
            max_value=max_value,
        ),
    )
178,390 | from __future__ import annotations
import json
from enum import Enum
from typing import TYPE_CHECKING, Dict, Final, Literal, Mapping, Union
from typing_extensions import TypeAlias
from streamlit.elements.lib.column_types import ColumnConfig, ColumnType
from streamlit.elements.lib.dicttools import remove_none_values
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.type_util import DataFormat, is_colum_type_arrow_incompatible
INDEX_IDENTIFIER: IndexIdentifierType = "_index"
DataframeSchema: TypeAlias = Dict[str, ColumnDataKind]
def _determine_data_kind(
    column: Series | Index, field: pa.Field | None = None
) -> ColumnDataKind:
    """Determine the data kind of a column.

    The column data kind refers to the shared data type of the values
    in the column (e.g. int, float, str, bool).

    Parameters
    ----------
    column : pd.Series, pd.Index
        The column to determine the data kind for.

    field : pa.Field, optional
        The arrow field from the arrow table schema.

    Returns
    -------
    ColumnDataKind
        The data kind of the column.
    """
    import pandas as pd

    # Categorical columns: the kind is dictated by the categories themselves,
    # so infer from them rather than from the (categorical) dtype.
    if isinstance(column.dtype, pd.CategoricalDtype):
        return _determine_data_kind_via_inferred_type(column.dtype.categories)

    # Prefer the Arrow schema whenever it gives a definitive answer.
    if field is not None:
        arrow_kind = _determine_data_kind_via_arrow(field)
        if arrow_kind != ColumnDataKind.UNKNOWN:
            return arrow_kind

    # An object dtype is ambiguous -> fall back to pandas type inference
    # on the actual column values.
    if column.dtype.name == "object":
        return _determine_data_kind_via_inferred_type(column)

    # Otherwise the pandas dtype itself is informative enough.
    return _determine_data_kind_via_pandas_dtype(column)
The provided code snippet includes necessary dependencies for implementing the `determine_dataframe_schema` function. Write a Python function `def determine_dataframe_schema( data_df: DataFrame, arrow_schema: pa.Schema ) -> DataframeSchema` to solve the following problem:
Determine the schema of a dataframe. Parameters ---------- data_df : pd.DataFrame The dataframe to determine the schema of. arrow_schema : pa.Schema The Arrow schema of the dataframe. Returns ------- DataframeSchema A mapping that contains the detected data type for the index and columns. The key is the column name in the underlying dataframe or ``_index`` for index columns.
Here is the function:
def determine_dataframe_schema(
    data_df: DataFrame, arrow_schema: pa.Schema
) -> DataframeSchema:
    """Determine the schema of a dataframe.

    Parameters
    ----------
    data_df : pd.DataFrame
        The dataframe to determine the schema of.
    arrow_schema : pa.Schema
        The Arrow schema of the dataframe.

    Returns
    -------
    DataframeSchema
        A mapping that contains the detected data type for the index and columns.
        The key is the column name in the underlying dataframe or ``_index`` for
        index columns.
    """
    # Start with the index kind under the reserved ``_index`` key.
    # TODO(lukasmasuch): We need to apply changes here to support multiindex.
    schema: DataframeSchema = {
        INDEX_IDENTIFIER: _determine_data_kind(data_df.index)
    }

    # Detect the kind of every data column, pairing each with its arrow field.
    for field_index, (col_name, col_data) in enumerate(data_df.items()):
        schema[col_name] = _determine_data_kind(
            col_data, arrow_schema.field(field_index)
        )

    return schema
178,391 | from __future__ import annotations
import json
from enum import Enum
from typing import TYPE_CHECKING, Dict, Final, Literal, Mapping, Union
from typing_extensions import TypeAlias
from streamlit.elements.lib.column_types import ColumnConfig, ColumnType
from streamlit.elements.lib.dicttools import remove_none_values
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.type_util import DataFormat, is_colum_type_arrow_incompatible
ColumnConfigMapping: TypeAlias = Dict[Union[IndexIdentifierType, str], ColumnConfig]
ColumnConfigMappingInput: TypeAlias = Mapping[
Union[IndexIdentifierType, str],
Union[ColumnConfig, None, str],
]
class ColumnConfig(TypedDict, total=False):
    """Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.

    Parameters
    ----------
    label: str or None
        The label shown at the top of the column. If None (default),
        the column name is used.

    width: "small", "medium", "large", or None
        The display width of the column. Can be one of "small", "medium", or "large".
        If None (default), the column will be sized to fit the cell contents.

    help: str or None
        An optional tooltip that gets displayed when hovering over the column label.

    disabled: bool or None
        Whether editing should be disabled for this column. Defaults to False.

    required: bool or None
        Whether edited cells in the column need to have a value. If True, an edited
        cell can only be submitted if it has a value other than None. Defaults to
        False.

    default: str, bool, int, float, or None
        Specifies the default value in this column when a new row is added by the
        user.

    hidden: bool or None
        Whether to hide the column. Defaults to False.

    type_config: dict or str or None
        Configure a column type and type specific options.
    """

    label: str | None
    width: ColumnWidth | None
    help: str | None
    hidden: bool | None
    disabled: bool | None
    required: bool | None
    default: str | bool | int | float | None
    # Horizontal cell alignment; not covered by the docstring parameters above.
    alignment: Literal["left", "center", "right"] | None
    # Type-specific options produced by the st.column_config.* factory functions.
    type_config: (
        NumberColumnConfig
        | TextColumnConfig
        | CheckboxColumnConfig
        | SelectboxColumnConfig
        | LinkColumnConfig
        | ListColumnConfig
        | DatetimeColumnConfig
        | DateColumnConfig
        | TimeColumnConfig
        | ProgressColumnConfig
        | LineChartColumnConfig
        | BarChartColumnConfig
        | AreaChartColumnConfig
        | ImageColumnConfig
        | None
    )
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.

    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)

    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """

    def __repr__(self) -> str:
        # Delegate to the shared helper for a consistent debug representation.
        return util.repr_(self)
The provided code snippet includes necessary dependencies for implementing the `process_config_mapping` function. Write a Python function `def process_config_mapping( column_config: ColumnConfigMappingInput | None = None, ) -> ColumnConfigMapping` to solve the following problem:
Transforms a user-provided column config mapping into a valid column config mapping that can be used by the frontend. Parameters ---------- column_config: dict or None The user-provided column config mapping. Returns ------- dict The transformed column config mapping.
Here is the function:
def process_config_mapping(
    column_config: ColumnConfigMappingInput | None = None,
) -> ColumnConfigMapping:
    """Transforms a user-provided column config mapping into a valid column config
    mapping that can be used by the frontend.

    Parameters
    ----------
    column_config: dict or None
        The user-provided column config mapping.

    Returns
    -------
    dict
        The transformed column config mapping.
    """
    normalized: ColumnConfigMapping = {}

    for column, config in (column_config or {}).items():
        if config is None:
            # None is shorthand for hiding the column entirely.
            normalized[column] = ColumnConfig(hidden=True)
        elif isinstance(config, str):
            # A bare string is shorthand for the column label.
            normalized[column] = ColumnConfig(label=config)
        elif isinstance(config, dict):
            normalized[column] = config
        else:
            raise StreamlitAPIException(
                f"Invalid column config for column `{column}`. "
                f"Expected `None`, `str` or `dict`, but got `{type(config)}`."
            )

    return normalized
178,392 | from __future__ import annotations
import json
from enum import Enum
from typing import TYPE_CHECKING, Dict, Final, Literal, Mapping, Union
from typing_extensions import TypeAlias
from streamlit.elements.lib.column_types import ColumnConfig, ColumnType
from streamlit.elements.lib.dicttools import remove_none_values
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.type_util import DataFormat, is_colum_type_arrow_incompatible
INDEX_IDENTIFIER: IndexIdentifierType = "_index"
ColumnConfigMapping: TypeAlias = Dict[Union[IndexIdentifierType, str], ColumnConfig]
def update_column_config(
    column_config_mapping: ColumnConfigMapping, column: str, column_config: ColumnConfig
) -> None:
    """Merge ``column_config`` into the mapping entry for a single column.

    The mapping is mutated in place. If the column has no entry yet, an
    empty one is created first; keys already present in the entry are
    overwritten by the new config.

    Parameters
    ----------
    column_config_mapping : ColumnConfigMapping
        The column config mapping to update.
    column : str
        The column to update the config value for.
    column_config : ColumnConfig
        The column config to update.
    """
    # setdefault creates the entry on first use and returns the existing
    # one afterwards, so a plain dict-update suffices in both cases.
    column_config_mapping.setdefault(column, {}).update(column_config)
class DataFormat(Enum):
    """DataFormat is used to determine the format of the data.

    Each member identifies the concrete Python/library container the user
    passed in, so downstream code can convert it to a dataframe and back to
    the original shape. Member values are assigned by ``auto()`` and carry
    no meaning beyond identity — do not rely on their numeric values.
    """

    UNKNOWN = auto()
    EMPTY = auto()  # None
    PANDAS_DATAFRAME = auto()  # pd.DataFrame
    PANDAS_SERIES = auto()  # pd.Series
    PANDAS_INDEX = auto()  # pd.Index
    NUMPY_LIST = auto()  # np.array[Scalar]
    NUMPY_MATRIX = auto()  # np.array[List[Scalar]]
    PYARROW_TABLE = auto()  # pyarrow.Table
    SNOWPARK_OBJECT = auto()  # Snowpark DataFrame, Table, List[Row]
    PYSPARK_OBJECT = auto()  # pyspark.DataFrame
    PANDAS_STYLER = auto()  # pandas Styler
    LIST_OF_RECORDS = auto()  # List[Dict[str, Scalar]]
    LIST_OF_ROWS = auto()  # List[List[Scalar]]
    LIST_OF_VALUES = auto()  # List[Scalar]
    TUPLE_OF_VALUES = auto()  # Tuple[Scalar]
    SET_OF_VALUES = auto()  # Set[Scalar]
    COLUMN_INDEX_MAPPING = auto()  # {column: {index: value}}
    COLUMN_VALUE_MAPPING = auto()  # {column: List[values]}
    COLUMN_SERIES_MAPPING = auto()  # {column: Series(values)}
    KEY_VALUE_DICT = auto()  # {index: value}
def is_colum_type_arrow_incompatible(column: Series[Any] | Index) -> bool:
    """Return True if the column type is known to cause issues during Arrow conversion."""
    from pandas.api.types import infer_dtype, is_dict_like, is_list_like

    # Complex dtypes (complex64/128/256) are never Arrow-compatible:
    if column.dtype.kind in ["c"]:
        return True

    if column.dtype != "object":
        # Any other concrete (non-object) dtype is assumed compatible.
        return False

    # Mixed-type columns always have dtype "object"; the actual value type
    # has to be inferred from the data itself:
    # https://pandas.pydata.org/docs/reference/api/pandas.api.types.infer_dtype.html
    detected = infer_dtype(column, skipna=True)

    if detected in ["mixed-integer", "complex"]:
        return True

    if detected != "mixed":
        # A uniform object type (e.g. all strings) — assume compatible.
        return False

    # "mixed" covers most of the complex/custom types (objects, dicts, lists, ...)
    if len(column) == 0 or not hasattr(column, "iloc"):
        # The column seems to be invalid, so we assume it is incompatible.
        # This should be unreachable in practice, since an empty column
        # cannot infer as "mixed".
        return True

    # Probe the first value to decide whether it is a supported list-like type.
    sample = column.iloc[0]
    if (
        not is_list_like(sample)
        # dicts are list-like, but have issues in Arrow JS (see comments in Quiver.ts)
        or is_dict_like(sample)
        # Frozensets are list-like, but are not compatible with pyarrow.
        or isinstance(sample, frozenset)
    ):
        return True

    return False
The provided code snippet includes necessary dependencies for implementing the `apply_data_specific_configs` function. Write a Python function `def apply_data_specific_configs( columns_config: ColumnConfigMapping, data_df: DataFrame, data_format: DataFormat, check_arrow_compatibility: bool = False, ) -> None` to solve the following problem:
Apply data specific configurations to the provided dataframe. This will apply inplace changes to the dataframe and the column configurations depending on the data format. Parameters ---------- columns_config : ColumnConfigMapping A mapping of column names/ids to column configurations. data_df : pd.DataFrame The dataframe to apply the configurations to. data_format : DataFormat The format of the data. check_arrow_compatibility : bool Whether to check if the data is compatible with arrow.
Here is the function:
def apply_data_specific_configs(
    columns_config: ColumnConfigMapping,
    data_df: DataFrame,
    data_format: DataFormat,
    check_arrow_compatibility: bool = False,
) -> None:
    """Apply data specific configurations to the provided dataframe.

    Both ``data_df`` and ``columns_config`` are mutated in place, depending
    on the format the original data came in.

    Parameters
    ----------
    columns_config : ColumnConfigMapping
        A mapping of column names/ids to column configurations.
    data_df : pd.DataFrame
        The dataframe to apply the configurations to.
    data_format : DataFormat
        The format of the data.
    check_arrow_compatibility : bool
        Whether to check if the data is compatible with arrow.
    """
    import pandas as pd

    if check_arrow_compatibility:
        # Columns Arrow cannot serialize are made read-only and converted
        # to their string representation:
        for name, values in data_df.items():
            if is_colum_type_arrow_incompatible(values):
                update_column_config(columns_config, name, {"disabled": True})
                data_df[name] = values.astype("string")

    # Pandas attaches a default range index to every data structure, but for
    # most non-pandas inputs it carries no information — hide it by default:
    hide_index_formats = (
        DataFormat.SET_OF_VALUES,
        DataFormat.TUPLE_OF_VALUES,
        DataFormat.LIST_OF_VALUES,
        DataFormat.NUMPY_LIST,
        DataFormat.NUMPY_MATRIX,
        DataFormat.LIST_OF_RECORDS,
        DataFormat.LIST_OF_ROWS,
        DataFormat.COLUMN_VALUE_MAPPING,
    )
    if data_format in hide_index_formats:
        update_column_config(columns_config, INDEX_IDENTIFIER, {"hidden": True})

    # Pandas auto-names the single column "0" for these formats; rename it
    # to the more descriptive "value":
    rename_value_formats = (
        DataFormat.SET_OF_VALUES,
        DataFormat.TUPLE_OF_VALUES,
        DataFormat.LIST_OF_VALUES,
        DataFormat.NUMPY_LIST,
        DataFormat.KEY_VALUE_DICT,
    )
    if data_format in rename_value_formats:
        data_df.rename(columns={0: "value"}, inplace=True)

    if not isinstance(data_df.index, pd.RangeIndex):
        # A non-range index must be provided (and unique) when editing,
        # so mark it as required:
        update_column_config(columns_config, INDEX_IDENTIFIER, {"required": True})
from __future__ import annotations
import json
from enum import Enum
from typing import TYPE_CHECKING, Dict, Final, Literal, Mapping, Union
from typing_extensions import TypeAlias
from streamlit.elements.lib.column_types import ColumnConfig, ColumnType
from streamlit.elements.lib.dicttools import remove_none_values
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.type_util import DataFormat, is_colum_type_arrow_incompatible
ColumnConfigMapping: TypeAlias = Dict[Union[IndexIdentifierType, str], ColumnConfig]
def _convert_column_config_to_json(column_config_mapping: ColumnConfigMapping) -> str:
    """Serialize the column config mapping into a JSON string.

    ``None`` values are dropped, and columns addressed by numerical position
    are prefixed so they can be told apart from string column names.

    Raises
    ------
    StreamlitAPIException
        If the mapping cannot be represented as JSON (e.g. contains NaN).
    """
    try:
        cleaned = remove_none_values(column_config_mapping)
        prefixed = {
            (f"{_NUMERICAL_POSITION_PREFIX}{str(key)}" if isinstance(key, int) else key): cfg
            for key, cfg in cleaned.items()
        }
        # allow_nan=False makes non-finite floats fail loudly instead of
        # emitting invalid JSON.
        return json.dumps(prefixed, allow_nan=False)
    except ValueError as ex:
        raise StreamlitAPIException(
            f"The provided column config cannot be serialized into JSON: {ex}"
        ) from ex
The provided code snippet includes necessary dependencies for implementing the `marshall_column_config` function. Write a Python function `def marshall_column_config( proto: ArrowProto, column_config_mapping: ColumnConfigMapping ) -> None` to solve the following problem:
Marshall the column config into the Arrow proto. Parameters ---------- proto : ArrowProto The proto to marshall into. column_config_mapping : ColumnConfigMapping The column config to marshall.
Here is the function:
def marshall_column_config(
    proto: ArrowProto, column_config_mapping: ColumnConfigMapping
) -> None:
    """Marshall the column config into the Arrow proto.

    Parameters
    ----------
    proto : ArrowProto
        The proto to marshall into. Its ``columns`` field receives the
        JSON-serialized column config.
    column_config_mapping : ColumnConfigMapping
        The column config to marshall.
    """
    # The frontend consumes the column config as a JSON string, not as a
    # structured proto message.
    proto.columns = _convert_column_config_to_json(column_config_mapping)
from __future__ import annotations
import contextlib
The provided code snippet includes necessary dependencies for implementing the `configure_streamlit_plotly_theme` function. Write a Python function `def configure_streamlit_plotly_theme() -> None` to solve the following problem:
Configure the Streamlit chart theme for Plotly. The theme is only configured if Plotly is installed.
Here is the function:
def configure_streamlit_plotly_theme() -> None:
    """Configure the Streamlit chart theme for Plotly.

    Registers a "streamlit" template in ``plotly.io.templates`` and makes it
    the default. The theme is only configured if Plotly is installed.
    """
    # We do nothing if Plotly is not installed. This is expected since Plotly is an optional dependency.
    with contextlib.suppress(ImportError):
        import plotly.graph_objects as go
        import plotly.io as pio

        # This is the streamlit theme for plotly where we pass in a template.data
        # and a template.layout.
        # Template.data is for changing specific graph properties in a general aspect
        # such as Contour plots or Waterfall plots.
        # Template.layout is for changing things such as the x axis and fonts and other
        # general layout properties for general graphs.
        # We pass in temporary colors to the frontend and the frontend will replace
        # those colors because we want to change colors based on the background color.
        # Start at #0000001 because developers may be likely to use #000000
        CATEGORY_0 = "#000001"
        CATEGORY_1 = "#000002"
        CATEGORY_2 = "#000003"
        CATEGORY_3 = "#000004"
        CATEGORY_4 = "#000005"
        CATEGORY_5 = "#000006"
        CATEGORY_6 = "#000007"
        CATEGORY_7 = "#000008"
        CATEGORY_8 = "#000009"
        CATEGORY_9 = "#000010"
        SEQUENTIAL_0 = "#000011"
        SEQUENTIAL_1 = "#000012"
        SEQUENTIAL_2 = "#000013"
        SEQUENTIAL_3 = "#000014"
        SEQUENTIAL_4 = "#000015"
        SEQUENTIAL_5 = "#000016"
        SEQUENTIAL_6 = "#000017"
        SEQUENTIAL_7 = "#000018"
        SEQUENTIAL_8 = "#000019"
        SEQUENTIAL_9 = "#000020"
        DIVERGING_0 = "#000021"
        DIVERGING_1 = "#000022"
        DIVERGING_2 = "#000023"
        DIVERGING_3 = "#000024"
        DIVERGING_4 = "#000025"
        DIVERGING_5 = "#000026"
        DIVERGING_6 = "#000027"
        DIVERGING_7 = "#000028"
        DIVERGING_8 = "#000029"
        DIVERGING_9 = "#000030"
        DIVERGING_10 = "#000031"
        INCREASING = "#000032"
        DECREASING = "#000033"
        TOTAL = "#000034"
        # NOTE(review): "#000035" is skipped in this sequence — presumably
        # reserved for another placeholder; confirm against the frontend
        # color-replacement table.
        GRAY_70 = "#000036"
        GRAY_90 = "#000037"
        BG_COLOR = "#000038"
        FADED_TEXT_05 = "#000039"
        BG_MIX = "#000040"

        # Plotly represents continuous colorscale through an array of pairs.
        # The pair's first index is the starting point and the next pair's first index is the end point.
        # The pair's second index is the starting color and the next pair's second index is the end color.
        # For more information, please refer to https://plotly.com/python/colorscales/
        streamlit_colorscale = [
            [0.0, SEQUENTIAL_0],
            [0.1111111111111111, SEQUENTIAL_1],
            [0.2222222222222222, SEQUENTIAL_2],
            [0.3333333333333333, SEQUENTIAL_3],
            [0.4444444444444444, SEQUENTIAL_4],
            [0.5555555555555556, SEQUENTIAL_5],
            [0.6666666666666666, SEQUENTIAL_6],
            [0.7777777777777778, SEQUENTIAL_7],
            [0.8888888888888888, SEQUENTIAL_8],
            [1.0, SEQUENTIAL_9],
        ]

        # Per-trace-type overrides (template.data) plus global layout
        # defaults (template.layout), registered under the "streamlit" name.
        pio.templates["streamlit"] = go.layout.Template(
            data=go.layout.template.Data(
                candlestick=[
                    go.layout.template.data.Candlestick(
                        decreasing=go.candlestick.Decreasing(
                            line=go.candlestick.decreasing.Line(color=DECREASING)
                        ),
                        increasing=go.candlestick.Increasing(
                            line=go.candlestick.increasing.Line(color=INCREASING)
                        ),
                    )
                ],
                contour=[
                    go.layout.template.data.Contour(colorscale=streamlit_colorscale)
                ],
                contourcarpet=[
                    go.layout.template.data.Contourcarpet(
                        colorscale=streamlit_colorscale
                    )
                ],
                heatmap=[
                    go.layout.template.data.Heatmap(colorscale=streamlit_colorscale)
                ],
                histogram2d=[
                    go.layout.template.data.Histogram2d(colorscale=streamlit_colorscale)
                ],
                icicle=[
                    go.layout.template.data.Icicle(
                        textfont=go.icicle.Textfont(color="white")
                    )
                ],
                sankey=[
                    go.layout.template.data.Sankey(
                        textfont=go.sankey.Textfont(color=GRAY_70)
                    )
                ],
                scatter=[
                    go.layout.template.data.Scatter(
                        marker=go.scatter.Marker(line=go.scatter.marker.Line(width=0))
                    )
                ],
                table=[
                    go.layout.template.data.Table(
                        cells=go.table.Cells(
                            fill=go.table.cells.Fill(color=BG_COLOR),
                            font=go.table.cells.Font(color=GRAY_90),
                            line=go.table.cells.Line(color=FADED_TEXT_05),
                        ),
                        header=go.table.Header(
                            font=go.table.header.Font(color=GRAY_70),
                            line=go.table.header.Line(color=FADED_TEXT_05),
                            fill=go.table.header.Fill(color=BG_MIX),
                        ),
                    )
                ],
                waterfall=[
                    go.layout.template.data.Waterfall(
                        increasing=go.waterfall.Increasing(
                            marker=go.waterfall.increasing.Marker(color=INCREASING)
                        ),
                        decreasing=go.waterfall.Decreasing(
                            marker=go.waterfall.decreasing.Marker(color=DECREASING)
                        ),
                        totals=go.waterfall.Totals(
                            marker=go.waterfall.totals.Marker(color=TOTAL)
                        ),
                        connector=go.waterfall.Connector(
                            line=go.waterfall.connector.Line(color=GRAY_70, width=2)
                        ),
                    )
                ],
            ),
            layout=go.Layout(
                colorway=[
                    CATEGORY_0,
                    CATEGORY_1,
                    CATEGORY_2,
                    CATEGORY_3,
                    CATEGORY_4,
                    CATEGORY_5,
                    CATEGORY_6,
                    CATEGORY_7,
                    CATEGORY_8,
                    CATEGORY_9,
                ],
                colorscale=go.layout.Colorscale(
                    sequential=streamlit_colorscale,
                    sequentialminus=streamlit_colorscale,
                    diverging=[
                        [0.0, DIVERGING_0],
                        [0.1, DIVERGING_1],
                        [0.2, DIVERGING_2],
                        [0.3, DIVERGING_3],
                        [0.4, DIVERGING_4],
                        [0.5, DIVERGING_5],
                        [0.6, DIVERGING_6],
                        [0.7, DIVERGING_7],
                        [0.8, DIVERGING_8],
                        [0.9, DIVERGING_9],
                        [1.0, DIVERGING_10],
                    ],
                ),
                coloraxis=go.layout.Coloraxis(colorscale=streamlit_colorscale),
            ),
        )

        # Make the Streamlit template the default for all new figures.
        pio.templates.default = "streamlit"
from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Generic, Sequence, cast, overload
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
maybe_coerce_enum_sequence,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id, save_for_app_testing
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
check_python_comparable,
ensure_indexable,
is_iterable,
is_type,
maybe_raise_label_warnings,
to_key,
)
# NOTE(review): typing overload stub — the `@overload` decorator appears to
# have been stripped in this copy; confirm against the original module.
# This variant: no defaults given -> may return None (no pre-selected indices).
def _check_and_convert_to_indices(  # type: ignore[misc]
    opt: Sequence[Any], default_values: None
) -> list[int] | None:
    ...
from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Generic, Sequence, cast, overload
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
maybe_coerce_enum_sequence,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id, save_for_app_testing
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
check_python_comparable,
ensure_indexable,
is_iterable,
is_type,
maybe_raise_label_warnings,
to_key,
)
# NOTE(review): typing overload stub — the `@overload` decorator appears to
# have been stripped in this copy; confirm against the original module.
# This variant: concrete default(s) given -> always returns a list of indices.
def _check_and_convert_to_indices(
    opt: Sequence[Any], default_values: Sequence[Any] | Any
) -> list[int]:
    ...
from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Generic, Sequence, cast, overload
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
maybe_coerce_enum_sequence,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id, save_for_app_testing
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
check_python_comparable,
ensure_indexable,
is_iterable,
is_type,
maybe_raise_label_warnings,
to_key,
)
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.

    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)

    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """

    def __repr__(self) -> str:
        # Delegate to the shared util helper for a consistent repr format.
        return util.repr_(self)
# NOTE(review): the following are typing overload stubs for is_type; the
# `@overload` decorators appear to have been stripped in this copy — confirm
# against the original module.
# Passing the pydeck FQN literal narrows the result to TypeGuard[Deck]:
def is_type(
    obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
    ...
# Passing the plotly FQN literal narrows the result to TypeGuard[Figure]:
def is_type(
    obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
    ...
# Generic fallback: any other string or compiled regex yields a plain bool.
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
    ...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
    """Check an object's type by fully-qualified name, without importing it.

    Parameters
    ----------
    obj : object
        The object to type-check.
    fqn_type_pattern : str or regex
        The fully-qualified type string or a regular expression.
        Regexes should start with `^` and end with `$`.

    Example
    -------
    To check whether something is a Matplotlib Figure without importing
    matplotlib, use:

    >>> is_type(foo, 'matplotlib.figure.Figure')
    """
    fqn = get_fqn_type(obj)
    # Plain strings are compared exactly; anything else is assumed to be a
    # compiled regex and matched against the FQN.
    if not isinstance(fqn_type_pattern, str):
        return fqn_type_pattern.match(fqn) is not None
    return fqn == fqn_type_pattern
The provided code snippet includes necessary dependencies for implementing the `_check_and_convert_to_indices` function. Write a Python function `def _check_and_convert_to_indices( opt: Sequence[Any], default_values: Sequence[Any] | Any | None ) -> list[int] | None` to solve the following problem:
Perform validation checks and return indices based on the default values.
Here is the function:
def _check_and_convert_to_indices(
    opt: Sequence[Any], default_values: Sequence[Any] | Any | None
) -> list[int] | None:
    """Perform validation checks and return indices based on the default values."""
    # Only treat None as "no defaults" if None isn't itself one of the
    # options; otherwise it must be resolved to an index below.
    if default_values is None and None not in opt:
        return None

    if not isinstance(default_values, list):
        # This if is done before others because calling if not x (done
        # right below) when x is of type pd.Series() or np.array() throws a
        # ValueError exception.
        if is_type(default_values, "numpy.ndarray") or is_type(
            default_values, "pandas.core.series.Series"
        ):
            default_values = list(cast(Sequence[Any], default_values))
        elif not default_values or default_values in opt:
            # A falsy scalar, or a scalar that is itself a valid option,
            # gets wrapped into a single-element list.
            default_values = [default_values]
        else:
            # Any other iterable (tuple, set, generator, ...) is materialized.
            default_values = list(default_values)

    # Every default must be a valid option; fail loudly otherwise.
    for value in default_values:
        if value not in opt:
            raise StreamlitAPIException(
                "Every Multiselect default value must exist in options"
            )

    # list.index returns the position of the first match for each default.
    return [opt.index(value) for value in default_values]
from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Generic, Sequence, cast, overload
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
maybe_coerce_enum_sequence,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id, save_for_app_testing
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
check_python_comparable,
ensure_indexable,
is_iterable,
is_type,
maybe_raise_label_warnings,
to_key,
)
def is_iterable(obj: object) -> TypeGuard[Iterable[Any]]:
    """Return True if ``obj`` supports iteration (``iter(obj)`` succeeds)."""
    try:
        # EAFP: asking iter() directly is the authoritative iterability
        # check — a non-iterable raises TypeError.
        iter(obj)  # type: ignore[call-overload]
        return True
    except TypeError:
        return False
def _get_default_count(default: Sequence[Any] | Any | None) -> int:
    """Return how many default values were provided (0 for None, 1 for a
    scalar, otherwise the length of the sequence)."""
    if default is None:
        return 0
    # A non-iterable scalar counts as exactly one pre-selected value:
    return len(cast(Sequence[Any], default)) if is_iterable(default) else 1
from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Generic, Sequence, cast, overload
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
maybe_coerce_enum_sequence,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id, save_for_app_testing
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
check_python_comparable,
ensure_indexable,
is_iterable,
is_type,
maybe_raise_label_warnings,
to_key,
)
def _get_over_max_options_message(current_selections: int, max_selections: int):
curr_selections_noun = "option" if current_selections == 1 else "options"
max_selections_noun = "option" if max_selections == 1 else "options"
return f"""
Multiselect has {current_selections} {curr_selections_noun} selected but `max_selections`
is set to {max_selections}. This happened because you either gave too many options to `default`
or you manipulated the widget's state through `st.session_state`. Note that
the latter can happen before the line indicated in the traceback.
Please select at most {max_selections} {max_selections_noun}.
""" | null |
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, Literal, cast
from streamlit import runtime
from streamlit.elements.form import is_in_form
from streamlit.elements.image import AtomicImage, WidthBehaviour, image_to_url
from streamlit.elements.utils import check_callback_rules, check_session_state_rules
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Block_pb2 import Block as BlockProto
from streamlit.proto.ChatInput_pb2 import ChatInput as ChatInputProto
from streamlit.proto.Common_pb2 import StringTriggerValue as StringTriggerValueProto
from streamlit.proto.RootContainer_pb2 import RootContainer
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id, save_for_app_testing
from streamlit.string_util import is_emoji
from streamlit.type_util import Key, to_key
class PresetNames(str, Enum):
    # Chat-message author roles with built-in frontend styling/avatars.
    # "ai" and "human" are accepted aliases for "assistant" and "user".
    USER = "user"
    ASSISTANT = "assistant"
    AI = "ai"  # Equivalent to assistant
    HUMAN = "human"  # Equivalent to user
AtomicImage: TypeAlias = Union[PILImage, "npt.NDArray[Any]", io.BytesIO, str]
class WidthBehaviour(IntEnum):
    """
    Special values that are recognized by the frontend and allow us to change the
    behavior of the displayed image.
    """

    # Negative sentinels so they can never collide with a real pixel width.
    ORIGINAL = -1
    COLUMN = -2
    AUTO = -3


# Attach per-member docstrings (IntEnum members don't take docstrings inline).
WidthBehaviour.ORIGINAL.__doc__ = """Display the image at its original width"""
WidthBehaviour.COLUMN.__doc__ = (
    """Display the image at the width of the column it's in."""
)
WidthBehaviour.AUTO.__doc__ = """Display the image at its original width, unless it
would exceed the width of its column in which case clamp it to
its column width"""
def image_to_url(
    image: AtomicImage,
    width: int,
    clamp: bool,
    channels: Channels,
    output_format: ImageFormatOrAuto,
    image_id: str,
) -> str:
    """Return a URL that an image can be served from.

    If `image` is already a URL, return it unmodified.
    Otherwise, add the image to the MediaFileManager and return the URL.

    (When running in "raw" mode, we won't actually load data into the
    MediaFileManager, and we'll return an empty URL.)

    Accepted inputs (dispatched on in order below): URL/path/SVG string,
    PIL image, BytesIO, numpy array, or raw bytes.
    """
    import numpy as np
    from PIL import Image, ImageFile

    image_data: bytes

    # Strings
    if isinstance(image, str):
        if not os.path.isfile(image) and url_util.is_url(
            image, allowed_schemas=("http", "https", "data")
        ):
            # If it's a url, return it directly.
            return image

        if image.endswith(".svg") and os.path.isfile(image):
            # Unpack local SVG image file to an SVG string
            with open(image) as textfile:
                image = textfile.read()

        # Following regex allows svg image files to start either via a "<?xml...>" tag
        # eventually followed by a "<svg...>" tag or directly starting with a "<svg>" tag
        if re.search(r"(^\s?(<\?xml[\s\S]*<svg\s)|^\s?<svg\s|^\s?<svg>\s)", image):
            if "xmlns" not in image:
                # The xmlns attribute is required for SVGs to render in an img tag.
                # If it's not present, we add to the first SVG tag:
                image = image.replace(
                    "<svg", '<svg xmlns="http://www.w3.org/2000/svg" ', 1
                )
            # Convert to base64 to prevent issues with encoding:
            import base64

            image_b64_encoded = base64.b64encode(image.encode("utf-8")).decode("utf-8")
            # Return SVG as data URI:
            return f"data:image/svg+xml;base64,{image_b64_encoded}"

        # Otherwise, try to open it as a file.
        try:
            with open(image, "rb") as f:
                image_data = f.read()
        except Exception:
            # When we aren't able to open the image file, we still pass the path to
            # the MediaFileManager - its storage backend may have access to files
            # that Streamlit does not.
            import mimetypes

            mimetype, _ = mimetypes.guess_type(image)
            if mimetype is None:
                mimetype = "application/octet-stream"

            # NOTE(review): unlike the success path at the end of this function,
            # this branch calls runtime.get_instance() without a runtime.exists()
            # guard — confirm it cannot be reached in "raw" mode.
            url = runtime.get_instance().media_file_mgr.add(image, mimetype, image_id)
            caching.save_media_data(image, mimetype, image_id)
            return url

    # PIL Images
    elif isinstance(image, (ImageFile.ImageFile, Image.Image)):
        format = _validate_image_format_string(image, output_format)
        image_data = _PIL_to_bytes(image, format)

    # BytesIO
    # Note: This doesn't support SVG. We could convert to png (cairosvg.svg2png)
    # or just decode BytesIO to string and handle that way.
    elif isinstance(image, io.BytesIO):
        image_data = _BytesIO_to_bytes(image)

    # Numpy Arrays (ie opencv)
    elif isinstance(image, np.ndarray):
        image = _clip_image(
            _verify_np_shape(image),
            clamp,
        )

        if channels == "BGR":
            if len(image.shape) == 3:
                # Reorder the color channels from BGR to RGB by index.
                image = image[:, :, [2, 1, 0]]
            else:
                raise StreamlitAPIException(
                    'When using `channels="BGR"`, the input image should '
                    "have exactly 3 color channels"
                )

        # Depending on the version of numpy that the user has installed, the
        # typechecker may not be able to deduce that indexing into a
        # `npt.NDArray[Any]` returns a `npt.NDArray[Any]`, so we need to
        # ignore redundant casts below.
        image_data = _np_array_to_bytes(
            array=cast("npt.NDArray[Any]", image),  # type: ignore[redundant-cast]
            output_format=output_format,
        )

    # Raw bytes
    else:
        image_data = image

    # Determine the image's format, resize it, and get its mimetype
    image_format = _validate_image_format_string(image_data, output_format)
    image_data = _ensure_image_size_and_format(image_data, width, image_format)
    mimetype = _get_image_format_mimetype(image_format)

    if runtime.exists():
        url = runtime.get_instance().media_file_mgr.add(image_data, mimetype, image_id)
        caching.save_media_data(image_data, mimetype, image_id)
        return url
    else:
        # When running in "raw mode", we can't access the MediaFileManager.
        return ""
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.

    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)

    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """

    def __repr__(self) -> str:
        # Delegate to the shared util helper for a consistent repr format.
        return util.repr_(self)
def is_emoji(text: str) -> bool:
    """Check if input string is a valid emoji."""
    # Cheap pre-filter before the expensive emoji-table import:
    if not _contains_special_chars(text):
        return False

    from streamlit.emojis import ALL_EMOJIS

    # Strip the variation selector (U+FE0F) so both presentation forms match.
    normalized = text.replace("\U0000FE0F", "")
    return normalized in ALL_EMOJIS
The provided code snippet includes necessary dependencies for implementing the `_process_avatar_input` function. Write a Python function `def _process_avatar_input( avatar: str | AtomicImage | None, delta_path: str ) -> tuple[BlockProto.ChatMessage.AvatarType.ValueType, str]` to solve the following problem:
Detects the avatar type and prepares the avatar data for the frontend. Parameters ---------- avatar : The avatar that was provided by the user. delta_path : str The delta path is used as media ID when a local image is served via the media file manager. Returns ------- Tuple[AvatarType, str] The detected avatar type and the prepared avatar data.
Here is the function:
def _process_avatar_input(
    avatar: str | AtomicImage | None, delta_path: str
) -> tuple[BlockProto.ChatMessage.AvatarType.ValueType, str]:
    """Detects the avatar type and prepares the avatar data for the frontend.

    Parameters
    ----------
    avatar :
        The avatar that was provided by the user. May be None, a preset role
        name, an emoji string, or anything image_to_url can load.
    delta_path : str
        The delta path is used as media ID when a local image is served via the
        media file manager.

    Returns
    -------
    Tuple[AvatarType, str]
        The detected avatar type and the prepared avatar data.

    Raises
    ------
    StreamlitAPIException
        If the fallback image conversion fails.
    """
    AvatarType = BlockProto.ChatMessage.AvatarType

    if avatar is None:
        # No avatar: frontend falls back to its default icon.
        return AvatarType.ICON, ""
    elif isinstance(avatar, str) and avatar in {item.value for item in PresetNames}:
        # On the frontend, we only support "assistant" and "user" for the avatar.
        # The "ai"/"human" aliases are collapsed onto those two values here.
        return (
            AvatarType.ICON,
            "assistant"
            if avatar in [PresetNames.AI, PresetNames.ASSISTANT]
            else "user",
        )
    elif isinstance(avatar, str) and is_emoji(avatar):
        return AvatarType.EMOJI, avatar
    else:
        # Anything else is treated as image input and served via a URL.
        try:
            return AvatarType.IMAGE, image_to_url(
                avatar,
                width=WidthBehaviour.ORIGINAL,
                clamp=False,
                channels="RGB",
                output_format="auto",
                image_id=delta_path,
            )
        except Exception as ex:
            raise StreamlitAPIException(
                "Failed to load the provided avatar value as an image."
            ) from ex
from __future__ import annotations
import json
from dataclasses import dataclass
from decimal import Decimal
from typing import (
TYPE_CHECKING,
Any,
Dict,
Final,
Iterable,
List,
Literal,
Mapping,
Set,
Tuple,
TypedDict,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import TypeAlias
from streamlit import logger as _logger
from streamlit import type_util
from streamlit.deprecation_util import deprecate_func_name
from streamlit.elements.form import current_form_id
from streamlit.elements.lib.column_config_utils import (
INDEX_IDENTIFIER,
ColumnConfigMapping,
ColumnConfigMappingInput,
ColumnDataKind,
DataframeSchema,
apply_data_specific_configs,
determine_dataframe_schema,
is_type_compatible,
marshall_column_config,
process_config_mapping,
update_column_config,
)
from streamlit.elements.lib.pandas_styler_utils import marshall_styler
from streamlit.elements.utils import check_callback_rules, check_session_state_rules
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import DataFormat, DataFrameGenericAlias, Key, is_type, to_key
from streamlit.util import calc_md5
class EditingState(TypedDict, total=False):
    """A dictionary representing the current state of the data editor.

    Attributes
    ----------
    edited_rows : Dict[int, Dict[str, str | int | float | bool | None]]
        A hierarchical mapping of edited cells based on:
        row position -> column name -> value.
    added_rows : List[Dict[str, str | int | float | bool | None]]
        A list of added rows, where each row is a mapping from column name
        to the cell value.
    deleted_rows : List[int]
        A list of deleted rows, where each row is the numerical position of
        the deleted row.
    """

    # row position -> column name -> edited cell value
    edited_rows: dict[int, dict[str, str | int | float | bool | None]]
    # each entry is one new row: column name -> cell value
    added_rows: list[dict[str, str | int | float | bool | None]]
    # numerical positions of rows the user removed
    deleted_rows: list[int]
def _apply_cell_edits(
    df: pd.DataFrame,
    edited_rows: Mapping[int, Mapping[str, str | int | float | bool | None]],
    dataframe_schema: DataframeSchema,
) -> None:
    """Write the user's cell edits into the dataframe (inplace).

    Parameters
    ----------
    df : pd.DataFrame
        The dataframe to apply the cell edits to.
    edited_rows : Mapping[int, Mapping[str, str | int | float | bool | None]]
        A hierarchical mapping based on row position -> column name -> value.
    dataframe_schema: DataframeSchema
        The schema of the dataframe.
    """
    for row_key, row_changes in edited_rows.items():
        row_pos = int(row_key)
        for col_name, raw_value in row_changes.items():
            if col_name == INDEX_IDENTIFIER:
                # The edited cell belongs to the index.
                # TODO(lukasmasuch): To support multi-index in the future:
                # use a tuple of values here instead of a single value
                df.index.values[row_pos] = _parse_value(
                    raw_value, dataframe_schema[INDEX_IDENTIFIER]
                )
            else:
                col_pos = df.columns.get_loc(col_name)
                df.iat[row_pos, col_pos] = _parse_value(
                    raw_value, dataframe_schema[col_name]
                )
def _apply_row_additions(
    df: pd.DataFrame,
    added_rows: list[dict[str, Any]],
    dataframe_schema: DataframeSchema,
) -> None:
    """Apply row additions to the provided dataframe (inplace).

    Parameters
    ----------
    df : pd.DataFrame
        The dataframe to apply the row additions to.
    added_rows : List[Dict[str, Any]]
        A list of row additions. Each row addition is a dictionary with the
        column position as key and the new cell value as value.
    dataframe_schema: DataframeSchema
        The schema of the dataframe.
    """
    if not added_rows:
        return

    # Lazy import to keep module import time low.
    import pandas as pd

    # This is only used if the dataframe has a range index:
    # There seems to be a bug in older pandas versions with RangeIndex in
    # combination with loc. As a workaround, we manually track the values here:
    range_index_stop = None
    range_index_step = None
    if isinstance(df.index, pd.RangeIndex):
        range_index_stop = df.index.stop
        range_index_step = df.index.step

    for added_row in added_rows:
        index_value = None
        # Start with an all-None row and fill in the provided cells below.
        new_row: list[Any] = [None for _ in range(df.shape[1])]
        for col_name in added_row.keys():
            value = added_row[col_name]
            if col_name == INDEX_IDENTIFIER:
                # TODO(lukasmasuch): To support multi-index in the future:
                # use a tuple of values here instead of a single value
                index_value = _parse_value(value, dataframe_schema[INDEX_IDENTIFIER])
            else:
                col_pos = df.columns.get_loc(col_name)
                new_row[col_pos] = _parse_value(value, dataframe_schema[col_name])
        # Append the new row to the dataframe
        if range_index_stop is not None:
            df.loc[range_index_stop, :] = new_row
            # Increment to the next range index value
            range_index_stop += range_index_step
        elif index_value is not None:
            # TODO(lukasmasuch): we are only adding rows that have a non-None index
            # value to prevent issues in the frontend component. Also, it just overwrites
            # the row in case the index value already exists in the dataframe.
            # In the future, it would be better to require users to provide unique
            # non-None values for the index with some kind of visual indications.
            df.loc[index_value, :] = new_row
def _apply_row_deletions(df: pd.DataFrame, deleted_rows: list[int]) -> None:
"""Apply row deletions to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the row deletions to.
deleted_rows : List[int]
A list of row numbers to delete.
"""
# Drop rows based in numeric row positions
df.drop(df.index[deleted_rows], inplace=True)
DataframeSchema: TypeAlias = Dict[str, ColumnDataKind]
The provided code snippet includes necessary dependencies for implementing the `_apply_dataframe_edits` function. Write a Python function `def _apply_dataframe_edits( df: pd.DataFrame, data_editor_state: EditingState, dataframe_schema: DataframeSchema, ) -> None` to solve the following problem:
Apply edits to the provided dataframe (inplace). This includes cell edits, row additions and row deletions. Parameters ---------- df : pd.DataFrame The dataframe to apply the edits to. data_editor_state : EditingState The editing state of the data editor component. dataframe_schema: DataframeSchema The schema of the dataframe.
Here is the function:
def _apply_dataframe_edits(
    df: pd.DataFrame,
    data_editor_state: EditingState,
    dataframe_schema: DataframeSchema,
) -> None:
    """Apply all pending editor changes to the dataframe (inplace).

    Cell edits, row additions, and row deletions are applied in that order.

    Parameters
    ----------
    df : pd.DataFrame
        The dataframe to apply the edits to.
    data_editor_state : EditingState
        The editing state of the data editor component.
    dataframe_schema: DataframeSchema
        The schema of the dataframe.
    """
    edited_rows = data_editor_state.get("edited_rows")
    if edited_rows:
        _apply_cell_edits(df, edited_rows, dataframe_schema)

    added_rows = data_editor_state.get("added_rows")
    if added_rows:
        _apply_row_additions(df, added_rows, dataframe_schema)

    deleted_rows = data_editor_state.get("deleted_rows")
    if deleted_rows:
        _apply_row_deletions(df, deleted_rows)
178,402 | from __future__ import annotations
import json
from dataclasses import dataclass
from decimal import Decimal
from typing import (
TYPE_CHECKING,
Any,
Dict,
Final,
Iterable,
List,
Literal,
Mapping,
Set,
Tuple,
TypedDict,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import TypeAlias
from streamlit import logger as _logger
from streamlit import type_util
from streamlit.deprecation_util import deprecate_func_name
from streamlit.elements.form import current_form_id
from streamlit.elements.lib.column_config_utils import (
INDEX_IDENTIFIER,
ColumnConfigMapping,
ColumnConfigMappingInput,
ColumnDataKind,
DataframeSchema,
apply_data_specific_configs,
determine_dataframe_schema,
is_type_compatible,
marshall_column_config,
process_config_mapping,
update_column_config,
)
from streamlit.elements.lib.pandas_styler_utils import marshall_styler
from streamlit.elements.utils import check_callback_rules, check_session_state_rules
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import DataFormat, DataFrameGenericAlias, Key, is_type, to_key
from streamlit.util import calc_md5
# NOTE(review): the following two stub definitions look like typing overloads
# for `is_type`; their `@overload` decorators appear to have been lost during
# extraction — confirm against the original module before relying on them.
def is_type(
    obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
    ...


def is_type(
    obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
    ...


def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
    ...


def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
    """Check type without importing expensive modules.

    Parameters
    ----------
    obj : object
        The object to type-check.
    fqn_type_pattern : str or regex
        The fully-qualified type string or a regular expression.
        Regexes should start with `^` and end with `$`.

    Example
    -------
    To check whether something is a Matplotlib Figure without importing
    matplotlib, use:

    >>> is_type(foo, 'matplotlib.figure.Figure')
    """
    # Compare against the object's fully-qualified type name so the target
    # module never has to be imported here.
    fqn_type = get_fqn_type(obj)
    if isinstance(fqn_type_pattern, str):
        # Exact string match.
        return fqn_type_pattern == fqn_type
    else:
        # Regex match against the fully-qualified name.
        return fqn_type_pattern.match(fqn_type) is not None
The provided code snippet includes necessary dependencies for implementing the `_is_supported_index` function. Write a Python function `def _is_supported_index(df_index: pd.Index) -> bool` to solve the following problem:
Check if the index is supported by the data editor component. Parameters ---------- df_index : pd.Index The index to check. Returns ------- bool True if the index is supported, False otherwise.
Here is the function:
def _is_supported_index(df_index: pd.Index) -> bool:
    """Return True if the given index type can be used with the data editor.

    Parameters
    ----------
    df_index : pd.Index
        The index to check.

    Returns
    -------
    bool
        True if the index is supported, False otherwise.
    """
    import pandas as pd

    supported_index_types = [
        pd.RangeIndex,
        pd.Index,
        pd.DatetimeIndex,
        # Categorical index doesn't work since arrow
        # does serialize the options:
        # pd.CategoricalIndex,
        # Interval type isn't editable currently:
        # pd.IntervalIndex,
        # Period type isn't editable currently:
        # pd.PeriodIndex,
    ]
    if type(df_index) in supported_index_types:
        return True

    # These deprecated numeric index classes are checked by fully-qualified
    # name (without importing them), since they are planned to be removed soon.
    deprecated_index_names = (
        "pandas.core.indexes.numeric.Int64Index",
        "pandas.core.indexes.numeric.Float64Index",
        "pandas.core.indexes.numeric.UInt64Index",
    )
    return any(is_type(df_index, name) for name in deprecated_index_names)
178,403 | from __future__ import annotations
import json
from dataclasses import dataclass
from decimal import Decimal
from typing import (
TYPE_CHECKING,
Any,
Dict,
Final,
Iterable,
List,
Literal,
Mapping,
Set,
Tuple,
TypedDict,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import TypeAlias
from streamlit import logger as _logger
from streamlit import type_util
from streamlit.deprecation_util import deprecate_func_name
from streamlit.elements.form import current_form_id
from streamlit.elements.lib.column_config_utils import (
INDEX_IDENTIFIER,
ColumnConfigMapping,
ColumnConfigMappingInput,
ColumnDataKind,
DataframeSchema,
apply_data_specific_configs,
determine_dataframe_schema,
is_type_compatible,
marshall_column_config,
process_config_mapping,
update_column_config,
)
from streamlit.elements.lib.pandas_styler_utils import marshall_styler
from streamlit.elements.utils import check_callback_rules, check_session_state_rules
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import DataFormat, DataFrameGenericAlias, Key, is_type, to_key
from streamlit.util import calc_md5
The provided code snippet includes necessary dependencies for implementing the `_fix_column_headers` function. Write a Python function `def _fix_column_headers(data_df: pd.DataFrame) -> None` to solve the following problem:
Fix the column headers of the provided dataframe in place to work correctly for data editing.
Here is the function:
def _fix_column_headers(data_df: pd.DataFrame) -> None:
"""Fix the column headers of the provided dataframe inplace to work
correctly for data editing."""
import pandas as pd
if isinstance(data_df.columns, pd.MultiIndex):
# Flatten hierarchical column headers to a single level:
data_df.columns = [
"_".join(map(str, header)) for header in data_df.columns.to_flat_index()
]
elif pd.api.types.infer_dtype(data_df.columns) != "string":
# If the column names are not all strings, we need to convert them to strings
# to avoid issues with editing:
data_df.rename(
columns={column: str(column) for column in data_df.columns},
inplace=True,
) | Fix the column headers of the provided dataframe inplace to work correctly for data editing. |
178,404 | from __future__ import annotations
import json
from dataclasses import dataclass
from decimal import Decimal
from typing import (
TYPE_CHECKING,
Any,
Dict,
Final,
Iterable,
List,
Literal,
Mapping,
Set,
Tuple,
TypedDict,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import TypeAlias
from streamlit import logger as _logger
from streamlit import type_util
from streamlit.deprecation_util import deprecate_func_name
from streamlit.elements.form import current_form_id
from streamlit.elements.lib.column_config_utils import (
INDEX_IDENTIFIER,
ColumnConfigMapping,
ColumnConfigMappingInput,
ColumnDataKind,
DataframeSchema,
apply_data_specific_configs,
determine_dataframe_schema,
is_type_compatible,
marshall_column_config,
process_config_mapping,
update_column_config,
)
from streamlit.elements.lib.pandas_styler_utils import marshall_styler
from streamlit.elements.utils import check_callback_rules, check_session_state_rules
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import DataFormat, DataFrameGenericAlias, Key, is_type, to_key
from streamlit.util import calc_md5
INDEX_IDENTIFIER: IndexIdentifierType = "_index"
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.

    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)

    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """

    def __repr__(self) -> str:
        # Delegate to the shared repr helper for a consistent debug rendering.
        return util.repr_(self)
The provided code snippet includes necessary dependencies for implementing the `_check_column_names` function. Write a Python function `def _check_column_names(data_df: pd.DataFrame)` to solve the following problem:
Check if the column names in the provided dataframe are valid. It's not allowed to have duplicate column names or column names that are named ``_index``. If the column names are not valid, a ``StreamlitAPIException`` is raised.
Here is the function:
def _check_column_names(data_df: pd.DataFrame):
    """Validate the dataframe's column names for use with the data editor.

    Duplicate column names and columns named ``_index`` are rejected with a
    ``StreamlitAPIException``.
    """
    columns = data_df.columns
    if columns.empty:
        return

    # Collect duplicated names so they can be reported to the user.
    duplicated_columns = columns[columns.duplicated()]
    if len(duplicated_columns) > 0:
        raise StreamlitAPIException(
            f"All column names are required to be unique for usage with data editor. "
            f"The following column names are duplicated: {list(duplicated_columns)}. "
            f"Please rename the duplicated columns in the provided data."
        )

    # The "_index" name is reserved for the index column.
    if INDEX_IDENTIFIER in columns:
        raise StreamlitAPIException(
            f"The column name '{INDEX_IDENTIFIER}' is reserved for the index column "
            f"and can't be used for data columns. Please rename the column in the "
            f"provided data."
        )
178,405 | from __future__ import annotations
import json
from dataclasses import dataclass
from decimal import Decimal
from typing import (
TYPE_CHECKING,
Any,
Dict,
Final,
Iterable,
List,
Literal,
Mapping,
Set,
Tuple,
TypedDict,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import TypeAlias
from streamlit import logger as _logger
from streamlit import type_util
from streamlit.deprecation_util import deprecate_func_name
from streamlit.elements.form import current_form_id
from streamlit.elements.lib.column_config_utils import (
INDEX_IDENTIFIER,
ColumnConfigMapping,
ColumnConfigMappingInput,
ColumnDataKind,
DataframeSchema,
apply_data_specific_configs,
determine_dataframe_schema,
is_type_compatible,
marshall_column_config,
process_config_mapping,
update_column_config,
)
from streamlit.elements.lib.pandas_styler_utils import marshall_styler
from streamlit.elements.utils import check_callback_rules, check_session_state_rules
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import DataFormat, DataFrameGenericAlias, Key, is_type, to_key
from streamlit.util import calc_md5
INDEX_IDENTIFIER: IndexIdentifierType = "_index"
DataframeSchema: TypeAlias = Dict[str, ColumnDataKind]
def is_type_compatible(column_type: ColumnType, data_kind: ColumnDataKind) -> bool:
    """Check if the column type is compatible with the underlying data kind.

    This check only applies to editable column types (e.g. number or text).
    Non-editable column types (e.g. bar_chart or image) can be configured for
    all data kinds (this might change in the future).

    Parameters
    ----------
    column_type : ColumnType
        The column type to check.
    data_kind : ColumnDataKind
        The data kind to check.

    Returns
    -------
    bool
        True if the column type is compatible with the data kind, False otherwise.
    """
    try:
        compatible_kinds = _EDITING_COMPATIBILITY_MAPPING[column_type]
    except KeyError:
        # Column types without an entry are not editable and therefore
        # compatible with every data kind.
        return True
    return data_kind in compatible_kinds
ColumnConfigMapping: TypeAlias = Dict[Union[IndexIdentifierType, str], ColumnConfig]
class StreamlitAPIException(MarkdownFormattedException):
"""Base class for Streamlit API exceptions.
An API exception should be thrown when user code interacts with the
Streamlit API incorrectly. (That is, when we throw an exception as a
result of a user's malformed `st.foo` call, it should be a
StreamlitAPIException or subclass.)
When displaying these exceptions on the frontend, we strip Streamlit
entries from the stack trace so that the user doesn't see a bunch of
noise related to Streamlit internals.
"""
def __repr__(self) -> str:
return util.repr_(self)
The provided code snippet includes necessary dependencies for implementing the `_check_type_compatibilities` function. Write a Python function `def _check_type_compatibilities( data_df: pd.DataFrame, columns_config: ColumnConfigMapping, dataframe_schema: DataframeSchema, )` to solve the following problem:
Check column type to data type compatibility. Iterates the index and all columns of the dataframe to check if the configured column types are compatible with the underlying data types. Parameters ---------- data_df : pd.DataFrame The dataframe to check the type compatibilities for. columns_config : ColumnConfigMapping A mapping of column to column configurations. dataframe_schema : DataframeSchema The schema of the dataframe. Raises ------ StreamlitAPIException If a configured column type is editable and not compatible with the underlying data type.
Here is the function:
def _check_type_compatibilities(
    data_df: pd.DataFrame,
    columns_config: ColumnConfigMapping,
    dataframe_schema: DataframeSchema,
):
    """Check column type to data type compatibility.

    Iterates the index and all columns of the dataframe to check if
    the configured column types are compatible with the underlying data types.

    Parameters
    ----------
    data_df : pd.DataFrame
        The dataframe to check the type compatibilities for.

    columns_config : ColumnConfigMapping
        A mapping of column to column configurations.

    dataframe_schema : DataframeSchema
        The schema of the dataframe.

    Raises
    ------
    StreamlitAPIException
        If a configured column type is editable and not compatible with the
        underlying data type.
    """
    # TODO(lukasmasuch): Update this here to support multi-index in the future:
    indices = [(INDEX_IDENTIFIER, data_df.index)]

    for column in indices + list(data_df.items()):
        column_name, _ = column
        column_data_kind = dataframe_schema[column_name]

        # TODO(lukasmasuch): support column config via numerical index here?
        if column_name in columns_config:
            column_config = columns_config[column_name]
            if column_config.get("disabled") is True:
                # Disabled columns are not checked for compatibility.
                # This might change in the future.
                continue

            type_config = column_config.get("type_config")
            if type_config is None:
                continue

            configured_column_type = type_config.get("type")
            if configured_column_type is None:
                continue

            # Idiom fix: compare truthiness directly instead of `is False`
            # (the helper is annotated to return a bool).
            if not is_type_compatible(configured_column_type, column_data_kind):
                raise StreamlitAPIException(
                    f"The configured column type `{configured_column_type}` for column "
                    f"`{column_name}` is not compatible for editing the underlying "
                    f"data type `{column_data_kind}`.\n\nYou have following options to "
                    f"fix this: 1) choose a compatible type 2) disable the column "
                    f"3) convert the column into a compatible data type."
                )
178,406 | from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, List, Literal, Sequence, Union, cast, overload
from typing_extensions import TypeAlias
from streamlit import config
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.proto.Common_pb2 import FileUploaderState as FileUploaderStateProto
from streamlit.proto.Common_pb2 import UploadedFileInfo as UploadedFileInfoProto
from streamlit.proto.FileUploader_pb2 import FileUploader as FileUploaderProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.runtime.uploaded_file_manager import DeletedFile, UploadedFile
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
class DeletedFile(NamedTuple):
    """Represents a deleted file in deserialized values for st.file_uploader and
    st.camera_input

    Return this from st.file_uploader and st.camera_input deserialize (so they can
    be used in session_state), when widget value contains file record that is missing
    from the storage.

    DeleteFile instances filtered out before return final value to the user in script,
    or before sending to frontend."""

    # ID of the file record that is missing from storage.
    file_id: str
class UploadedFile(io.BytesIO):
    """A mutable uploaded file.

    This class extends BytesIO, which has copy-on-write semantics when
    initialized with `bytes`.
    """

    def __init__(self, record: UploadedFileRec, file_urls: FileURLsProto):
        """Wrap an uploaded-file record in a BytesIO-compatible object.

        Parameters
        ----------
        record : UploadedFileRec
            The stored file record; its `data`, `file_id`, `name`, and `type`
            are copied onto this instance.
        file_urls : FileURLsProto
            URLs associated with this file, kept for internal use.
        """
        # BytesIO's copy-on-write semantics doesn't seem to be mentioned in
        # the Python docs - possibly because it's a CPython-only optimization
        # and not guaranteed to be in other Python runtimes. But it's detailed
        # here: https://hg.python.org/cpython/rev/79a5fbe2c78f
        super().__init__(record.data)
        self.file_id = record.file_id
        self.name = record.name
        self.type = record.type
        self.size = len(record.data)
        self._file_urls = file_urls

    def __eq__(self, other: object) -> bool:
        # Two uploaded files are considered equal iff they share a file_id;
        # content is intentionally not compared.
        if not isinstance(other, UploadedFile):
            return NotImplemented
        return self.file_id == other.file_id

    def __repr__(self) -> str:
        # Delegate to the shared repr helper for a consistent debug rendering.
        return util.repr_(self)
def _get_upload_files(
    widget_value: FileUploaderStateProto | None,
) -> list[UploadedFile | DeletedFile]:
    """Resolve the widget's uploaded-file info against stored file records.

    Files that are still present in storage are returned as ``UploadedFile``;
    files whose records are missing are returned as ``DeletedFile``
    placeholders, preserving the original order.
    """
    if widget_value is None:
        return []

    ctx = get_script_run_ctx()
    if ctx is None:
        return []

    file_info = widget_value.uploaded_file_info
    if not file_info:
        return []

    # Fetch all records in one call, then look them up by ID.
    records = ctx.uploaded_file_mgr.get_files(
        session_id=ctx.session_id,
        file_ids=[info.file_id for info in file_info],
    )
    record_by_id = {record.file_id: record for record in records}

    resolved: list[UploadedFile | DeletedFile] = []
    for info in file_info:
        record = record_by_id.get(info.file_id)
        if record is None:
            # The record was removed from storage; keep a placeholder.
            resolved.append(DeletedFile(info.file_id))
        else:
            resolved.append(UploadedFile(record, info.file_urls))
    return resolved
178,407 | from __future__ import annotations
import io
import os
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, BinaryIO, Final, Literal, TextIO, Union, cast
from typing_extensions import TypeAlias
from streamlit import runtime, source_util
from streamlit.elements.form import current_form_id, is_in_form
from streamlit.elements.utils import check_callback_rules, check_session_state_rules
from streamlit.errors import StreamlitAPIException
from streamlit.file_util import get_main_script_directory, normalize_path_join
from streamlit.proto.Button_pb2 import Button as ButtonProto
from streamlit.proto.DownloadButton_pb2 import DownloadButton as DownloadButtonProto
from streamlit.proto.LinkButton_pb2 import LinkButton as LinkButtonProto
from streamlit.proto.PageLink_pb2 import PageLink as PageLinkProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id, save_for_app_testing
from streamlit.string_util import validate_emoji
from streamlit.type_util import Key, to_key
DownloadButtonDataType: TypeAlias = Union[str, bytes, TextIO, BinaryIO, io.RawIOBase]
def marshall_file(
    coordinates: str,
    data: DownloadButtonDataType,
    proto_download_button: DownloadButtonProto,
    mimetype: str | None,
    file_name: str | None = None,
) -> None:
    """Serialize download data to bytes, register it with the media file
    manager (when a runtime exists), and store the resulting URL on the proto.

    Parameters
    ----------
    coordinates : str
        Identifier used by the media file manager to key the file.
    data : DownloadButtonDataType
        The payload: str, bytes, or a text/binary file-like object.
    proto_download_button : DownloadButtonProto
        Proto whose `url` field receives the generated media URL.
    mimetype : str | None
        Explicit MIME type; defaults to text/plain for textual inputs and
        application/octet-stream for binary inputs.
    file_name : str | None
        Optional file name for the download.
    """
    data_as_bytes: bytes
    if isinstance(data, str):
        data_as_bytes = data.encode()
        mimetype = mimetype or "text/plain"
    elif isinstance(data, io.TextIOWrapper):
        string_data = data.read()
        data_as_bytes = string_data.encode()
        mimetype = mimetype or "text/plain"
    # Assume bytes; try methods until we run out.
    elif isinstance(data, bytes):
        data_as_bytes = data
        mimetype = mimetype or "application/octet-stream"
    elif isinstance(data, io.BytesIO):
        # Rewind before reading so the whole buffer is captured.
        data.seek(0)
        data_as_bytes = data.getvalue()
        mimetype = mimetype or "application/octet-stream"
    elif isinstance(data, io.BufferedReader):
        data.seek(0)
        data_as_bytes = data.read()
        mimetype = mimetype or "application/octet-stream"
    elif isinstance(data, io.RawIOBase):
        data.seek(0)
        # read() may return None for non-blocking streams; fall back to b"".
        data_as_bytes = data.read() or b""
        mimetype = mimetype or "application/octet-stream"
    else:
        raise RuntimeError("Invalid binary data format: %s" % type(data))

    if runtime.exists():
        file_url = runtime.get_instance().media_file_mgr.add(
            data_as_bytes,
            mimetype,
            coordinates,
            file_name=file_name,
            is_for_static_download=True,
        )
    else:
        # When running in "raw mode", we can't access the MediaFileManager.
        file_url = ""

    proto_download_button.url = file_url
178,408 | from __future__ import annotations
import re
from dataclasses import dataclass
from datetime import date, datetime, time, timedelta
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Final,
Literal,
Sequence,
Tuple,
Union,
cast,
overload,
)
from typing_extensions import TypeAlias
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.DateInput_pb2 import DateInput as DateInputProto
from streamlit.proto.TimeInput_pb2 import TimeInput as TimeInputProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
get_session_state,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
DateValue: TypeAlias = Union[SingleDateValue, Sequence[SingleDateValue]]
class StreamlitAPIException(MarkdownFormattedException):
"""Base class for Streamlit API exceptions.
An API exception should be thrown when user code interacts with the
Streamlit API incorrectly. (That is, when we throw an exception as a
result of a user's malformed `st.foo` call, it should be a
StreamlitAPIException or subclass.)
When displaying these exceptions on the frontend, we strip Streamlit
entries from the stack trace so that the user doesn't see a bunch of
noise related to Streamlit internals.
"""
def __repr__(self) -> str:
return util.repr_(self)
def _parse_date_value(
value: DateValue | Literal["today"] | Literal["default_value_today"],
) -> tuple[list[date] | None, bool]:
parsed_dates: list[date]
range_value: bool = False
if value is None:
return None, range_value
if value == "today":
parsed_dates = [datetime.now().date()]
elif value == "default_value_today":
# Set value default.
parsed_dates = [datetime.now().date()]
elif isinstance(value, datetime):
parsed_dates = [value.date()]
elif isinstance(value, date):
parsed_dates = [value]
elif isinstance(value, (list, tuple)):
if not len(value) in (0, 1, 2):
raise StreamlitAPIException(
"DateInput value should either be an date/datetime or a list/tuple of "
"0 - 2 date/datetime values"
)
parsed_dates = [v.date() if isinstance(v, datetime) else v for v in value]
range_value = True
else:
raise StreamlitAPIException(
"DateInput value should either be an date/datetime or a list/tuple of "
"0 - 2 date/datetime values"
)
return parsed_dates, range_value | null |
178,409 | from __future__ import annotations
import re
from dataclasses import dataclass
from datetime import date, datetime, time, timedelta
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Final,
Literal,
Sequence,
Tuple,
Union,
cast,
overload,
)
from typing_extensions import TypeAlias
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.DateInput_pb2 import DateInput as DateInputProto
from streamlit.proto.TimeInput_pb2 import TimeInput as TimeInputProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
get_session_state,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
SingleDateValue: TypeAlias = Union[date, datetime, None]
def _adjust_years(input_date: date, years: int) -> date:
"""Add or subtract years from a date."""
try:
# Attempt to directly add/subtract years
return input_date.replace(year=input_date.year + years)
except ValueError as err:
# Handle case for leap year date (February 29) that doesn't exist in the target year
# by moving the date to February 28
if input_date.month == 2 and input_date.day == 29:
return input_date.replace(year=input_date.year + years, month=2, day=28)
raise StreamlitAPIException(
f"Date {input_date} does not exist in the target year {input_date.year + years}. "
"This should never happen. Please report this bug."
) from err
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.

    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)

    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """

    def __repr__(self) -> str:
        # Delegate to the shared util helper so all exceptions repr consistently.
        return util.repr_(self)
def _parse_min_date(
min_value: SingleDateValue,
parsed_dates: Sequence[date] | None,
) -> date:
parsed_min_date: date
if isinstance(min_value, datetime):
parsed_min_date = min_value.date()
elif isinstance(min_value, date):
parsed_min_date = min_value
elif min_value is None:
if parsed_dates:
parsed_min_date = _adjust_years(parsed_dates[0], years=-10)
else:
parsed_min_date = _adjust_years(date.today(), years=-10)
else:
raise StreamlitAPIException(
"DateInput min should either be a date/datetime or None"
)
return parsed_min_date | null |
178,410 | from __future__ import annotations
import re
from dataclasses import dataclass
from datetime import date, datetime, time, timedelta
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Final,
Literal,
Sequence,
Tuple,
Union,
cast,
overload,
)
from typing_extensions import TypeAlias
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.DateInput_pb2 import DateInput as DateInputProto
from streamlit.proto.TimeInput_pb2 import TimeInput as TimeInputProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
get_session_state,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
SingleDateValue: TypeAlias = Union[date, datetime, None]
def _adjust_years(input_date: date, years: int) -> date:
"""Add or subtract years from a date."""
try:
# Attempt to directly add/subtract years
return input_date.replace(year=input_date.year + years)
except ValueError as err:
# Handle case for leap year date (February 29) that doesn't exist in the target year
# by moving the date to February 28
if input_date.month == 2 and input_date.day == 29:
return input_date.replace(year=input_date.year + years, month=2, day=28)
raise StreamlitAPIException(
f"Date {input_date} does not exist in the target year {input_date.year + years}. "
"This should never happen. Please report this bug."
) from err
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.

    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)

    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """

    def __repr__(self) -> str:
        # Delegate to the shared util helper so all exceptions repr consistently.
        return util.repr_(self)
def _parse_max_date(
max_value: SingleDateValue,
parsed_dates: Sequence[date] | None,
) -> date:
parsed_max_date: date
if isinstance(max_value, datetime):
parsed_max_date = max_value.date()
elif isinstance(max_value, date):
parsed_max_date = max_value
elif max_value is None:
if parsed_dates:
parsed_max_date = _adjust_years(parsed_dates[-1], years=10)
else:
parsed_max_date = _adjust_years(date.today(), years=10)
else:
raise StreamlitAPIException(
"DateInput max should either be a date/datetime or None"
)
return parsed_max_date | null |
178,411 | from __future__ import annotations
from dataclasses import dataclass
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from numbers import Integral, Real
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Final, Sequence, Tuple, TypeVar, Union, cast
from typing_extensions import TypeAlias
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.js_number import JSNumber, JSNumberBoundsException
from streamlit.proto.Slider_pb2 import Slider as SliderProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
get_session_state,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
def _time_to_datetime(time_: time) -> datetime:
# Note, here we pick an arbitrary date well after Unix epoch.
# This prevents pre-epoch timezone issues (https://bugs.python.org/issue36759)
# We're dropping the date from datetime later, anyway.
return datetime.combine(date(2000, 1, 1), time_) | null |
178,412 | from __future__ import annotations
from dataclasses import dataclass
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from numbers import Integral, Real
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Final, Sequence, Tuple, TypeVar, Union, cast
from typing_extensions import TypeAlias
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.js_number import JSNumber, JSNumberBoundsException
from streamlit.proto.Slider_pb2 import Slider as SliderProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
get_session_state,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
def _date_to_datetime(date_: date) -> datetime:
return datetime.combine(date_, time()) | null |
178,413 | from __future__ import annotations
from dataclasses import dataclass
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from numbers import Integral, Real
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Final, Sequence, Tuple, TypeVar, Union, cast
from typing_extensions import TypeAlias
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.js_number import JSNumber, JSNumberBoundsException
from streamlit.proto.Slider_pb2 import Slider as SliderProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
get_session_state,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
# Timezone-aware Unix epoch; the reference point for microsecond round-trips.
UTC_EPOCH: Final = datetime(1970, 1, 1, tzinfo=timezone.utc)
def _delta_to_micros(delta: timedelta) -> int:
    """Flatten a timedelta into a total count of microseconds."""
    # timedelta normalizes to (days, seconds, microseconds); sum the parts.
    total = delta.days * DAYS_TO_MICROS
    total += delta.seconds * SECONDS_TO_MICROS
    total += delta.microseconds
    return total
def _datetime_to_micros(dt: datetime) -> int:
    """Serialize *dt* as a UTC-stamped microsecond timestamp.

    The frontend is not aware of timezones and only expects a UTC-based
    timestamp (in microseconds). To show the date/time exactly as given, we
    just stamp the wall-clock value as UTC without converting; only the
    backend knows the original timezone and restores it on deserialization.
    """
    stamped = dt.replace(tzinfo=timezone.utc)
    return _delta_to_micros(stamped - UTC_EPOCH)
178,414 | from __future__ import annotations
from dataclasses import dataclass
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from numbers import Integral, Real
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Final, Sequence, Tuple, TypeVar, Union, cast
from typing_extensions import TypeAlias
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
)
from streamlit.errors import StreamlitAPIException
from streamlit.js_number import JSNumber, JSNumberBoundsException
from streamlit.proto.Slider_pb2 import Slider as SliderProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
get_session_state,
register_widget,
)
from streamlit.runtime.state.common import compute_widget_id
from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings, to_key
UTC_EPOCH: Final = datetime(1970, 1, 1, tzinfo=timezone.utc)
The provided code snippet includes necessary dependencies for implementing the `_micros_to_datetime` function. Write a Python function `def _micros_to_datetime(micros: int, orig_tz: tzinfo | None) -> datetime` to solve the following problem:
Restore times/datetimes to original timezone (dates are always naive)
Here is the function:
def _micros_to_datetime(micros: int, orig_tz: tzinfo | None) -> datetime:
    """Restore times/datetimes to original timezone (dates are always naive)."""
    restored = UTC_EPOCH + timedelta(microseconds=micros)
    # Serialization stamped the wall clock as UTC without converting, so the
    # inverse is a plain tzinfo swap — no timezone conversion here either.
    return restored.replace(tzinfo=orig_tz)
178,415 | from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Generic, Sequence, Tuple, cast
from typing_extensions import TypeGuard
from streamlit.elements.form import current_form_id
from streamlit.elements.utils import (
check_callback_rules,
check_session_state_rules,
get_label_visibility_proto_value,
maybe_coerce_enum,
maybe_coerce_enum_sequence,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Slider_pb2 import Slider as SliderProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
register_widget,
)
from streamlit.runtime.state.common import (
RegisterWidgetResult,
compute_widget_id,
save_for_app_testing,
)
from streamlit.type_util import (
Key,
LabelVisibility,
OptionSequence,
T,
check_python_comparable,
ensure_indexable,
maybe_raise_label_warnings,
to_key,
)
from streamlit.util import index_
T = TypeVar("T")
def _is_range_value(value: T | Sequence[T]) -> TypeGuard[Sequence[T]]:
return isinstance(value, (list, tuple)) | null |
178,416 | from __future__ import annotations
import ast
import contextlib
import inspect
import re
import types
from typing import TYPE_CHECKING, Any, Final, cast
import streamlit
from streamlit.proto.DocString_pb2 import DocString as DocStringProto
from streamlit.proto.DocString_pb2 import Member as MemberProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner.script_runner import (
__file__ as SCRIPTRUNNER_FILENAME,
)
from streamlit.runtime.secrets import Secrets
from streamlit.string_util import is_mem_address_str
def _get_docstring(obj):
doc_string = inspect.getdoc(obj)
# Sometimes an object has no docstring, but the object's type does.
# If that's the case here, use the type's docstring.
# For objects where type is "type" we do not print the docs (e.g. int).
# We also do not print the docs for functions and methods if the docstring is empty.
if doc_string is None:
obj_type = type(obj)
if (
obj_type is not type
and obj_type is not types.ModuleType
and not inspect.isfunction(obj)
and not inspect.ismethod(obj)
):
doc_string = inspect.getdoc(obj_type)
if doc_string:
return doc_string.strip()
return None
def _get_variable_name():
    """Try to get the name of the variable in the current line, as set by the user.

    For example:

        foo = bar.Baz(123)
        st.help(foo)

    The name is "foo".
    """
    code = _get_current_line_of_code_as_str()
    return None if code is None else _get_variable_name_from_code_str(code)
def _get_type_as_str(obj):
if inspect.isclass(obj):
return "class"
return str(type(obj).__name__)
def _get_value(obj, var_name):
    """Build the human-readable "value" string shown for *obj*, or None.

    Falls back to describing the object via module/name/signature when it has
    no simple human-readable value.
    """
    obj_value = _get_human_readable_value(obj)
    if obj_value is not None:
        return obj_value

    # Complex object with no simple repr: describe it instead. If the object
    # itself is anonymous, it's probably an instance of some class Foo, so
    # describe Foo in the value slot.
    name = _get_name(obj)
    name_obj = obj if name else type(obj)
    if not name:
        name = _get_name(name_obj)

    module = _get_module(name_obj)
    sig = _get_signature(name_obj) or ""

    if name:
        obj_value = f"{module}.{name}{sig}" if module else f"{name}{sig}"

    if obj_value == var_name:
        # No need to repeat the same info.
        # For example: st.help(re) shouldn't show "re module re", just "re module".
        obj_value = None

    return obj_value
def _get_members(obj):
    """Collect the public members of *obj* as MemberProto entries.

    Members are sorted by (weight, name); modules and private/underscore
    attributes are excluded. Returns an empty list when nothing qualifies.
    """
    members_for_sorting = []

    for attr_name in dir(obj):
        # Skip private and dunder attributes.
        if attr_name.startswith("_"):
            continue

        try:
            is_computed_value = _is_computed_property(obj, attr_name)
            if is_computed_value:
                # Document the property object on the class rather than
                # evaluating it (evaluation could be expensive or side-effectful).
                parent_attr = getattr(obj.__class__, attr_name)

                member_type = "property"
                weight = 0
                member_docs = _get_docstring(parent_attr)
                member_value = None
            else:
                attr_value = getattr(obj, attr_name)
                weight = _get_weight(attr_value)

                human_readable_value = _get_human_readable_value(attr_value)

                member_type = _get_type_as_str(attr_value)
                if human_readable_value is None:
                    member_docs = _get_docstring(attr_value)
                    member_value = None
                else:
                    # Show the value itself instead of docs when it has a
                    # simple human-readable form.
                    member_docs = None
                    member_value = human_readable_value
        except AttributeError:
            # If there's an AttributeError, we can just skip it.
            # This can happen when members are exposed with `dir()`
            # but are conditionally unavailable.
            continue

        if member_type == "module":
            # Don't pollute the output with all imported modules.
            continue

        member = MemberProto()
        member.name = attr_name
        member.type = member_type

        if member_docs is not None:
            member.doc_string = _get_first_line(member_docs)

        if member_value is not None:
            member.value = member_value

        members_for_sorting.append((weight, member))

    if members_for_sorting:
        # Sort by weight first, then alphabetically by member name.
        sorted_members = sorted(members_for_sorting, key=lambda x: (x[0], x[1].name))
        return [m for _, m in sorted_members]

    return []
The provided code snippet includes necessary dependencies for implementing the `_marshall` function. Write a Python function `def _marshall(doc_string_proto: DocStringProto, obj: Any) -> None` to solve the following problem:
Construct a DocString object. See DeltaGenerator.help for docs.
Here is the function:
def _marshall(doc_string_proto: DocStringProto, obj: Any) -> None:
    """Construct a DocString object.

    See DeltaGenerator.help for docs.
    """
    # The variable name is best-effort; only set it when we could detect one.
    var_name = _get_variable_name()
    if var_name is not None:
        doc_string_proto.name = var_name

    doc_string_proto.type = _get_type_as_str(obj)

    obj_docs = _get_docstring(obj)
    if obj_docs is not None:
        doc_string_proto.doc_string = obj_docs

    obj_value = _get_value(obj, var_name)
    if obj_value is not None:
        doc_string_proto.value = obj_value

    doc_string_proto.members.extend(_get_members(obj))
178,417 | from __future__ import annotations
from contextlib import nullcontext
from datetime import date
from enum import Enum
from typing import TYPE_CHECKING, Any, Collection, Literal, Sequence, cast
import streamlit.elements.arrow_vega_lite as arrow_vega_lite
from streamlit import type_util
from streamlit.color_util import (
Color,
is_color_like,
is_color_tuple_like,
is_hex_color_like,
to_css_color,
)
from streamlit.elements.altair_utils import AddRowsMetadata
from streamlit.elements.arrow import Data
from streamlit.elements.utils import last_index_for_melted_dataframes
from streamlit.errors import Error, StreamlitAPIException
from streamlit.proto.ArrowVegaLiteChart_pb2 import (
ArrowVegaLiteChart as ArrowVegaLiteChartProto,
)
from streamlit.runtime.metrics_util import gather_metrics
class ChartType(Enum):
    """Built-in chart flavors, each mapping to its Vega-Lite mark type."""

    AREA = {"mark_type": "area"}
    BAR = {"mark_type": "bar"}
    LINE = {"mark_type": "line"}
    SCATTER = {"mark_type": "circle"}
def prep_data(
    df: pd.DataFrame,
    x_column: str | None,
    y_column_list: list[str],
    color_column: str | None,
    size_column: str | None,
) -> tuple[pd.DataFrame, str | None, str | None, str | None, str | None]:
    """Prepares the data for charting. This is also used in add_rows.

    Returns the prepared dataframe and the new names of the x column (taking the index reset into
    consideration) and y, color, and size columns.

    NOTE(review): the `*_in_place` helpers presumably mutate their dataframe
    argument (as their names suggest) — the step order below matters, so keep
    these calls in sequence.
    """
    # If y is provided, but x is not, we'll use the index as x.
    # So we need to pull the index into its own column.
    x_column = _maybe_reset_index_in_place(df, x_column, y_column_list)

    # Drop columns we're not using.
    selected_data = _drop_unused_columns(
        df, x_column, color_column, size_column, *y_column_list
    )

    # Maybe convert color to Vega colors.
    _maybe_convert_color_column_in_place(selected_data, color_column)

    # Make sure all columns have string names.
    (
        x_column,
        y_column_list,
        color_column,
        size_column,
    ) = _convert_col_names_to_str_in_place(
        selected_data, x_column, y_column_list, color_column, size_column
    )

    # Maybe melt data from wide format into long format.
    melted_data, y_column, color_column = _maybe_melt(
        selected_data, x_column, y_column_list, color_column, size_column
    )

    # Return the data, but also the new names to use for x, y, and color.
    return melted_data, x_column, y_column, color_column, size_column
def _parse_generic_column(
df: pd.DataFrame, column_or_value: Any
) -> tuple[str | None, Any]:
if isinstance(column_or_value, str) and column_or_value in df.columns:
column_name = column_or_value
value = None
else:
column_name = None
value = column_or_value
return column_name, value
def _parse_x_column(df: pd.DataFrame, x_from_user: str | None) -> str | None:
if x_from_user is None:
return None
elif isinstance(x_from_user, str):
if x_from_user not in df.columns:
raise StreamlitColumnNotFoundError(df, x_from_user)
return x_from_user
else:
raise StreamlitAPIException(
"x parameter should be a column name (str) or None to use the "
f" dataframe's index. Value given: {x_from_user} "
f"(type {type(x_from_user)})"
)
def _parse_y_columns(
df: pd.DataFrame,
y_from_user: str | Sequence[str] | None,
x_column: str | None,
) -> list[str]:
y_column_list: list[str] = []
if y_from_user is None:
y_column_list = list(df.columns)
elif isinstance(y_from_user, str):
y_column_list = [y_from_user]
elif type_util.is_sequence(y_from_user):
y_column_list = list(str(col) for col in y_from_user)
else:
raise StreamlitAPIException(
"y parameter should be a column name (str) or list thereof. "
f"Value given: {y_from_user} (type {type(y_from_user)})"
)
for col in y_column_list:
if col not in df.columns:
raise StreamlitColumnNotFoundError(df, col)
# y_column_list should only include x_column when user explicitly asked for it.
if x_column in y_column_list and (not y_from_user or x_column not in y_from_user):
y_column_list.remove(x_column)
return y_column_list
def _get_opacity_encoding(
    chart_type: ChartType, color_column: str | None
) -> alt.OpacityValue | None:
    """Return a slight transparency for color-split area charts, else None."""
    import altair as alt

    # Stacked/overlapping areas are easier to read with a bit of transparency.
    if chart_type == ChartType.AREA and color_column:
        return alt.OpacityValue(0.7)
    return None
def _get_x_encoding(
    df: pd.DataFrame,
    x_column: str | None,
    x_from_user: str | None,
    chart_type: ChartType,
) -> alt.X:
    """Build the Altair X encoding, hiding internal placeholder column names
    from the axis title."""
    import altair as alt

    if x_column is None:
        # If no field is specified, the full axis disappears when no data is present.
        # Maybe a bug in vega-lite? So we pass a field that doesn't exist.
        x_field = NON_EXISTENT_COLUMN_NAME
        x_title = ""
    elif x_column == SEPARATED_INDEX_COLUMN_NAME:
        # If the x column name is the crazy anti-collision name we gave it, then need to set
        # up a title so we never show the crazy name to the user.
        x_field = x_column
        # Don't show a label in the x axis (not even a nice label like
        # SEPARATED_INDEX_COLUMN_TITLE) when we pull the x axis from the index.
        x_title = ""
    else:
        x_field = x_column
        # Only show a label in the x axis if the user passed a column explicitly. We
        # could go either way here, but I'm keeping this to avoid breaking the existing
        # behavior.
        if x_from_user is None:
            x_title = ""
        else:
            x_title = x_column
    return alt.X(
        x_field,
        title=x_title,
        type=_get_x_encoding_type(df, chart_type, x_column),
        scale=_get_scale(df, x_column),
        axis=_get_axis_config(df, x_column, grid=False),
    )
def _get_y_encoding(
    df: pd.DataFrame,
    y_column: str | None,
    y_from_user: str | Sequence[str] | None,
) -> alt.Y:
    """Build the Altair Y encoding, hiding internal placeholder column names
    from the axis title."""
    import altair as alt

    if y_column is None:
        # If no field is specified, the full axis disappears when no data is present.
        # Maybe a bug in vega-lite? So we pass a field that doesn't exist.
        y_field = NON_EXISTENT_COLUMN_NAME
        y_title = ""
    elif y_column == MELTED_Y_COLUMN_NAME:
        # If the y column name is the crazy anti-collision name we gave it, then need to set
        # up a title so we never show the crazy name to the user.
        y_field = y_column
        # Don't show a label in the y axis (not even a nice label like
        # MELTED_Y_COLUMN_TITLE) when we pull the x axis from the index.
        y_title = ""
    else:
        y_field = y_column
        # Only show a label in the y axis if the user passed a column explicitly. We
        # could go either way here, but I'm keeping this to avoid breaking the existing
        # behavior.
        if y_from_user is None:
            y_title = ""
        else:
            y_title = y_column
    return alt.Y(
        field=y_field,
        title=y_title,
        type=_get_y_encoding_type(df, y_column),
        scale=_get_scale(df, y_column),
        axis=_get_axis_config(df, y_column, grid=True),
    )
def _get_color_encoding(
    df: pd.DataFrame,
    color_value: Color | None,
    color_column: str | None,
    y_column_list: list[str],
    color_from_user: str | Color | list[Color] | None,
) -> alt.Color | alt.ColorValue | None:
    """Build the Altair color encoding.

    A constant color value from the user wins over a color column. Returns an
    alt.ColorValue for a single constant color, an alt.Color for column-driven
    coloring, or None when no color was requested.
    """
    import altair as alt

    has_color_value = color_value not in [None, [], tuple()]
    # If user passed a color value, that should win over colors coming from the
    # color column (be they manual or auto-assigned due to melting)
    if has_color_value:
        # If the color value is color-like, return that.
        if is_color_like(cast(Any, color_value)):
            if len(y_column_list) != 1:
                raise StreamlitColorLengthError([color_value], y_column_list)
            return alt.ColorValue(to_css_color(cast(Any, color_value)))
        # If the color value is a list of colors of appropriate length, return that.
        elif isinstance(color_value, (list, tuple)):
            color_values = cast(Collection[Color], color_value)
            if len(color_values) != len(y_column_list):
                raise StreamlitColorLengthError(color_values, y_column_list)
            if len(color_value) == 1:
                return alt.ColorValue(to_css_color(cast(Any, color_value[0])))
            else:
                return alt.Color(
                    field=color_column,
                    scale=alt.Scale(range=[to_css_color(c) for c in color_values]),
                    legend=COLOR_LEGEND_SETTINGS,
                    type="nominal",
                    title=" ",
                )
        raise StreamlitInvalidColorError(df, color_from_user)
    elif color_column is not None:
        column_type: str | tuple[str, list[Any]]
        if color_column == MELTED_COLOR_COLUMN_NAME:
            column_type = "nominal"
        else:
            column_type = type_util.infer_vegalite_type(df[color_column])
        color_enc = alt.Color(
            field=color_column, legend=COLOR_LEGEND_SETTINGS, type=column_type
        )
        # Fix title if DF was melted
        if color_column == MELTED_COLOR_COLUMN_NAME:
            # This has to contain an empty space, otherwise the
            # full y-axis disappears (maybe a bug in vega-lite)?
            color_enc["title"] = " "
        # If the 0th element in the color column looks like a color, we'll use the color column's
        # values as the colors in our chart.
        elif len(df[color_column]) and is_color_like(df[color_column].iat[0]):
            color_range = [to_css_color(c) for c in df[color_column].unique()]
            color_enc["scale"] = alt.Scale(range=color_range)
            # Don't show the color legend, because it will just show text with the color values,
            # like #f00, #00f, etc, which are not user-readable.
            color_enc["legend"] = None
        # Otherwise, let Vega-Lite auto-assign colors.
        # This codepath is typically reached when the color column contains numbers (in which case
        # Vega-Lite uses a color gradient to represent them) or strings (in which case Vega-Lite
        # assigns one color for each unique value).
        else:
            pass
        return color_enc
    return None
def _get_size_encoding(
    chart_type: ChartType,
    size_column: str | None,
    size_value: str | float | None,
) -> alt.Size | alt.SizeValue | None:
    """Build the size encoding (scatter charts only)."""
    import altair as alt

    if chart_type != ChartType.SCATTER:
        # Only scatter charts support sizing; anything else with size
        # arguments set is an internal error.
        if size_column is not None or size_value is not None:
            raise Error(
                f"Chart type {chart_type.name} does not support size argument. "
                "This should never happen!"
            )
        return None

    if size_column is not None:
        return alt.Size(
            size_column,
            legend=SIZE_LEGEND_SETTINGS,
        )
    if isinstance(size_value, (float, int)):
        return alt.SizeValue(size_value)
    if size_value is None:
        # Default dot size when nothing was specified.
        return alt.SizeValue(100)
    raise StreamlitAPIException(
        f"This does not look like a valid size: {repr(size_value)}"
    )
def _get_tooltip_encoding(
    x_column: str,
    y_column: str,
    size_column: str | None,
    color_column: str | None,
    color_enc: alt.Color | alt.ColorValue | None,
) -> list[alt.Tooltip]:
    """Assemble the tooltip fields: x, y, then optional color and size."""
    import altair as alt

    tooltips = []

    # The internal anti-collision x column name must never leak to the user,
    # so give it a friendly title instead.
    if x_column == SEPARATED_INDEX_COLUMN_NAME:
        tooltips.append(alt.Tooltip(x_column, title=SEPARATED_INDEX_COLUMN_TITLE))
    else:
        tooltips.append(alt.Tooltip(x_column))

    # Same treatment for the melted y column.
    if y_column == MELTED_Y_COLUMN_NAME:
        tooltips.append(
            alt.Tooltip(
                y_column,
                title=MELTED_Y_COLUMN_TITLE,
                type="quantitative",  # Just picked something random. Doesn't really matter!
            )
        )
    else:
        tooltips.append(alt.Tooltip(y_column))

    # A suppressed legend means the color column holds literal color values
    # (like "#ff0"), which would be unreadable noise in the tooltip.
    legend_visible = getattr(color_enc, "legend", True) is not None
    if color_column and legend_visible:
        if color_column == MELTED_COLOR_COLUMN_NAME:
            # Use a human-readable title for the color.
            tooltips.append(
                alt.Tooltip(
                    color_column,
                    title=MELTED_COLOR_COLUMN_TITLE,
                    type="nominal",
                )
            )
        else:
            tooltips.append(alt.Tooltip(color_column))

    if size_column:
        tooltips.append(alt.Tooltip(size_column))

    return tooltips
Color: TypeAlias = Union[ColorTuple, ColorStr]
class AddRowsMetadata:
    """Metadata needed by add_rows on native charts."""

    # Index value of the last row in the original dataframe (None when empty);
    # used to offset incoming rows.
    last_index: Hashable | None
    # Column names resolved at chart-creation time, reused so new rows are
    # prepped consistently with the original data.
    columns: PrepDataColumns
Data: TypeAlias = Union[
"DataFrame",
"Series",
"Styler",
"Index",
"pa.Table",
"ndarray",
Iterable,
Dict[str, List[Any]],
None,
]
def last_index_for_melted_dataframes(
    data: DataFrameCompatible | Any,
) -> Hashable | None:
    """Return the last index value of *data* (coerced to a DataFrame), or None.

    None is returned both for non-dataframe-compatible inputs and for empty
    dataframes.
    """
    if not type_util.is_dataframe_compatible(data):
        return None

    df = type_util.convert_anything_to_df(data)
    if df.index.size > 0:
        return cast(Hashable, df.index[-1])
    return None
The provided code snippet includes necessary dependencies for implementing the `_generate_chart` function. Write a Python function `def _generate_chart( chart_type: ChartType, data: Data | None, x_from_user: str | None = None, y_from_user: str | Sequence[str] | None = None, color_from_user: str | Color | list[Color] | None = None, size_from_user: str | float | None = None, width: int = 0, height: int = 0, ) -> tuple[alt.Chart, AddRowsMetadata]` to solve the following problem:
Function to use the chart's type, data columns and indices to figure out the chart's spec.
Here is the function:
def _generate_chart(
    chart_type: ChartType,
    data: Data | None,
    x_from_user: str | None = None,
    y_from_user: str | Sequence[str] | None = None,
    color_from_user: str | Color | list[Color] | None = None,
    size_from_user: str | float | None = None,
    width: int = 0,
    height: int = 0,
) -> tuple[alt.Chart, AddRowsMetadata]:
    """Function to use the chart's type, data columns and indices to figure out the chart's spec.

    Parameters
    ----------
    chart_type : ChartType
        Which built-in chart to produce; its ``value["mark_type"]`` becomes
        the Altair mark.
    data : Data or None
        Anything convertible to a DataFrame. Converted with a copy so the
        caller's object is never mutated.
    x_from_user, y_from_user, color_from_user, size_from_user
        Raw user-facing arguments; each is resolved below to a real column
        name and/or a constant value.
    width, height : int
        Chart dimensions in pixels; 0 lets the frontend decide.

    Returns
    -------
    tuple[alt.Chart, AddRowsMetadata]
        The interactive Altair chart, plus the metadata add_rows needs to
        transform later data the same way this call did.
    """
    import altair as alt

    df = type_util.convert_anything_to_df(data, ensure_copy=True)

    # From now on, use "df" instead of "data". Deleting "data" to guarantee we follow this.
    del data

    # Convert arguments received from the user to things Vega-Lite understands.

    # Get name of column to use for x.
    x_column = _parse_x_column(df, x_from_user)
    # Get name of columns to use for y.
    y_column_list = _parse_y_columns(df, y_from_user, x_column)
    # Get name of column to use for color, or constant value to use. Any/both could be None.
    color_column, color_value = _parse_generic_column(df, color_from_user)
    # Get name of column to use for size, or constant value to use. Any/both could be None.
    size_column, size_value = _parse_generic_column(df, size_from_user)

    # Store some info so we can use it in add_rows.
    add_rows_metadata = AddRowsMetadata(
        # The last index of df so we can adjust the input df in add_rows:
        last_index=last_index_for_melted_dataframes(df),
        # This is the input to prep_data (except for the df):
        columns=dict(
            x_column=x_column,
            y_column_list=y_column_list,
            color_column=color_column,
            size_column=size_column,
        ),
    )

    # At this point, all foo_column variables are either None/empty or contain actual
    # columns that are guaranteed to exist.
    df, x_column, y_column, color_column, size_column = prep_data(
        df, x_column, y_column_list, color_column, size_column
    )

    # At this point, x_column is only None if user did not provide one AND df is empty.

    # Create a Chart with x and y encodings.
    chart = alt.Chart(
        data=df,
        mark=chart_type.value["mark_type"],
        width=width,
        height=height,
    ).encode(
        x=_get_x_encoding(df, x_column, x_from_user, chart_type),
        y=_get_y_encoding(df, y_column, y_from_user),
    )

    # Set up opacity encoding.
    opacity_enc = _get_opacity_encoding(chart_type, color_column)
    if opacity_enc is not None:
        chart = chart.encode(opacity=opacity_enc)

    # Set up color encoding.
    color_enc = _get_color_encoding(
        df, color_value, color_column, y_column_list, color_from_user
    )
    if color_enc is not None:
        chart = chart.encode(color=color_enc)

    # Set up size encoding.
    size_enc = _get_size_encoding(chart_type, size_column, size_value)
    if size_enc is not None:
        chart = chart.encode(size=size_enc)

    # Set up tooltip encoding.
    if x_column is not None and y_column is not None:
        chart = chart.encode(
            tooltip=_get_tooltip_encoding(
                x_column,
                y_column,
                size_column,
                color_column,
                color_enc,
            )
        )

    return chart.interactive(), add_rows_metadata
178,418 | from __future__ import annotations
from contextlib import nullcontext
from datetime import date
from enum import Enum
from typing import TYPE_CHECKING, Any, Collection, Literal, Sequence, cast
import streamlit.elements.arrow_vega_lite as arrow_vega_lite
from streamlit import type_util
from streamlit.color_util import (
Color,
is_color_like,
is_color_tuple_like,
is_hex_color_like,
to_css_color,
)
from streamlit.elements.altair_utils import AddRowsMetadata
from streamlit.elements.arrow import Data
from streamlit.elements.utils import last_index_for_melted_dataframes
from streamlit.errors import Error, StreamlitAPIException
from streamlit.proto.ArrowVegaLiteChart_pb2 import (
ArrowVegaLiteChart as ArrowVegaLiteChartProto,
)
from streamlit.runtime.metrics_util import gather_metrics
The provided code snippet includes necessary dependencies for implementing the `marshall` function. Write a Python function `def marshall( vega_lite_chart: ArrowVegaLiteChartProto, altair_chart: alt.Chart, use_container_width: bool = False, theme: None | Literal["streamlit"] = "streamlit", **kwargs: Any, ) -> None` to solve the following problem:
Marshall chart's data into proto.
Here is the function:
def marshall(
    vega_lite_chart: ArrowVegaLiteChartProto,
    altair_chart: alt.Chart,
    use_container_width: bool = False,
    theme: None | Literal["streamlit"] = "streamlit",
    **kwargs: Any,
) -> None:
    """Marshall chart's data into proto.

    Parameters
    ----------
    vega_lite_chart : ArrowVegaLiteChartProto
        The protobuf to populate.
    altair_chart : alt.Chart
        The Altair chart to serialize to a Vega-Lite spec.
    use_container_width : bool
        Whether the frontend should stretch the chart to the container width.
    theme : None or "streamlit"
        Chart theme to apply on the frontend.
    **kwargs
        Forwarded to ``arrow_vega_lite.marshall``.
    """
    import altair as alt

    # Normally altair_chart.to_dict() would transform the dataframe used by the
    # chart into an array of dictionaries. To avoid that, we install a
    # transformer that replaces datasets with a reference by the object id of
    # the dataframe. We then fill in the dataset manually later on.

    datasets = {}

    def id_transform(data) -> dict[str, str]:
        """Altair data transformer that returns a fake named dataset with the
        object id.
        """
        # id() is stable for the object's lifetime, which is all we need here:
        # it is only used as a lookup key to restore the dataset below.
        name = str(id(data))
        datasets[name] = data
        return {"name": name}

    alt.data_transformers.register("id", id_transform)  # type: ignore[attr-defined,unused-ignore]

    # The default altair theme has some width/height defaults defined
    # which are not useful for Streamlit. Therefore, we change the theme to
    # "none" to avoid those defaults.
    with alt.themes.enable("none") if alt.themes.active == "default" else nullcontext():  # type: ignore[attr-defined,unused-ignore]
        with alt.data_transformers.enable("id"):  # type: ignore[attr-defined,unused-ignore]
            chart_dict = altair_chart.to_dict()

            # Put datasets back into the chart dict but note how they weren't
            # transformed.
            chart_dict["datasets"] = datasets

            arrow_vega_lite.marshall(
                vega_lite_chart,
                chart_dict,
                use_container_width=use_container_width,
                theme=theme,
                **kwargs,
            )
178,419 | from __future__ import annotations
import io
from typing import TYPE_CHECKING, Any, cast
import streamlit.elements.image as image_utils
from streamlit import config
from streamlit.errors import StreamlitDeprecationWarning
from streamlit.proto.Image_pb2 import ImageList as ImageListProto
from streamlit.runtime.metrics_util import gather_metrics
def marshall(
    coordinates: str,
    image_list_proto: ImageListProto,
    fig: Figure | None = None,
    clear_figure: bool | None = True,
    use_container_width: bool = True,
    **kwargs: Any,
) -> None:
    """Render a matplotlib figure to PNG and marshall it into the proto.

    When ``fig`` is None, the current pyplot figure is used and is cleared
    after rendering (unless ``clear_figure`` is explicitly False).
    """
    try:
        import matplotlib
        import matplotlib.pyplot as plt

        # Keep interactive mode off so rendering never pops up a window.
        plt.ioff()
    except ImportError:
        raise ImportError("pyplot() command requires matplotlib")

    # You can call .savefig() on a Figure object or directly on the pyplot
    # module, in which case you're doing it to the latest Figure.
    if not fig:
        if clear_figure is None:
            clear_figure = True

        fig = cast("Figure", plt)

    # Normally, dpi is set to 'figure', and the figure's dpi is set to 100.
    # So here we pick double of that to make things look good in a high
    # DPI display. Caller-provided kwargs always win over these defaults.
    defaults = {"bbox_inches": "tight", "dpi": 200, "format": "png"}
    kwargs.update({name: kwargs.get(name, value) for name, value in defaults.items()})

    buffer = io.BytesIO()
    fig.savefig(buffer, **kwargs)

    if use_container_width:
        image_width = image_utils.WidthBehaviour.COLUMN
    else:
        image_width = image_utils.WidthBehaviour.ORIGINAL

    image_utils.marshall_images(
        coordinates=coordinates,
        image=buffer,
        caption=None,
        width=image_width,
        proto_imgs=image_list_proto,
        clamp=False,
        channels="RGB",
        output_format="PNG",
    )

    # Clear the figure after rendering it. This means that subsequent
    # plt calls will be starting fresh.
    if clear_figure:
        fig.clf()
178,420 | from __future__ import annotations
import json
import urllib.parse
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Union, cast
from typing_extensions import TypeAlias
from streamlit import type_util
from streamlit.elements.lib.streamlit_plotly_theme import (
configure_streamlit_plotly_theme,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.PlotlyChart_pb2 import PlotlyChart as PlotlyChartProto
from streamlit.runtime.legacy_caching import caching
from streamlit.runtime.metrics_util import gather_metrics
SharingMode: TypeAlias = Literal["streamlit", "private", "public", "secret"]
SHARING_MODES: set[SharingMode] = {
# This means the plot will be sent to the Streamlit app rather than to
# Plotly.
"streamlit",
# The three modes below are for plots that should be hosted in Plotly.
# These are the names Plotly uses for them.
"private",
"public",
"secret",
}
FigureOrData: TypeAlias = Union[
_AtomicFigureOrData,
List[_AtomicFigureOrData],
# It is kind of hard to figure out exactly what kind of dict is supported
# here, as plotly hasn't embraced typing yet. This version is chosen to
# align with the docstring.
Dict[str, _AtomicFigureOrData],
"BaseFigure",
"matplotlib.figure.Figure",
]
def _plot_to_url_or_load_cached_url(*args: Any, **kwargs: Any) -> go.Figure:
    """Call plotly.plot wrapped in st.cache.

    This is so we don't unnecessarily upload data to Plotly's SASS if nothing
    changed since the previous upload.
    """
    try:
        # Plotly 4 changed its main package.
        import chart_studio.plotly as ply
    except ImportError:
        # Pre-4 Plotly ships the hosted-plot API in its main package.
        import plotly.plotly as ply

    return ply.plot(*args, **kwargs)
def _get_embed_url(url: str) -> str:
parsed_url = urllib.parse.urlparse(url)
# Plotly's embed URL is the normal URL plus ".embed".
# (Note that our use namedtuple._replace is fine because that's not a
# private method! It just has an underscore to avoid clashing with the
# tuple field names)
parsed_embed_url = parsed_url._replace(path=parsed_url.path + ".embed")
return urllib.parse.urlunparse(parsed_embed_url)
import json
The provided code snippet includes necessary dependencies for implementing the `marshall` function. Write a Python function `def marshall( proto: PlotlyChartProto, figure_or_data: FigureOrData, use_container_width: bool, sharing: SharingMode, theme: Literal["streamlit"] | None, **kwargs: Any, ) -> None` to solve the following problem:
Marshall a proto with a Plotly spec. See DeltaGenerator.plotly_chart for docs.
Here is the function:
def marshall(
    proto: PlotlyChartProto,
    figure_or_data: FigureOrData,
    use_container_width: bool,
    sharing: SharingMode,
    theme: Literal["streamlit"] | None,
    **kwargs: Any,
) -> None:
    """Marshall a proto with a Plotly spec.

    See DeltaGenerator.plotly_chart for docs.

    Parameters
    ----------
    proto : PlotlyChartProto
        The protobuf to populate.
    figure_or_data : FigureOrData
        A Plotly figure, matplotlib figure, trace list, or figure dict.
    use_container_width : bool
        Whether the frontend should stretch the chart to the container width.
    sharing : SharingMode
        "streamlit" embeds the spec in the proto; the other modes upload the
        chart to Plotly's hosting service and embed it by URL.
    theme : "streamlit" or None
        Chart theme applied on the frontend; None disables theming.
    **kwargs
        Extra options forwarded to Plotly.

    Raises
    ------
    ValueError
        If ``sharing`` is not one of SHARING_MODES.
    """
    # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
    # for their main parameter. I don't like the name, but its best to keep
    # it in sync with what Plotly calls it.
    import plotly.tools

    if type_util.is_type(figure_or_data, "matplotlib.figure.Figure"):
        # Convert a matplotlib figure into an equivalent Plotly figure.
        figure = plotly.tools.mpl_to_plotly(figure_or_data)
    else:
        figure = plotly.tools.return_figure_from_figure_or_data(
            figure_or_data, validate_figure=True
        )

    if not isinstance(sharing, str) or sharing.lower() not in SHARING_MODES:
        raise ValueError("Invalid sharing mode for Plotly chart: %s" % sharing)

    proto.use_container_width = use_container_width

    if sharing == "streamlit":
        import plotly.io

        # Copy so we never mutate the caller's config dict.
        config = dict(kwargs.get("config", {}))
        # Copy over some kwargs to config dict. Plotly does the same in plot().
        config.setdefault("showLink", kwargs.get("show_link", False))
        config.setdefault("linkText", kwargs.get("link_text", False))

        proto.figure.spec = plotly.io.to_json(figure, validate=False)
        proto.figure.config = json.dumps(config)
    else:
        # Hosted modes: upload (or reuse a cached upload) and embed by URL.
        url = _plot_to_url_or_load_cached_url(
            figure, sharing=sharing, auto_open=False, **kwargs
        )
        proto.url = _get_embed_url(url)

    proto.theme = theme or ""
178,421 | from __future__ import annotations
import textwrap
from typing import TYPE_CHECKING, Literal, NamedTuple, cast
from streamlit import runtime
from streamlit.errors import StreamlitAPIException
from streamlit.proto import Block_pb2
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import ScriptRunContext, get_script_run_ctx
from streamlit.runtime.state import WidgetArgs, WidgetCallback, WidgetKwargs
def _build_duplicate_form_message(user_key: str | None = None) -> str:
if user_key is not None:
message = textwrap.dedent(
f"""
There are multiple identical forms with `key='{user_key}'`.
To fix this, please make sure that the `key` argument is unique for
each `st.form` you create.
"""
)
else:
message = textwrap.dedent(
"""
There are multiple identical forms with the same generated key.
When a form is created, it's assigned an internal key based on
its structure. Multiple forms with an identical structure will
result in the same internal key, which causes this error.
To fix this error, please pass a unique `key` argument to
`st.form`.
"""
)
return message.strip("\n") | null |
178,422 | from __future__ import annotations
from typing import TYPE_CHECKING, cast
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Toast_pb2 import Toast as ToastProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import clean_text, validate_emoji
from streamlit.type_util import SupportsStr
class StreamlitAPIException(MarkdownFormattedException):
def __repr__(self) -> str:
class SupportsStr(Protocol):
def __str__(self) -> str:
def validate_text(toast_text: SupportsStr) -> SupportsStr:
    """Validate that the toast body is non-empty.

    Parameters
    ----------
    toast_text : SupportsStr
        Any object convertible to str, used as the toast's message body.

    Returns
    -------
    SupportsStr
        The unmodified input, when its string form is non-empty.

    Raises
    ------
    StreamlitAPIException
        If the input converts to the empty string.
    """
    # str() is applied because callers may pass any object implementing
    # __str__, not just str instances.
    if str(toast_text) == "":
        # Fix: was an f-string with no placeholders (ruff F541); a plain
        # literal yields the identical message. Redundant else also dropped.
        raise StreamlitAPIException(
            "Toast body cannot be blank - please provide a message."
        )
    return toast_text
178,423 | from __future__ import annotations
from typing import TYPE_CHECKING, cast
from streamlit.proto.IFrame_pb2 import IFrame as IFrameProto
from streamlit.runtime.metrics_util import gather_metrics
The provided code snippet includes necessary dependencies for implementing the `marshall` function. Write a Python function `def marshall( proto: IFrameProto, src: str | None = None, srcdoc: str | None = None, width: int | None = None, height: int | None = None, scrolling: bool = False, ) -> None` to solve the following problem:
Marshalls data into an IFrame proto. These parameters correspond directly to <iframe> attributes, which are described in more detail at https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe. Parameters ---------- proto : IFrame protobuf The protobuf object to marshall data into. src : str The URL of the page to embed. srcdoc : str Inline HTML to embed. Overrides src. width : int The width of the frame in CSS pixels. Defaults to the app's default element width. height : int The height of the frame in CSS pixels. Defaults to 150. scrolling : bool If true, show a scrollbar when the content is larger than the iframe. Otherwise, never show a scrollbar.
Here is the function:
def marshall(
    proto: IFrameProto,
    src: str | None = None,
    srcdoc: str | None = None,
    width: int | None = None,
    height: int | None = None,
    scrolling: bool = False,
) -> None:
    """Marshalls data into an IFrame proto.

    These parameters correspond directly to <iframe> attributes, which are
    described in more detail at
    https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe.

    Parameters
    ----------
    proto : IFrame protobuf
        The protobuf object to marshall data into.
    src : str
        The URL of the page to embed.
    srcdoc : str
        Inline HTML to embed. Overrides src.
    width : int
        The width of the frame in CSS pixels. Defaults to the app's
        default element width.
    height : int
        The height of the frame in CSS pixels. Defaults to 150.
    scrolling : bool
        If true, show a scrollbar when the content is larger than the iframe.
        Otherwise, never show a scrollbar.
    """
    if src is not None:
        proto.src = src

    if srcdoc is not None:
        proto.srcdoc = srcdoc

    if width is not None:
        proto.width = width
        # Only flag the width as explicit when the caller supplied one.
        proto.has_width = True

    # Height always gets a value; 150 px is the <iframe> default.
    proto.height = height if height is not None else 150

    proto.scrolling = scrolling
178,424 | from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Literal, Union, cast
from typing_extensions import TypeAlias
from streamlit.elements.utils import get_label_visibility_proto_value
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Metric_pb2 import Metric as MetricProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import clean_text
from streamlit.type_util import LabelVisibility, maybe_raise_label_warnings
def _parse_label(label: str) -> str:
if not isinstance(label, str):
raise TypeError(
f"'{str(label)}' is of type {str(type(label))}, which is not an accepted type."
" label only accepts: str. Please convert the label to an accepted type."
)
return label | null |
178,425 | from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Literal, Union, cast
from typing_extensions import TypeAlias
from streamlit.elements.utils import get_label_visibility_proto_value
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Metric_pb2 import Metric as MetricProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import clean_text
from streamlit.type_util import LabelVisibility, maybe_raise_label_warnings
Value: TypeAlias = Union["np.integer", "np.floating", float, int, str, None]
def _parse_value(value: Value) -> str:
if value is None:
return "—"
if isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
return str(value)
elif hasattr(value, "item"):
# Add support for numpy values (e.g. int16, float64, etc.)
try:
# Item could also be just a variable, so we use try, except
if isinstance(value.item(), float) or isinstance(value.item(), int):
return str(value.item())
except Exception:
# If the numpy item is not a valid value, the TypeError below will be raised.
pass
raise TypeError(
f"'{str(value)}' is of type {str(type(value))}, which is not an accepted type."
" value only accepts: int, float, str, or None."
" Please convert the value to an accepted type."
) | null |
178,426 | from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Literal, Union, cast
from typing_extensions import TypeAlias
from streamlit.elements.utils import get_label_visibility_proto_value
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Metric_pb2 import Metric as MetricProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import clean_text
from streamlit.type_util import LabelVisibility, maybe_raise_label_warnings
Delta: TypeAlias = Union[float, int, str, None]
def _parse_delta(delta: Delta) -> str:
if delta is None or delta == "":
return ""
if isinstance(delta, str):
return dedent(delta)
elif isinstance(delta, int) or isinstance(delta, float):
return str(delta)
else:
raise TypeError(
f"'{str(delta)}' is of type {str(type(delta))}, which is not an accepted type."
" delta only accepts: int, float, str, or None."
" Please convert the value to an accepted type."
) | null |
178,427 | from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
from typing import TYPE_CHECKING, Literal, Union, cast
from typing_extensions import TypeAlias
from streamlit.elements.utils import get_label_visibility_proto_value
from streamlit.errors import StreamlitAPIException
from streamlit.proto.Metric_pb2 import Metric as MetricProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import clean_text
from streamlit.type_util import LabelVisibility, maybe_raise_label_warnings
Delta: TypeAlias = Union[float, int, str, None]
DeltaColor: TypeAlias = Literal["normal", "inverse", "off"]
class MetricColorAndDirection:
color: MetricProto.MetricColor.ValueType
direction: MetricProto.MetricDirection.ValueType
def _is_negative_delta(delta: Delta) -> bool:
return dedent(str(delta)).startswith("-")
class StreamlitAPIException(MarkdownFormattedException):
"""Base class for Streamlit API exceptions.
An API exception should be thrown when user code interacts with the
Streamlit API incorrectly. (That is, when we throw an exception as a
result of a user's malformed `st.foo` call, it should be a
StreamlitAPIException or subclass.)
When displaying these exceptions on the frontend, we strip Streamlit
entries from the stack trace so that the user doesn't see a bunch of
noise related to Streamlit internals.
"""
def __repr__(self) -> str:
return util.repr_(self)
def _determine_delta_color_and_direction(
    delta_color: DeltaColor,
    delta: Delta,
) -> MetricColorAndDirection:
    """Map a delta value and color policy onto proto color/direction enums.

    "normal" colors gains green and losses red, "inverse" flips that, and
    "off" renders everything gray. An absent delta (None or "") gets gray
    with no arrow.
    """
    if delta_color not in {"normal", "inverse", "off"}:
        raise StreamlitAPIException(
            f"'{str(delta_color)}' is not an accepted value. delta_color only accepts: "
            "'normal', 'inverse', or 'off'"
        )

    # No delta at all: gray, no arrow.
    if delta is None or delta == "":
        return MetricColorAndDirection(
            color=MetricProto.MetricColor.GRAY,
            direction=MetricProto.MetricDirection.NONE,
        )

    if _is_negative_delta(delta):
        direction = MetricProto.MetricDirection.DOWN
        palette = {
            "normal": MetricProto.MetricColor.RED,
            "inverse": MetricProto.MetricColor.GREEN,
        }
    else:
        direction = MetricProto.MetricDirection.UP
        palette = {
            "normal": MetricProto.MetricColor.GREEN,
            "inverse": MetricProto.MetricColor.RED,
        }

    # "off" is the only remaining policy, so it falls through to GRAY.
    return MetricColorAndDirection(
        color=palette.get(delta_color, MetricProto.MetricColor.GRAY),
        direction=direction,
    )
178,428 | from __future__ import annotations
import hashlib
from typing import TYPE_CHECKING, Union, cast
from typing_extensions import TypeAlias
from streamlit import type_util
from streamlit.errors import StreamlitAPIException
from streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.util import HASHLIB_KWARGS
FigureOrDot: TypeAlias = Union["graphviz.Graph", "graphviz.Digraph", str]
class StreamlitAPIException(MarkdownFormattedException):
"""Base class for Streamlit API exceptions.
An API exception should be thrown when user code interacts with the
Streamlit API incorrectly. (That is, when we throw an exception as a
result of a user's malformed `st.foo` call, it should be a
StreamlitAPIException or subclass.)
When displaying these exceptions on the frontend, we strip Streamlit
entries from the stack trace so that the user doesn't see a bunch of
noise related to Streamlit internals.
"""
def __repr__(self) -> str:
return util.repr_(self)
The provided code snippet includes necessary dependencies for implementing the `marshall` function. Write a Python function `def marshall( proto: GraphVizChartProto, figure_or_dot: FigureOrDot, use_container_width: bool, element_id: str, ) -> None` to solve the following problem:
Construct a GraphViz chart object. See DeltaGenerator.graphviz_chart for docs.
Here is the function:
def marshall(
    proto: GraphVizChartProto,
    figure_or_dot: FigureOrDot,
    use_container_width: bool,
    element_id: str,
) -> None:
    """Construct a GraphViz chart object.

    See DeltaGenerator.graphviz_chart for docs.
    """
    if type_util.is_graphviz_chart(figure_or_dot):
        # A graphviz.Graph / graphviz.Digraph carries both its DOT source
        # and the engine it was configured with.
        dot_source, engine = figure_or_dot.source, figure_or_dot.engine
    elif isinstance(figure_or_dot, str):
        # A raw DOT-language string renders with the default engine.
        dot_source, engine = figure_or_dot, "dot"
    else:
        raise StreamlitAPIException(
            "Unhandled type for graphviz chart: %s" % type(figure_or_dot)
        )

    proto.spec = dot_source
    proto.engine = engine
    proto.use_container_width = use_container_width
    proto.element_id = element_id
178,429 | from __future__ import annotations
import hashlib
import json
from typing import TYPE_CHECKING, Any, Final, Mapping, cast
from streamlit import config
from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as PydeckProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.util import HASHLIB_KWARGS
EMPTY_MAP: Final[Mapping[str, Any]] = {
"initialViewState": {"latitude": 0, "longitude": 0, "pitch": 0, "zoom": 1},
}
def _get_pydeck_tooltip(pydeck_obj: Deck | None) -> dict[str, str] | None:
import json
HASHLIB_KWARGS: dict[str, Any] = (
{"usedforsecurity": False} if sys.version_info >= (3, 9) else {}
)
def marshall(
    pydeck_proto: PydeckProto,
    pydeck_obj: Deck | None,
    use_container_width: bool,
) -> None:
    """Marshall a pydeck Deck (or an empty placeholder map) into the proto.

    Parameters
    ----------
    pydeck_proto : PydeckProto
        The protobuf to fill in.
    pydeck_obj : pydeck.Deck or None
        The chart to serialize; None renders an empty world map.
    use_container_width : bool
        Whether the frontend should stretch the chart to the container width.
    """
    if pydeck_obj is None:
        spec = json.dumps(EMPTY_MAP)
        # Nothing to fingerprint, so use an empty id.
        element_id = ""
    else:
        spec = pydeck_obj.to_json()
        # Fingerprint the spec so the frontend can distinguish identical
        # charts from changed ones. Note: `spec` is already a JSON string;
        # it is dumped again here so the hash input matches historical ids.
        json_string = json.dumps(spec)
        json_bytes = json_string.encode("utf-8")
        # Fix: this local was named `id`, shadowing the builtin.
        element_id = hashlib.md5(json_bytes, **HASHLIB_KWARGS).hexdigest()

    pydeck_proto.json = spec
    pydeck_proto.use_container_width = use_container_width
    pydeck_proto.id = element_id

    tooltip = _get_pydeck_tooltip(pydeck_obj)
    if tooltip:
        pydeck_proto.tooltip = json.dumps(tooltip)

    # Attach the Mapbox token (if configured) so the frontend can load tiles.
    mapbox_token = config.get_option("mapbox.token")
    if mapbox_token:
        pydeck_proto.mapbox_token = mapbox_token
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.