id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
178,229 | import torch
import numpy as np
import requests
from PIL import Image
from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration
from mplug_owl.processing_mplug_owl import MplugOwlImageProcessor, MplugOwlProcessor
from transformers import AutoTokenizer
The provided code snippet includes necessary dependencies for implementing the `do_generate` function. Write a Python function `def do_generate(prompts, image_list, model, tokenizer, processor, use_bf16=False, **generate_kwargs)` to solve the following problem:
The interface for generation Args: prompts (List[str]): The prompt text image_list (List[str]): Paths of images model (MplugOwlForConditionalGeneration): MplugOwlForConditionalGeneration tokenizer (AutoTokenizer): AutoTokenizer processor (MplugOwlProcessor): MplugOwlProcessor use_bf16 (bool, optional): Whether to use bfloat16. Defaults to False. Returns: sentence (str): Generated sentence.
Here is the function:
def do_generate(prompts, image_list, model, tokenizer, processor, use_bf16=False, **generate_kwargs):
"""The interface for generation
Args:
prompts (List[str]): The prompt text
image_list (List[str]): Paths of images
model (MplugOwlForConditionalGeneration): MplugOwlForConditionalGeneration
tokenizer (AutoTokenizer): AutoTokenizer
processor (MplugOwlProcessor): MplugOwlProcessor
use_bf16 (bool, optional): Whether to use bfloat16. Defaults to False.
Returns:
sentence (str): Generated sentence.
"""
if image_list:
images = [Image.open(_) for _ in image_list]
else:
images = None
inputs = processor(text=prompts, images=images, return_tensors='pt')
inputs = {k: v.bfloat16() if v.dtype == torch.float else v for k, v in inputs.items()}
inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():
res = model.generate(**inputs, **generate_kwargs)
sentence = tokenizer.decode(res.tolist()[0], skip_special_tokens=True)
return sentence | The interface for generation Args: prompts (List[str]): The prompt text image_list (List[str]): Paths of images model (MplugOwlForConditionalGeneration): MplugOwlForConditionalGeneration tokenizer (AutoTokenizer): AutoTokenizer processor (MplugOwlProcessor): MplugOwlProcessor use_bf16 (bool, optional): Whether to use bfloat16. Defaults to False. Returns: sentence (str): Generated sentence. |
178,230 | import math
from typing import Any, Optional, Tuple, Union
import torch
from dataclasses import dataclass
import torch.utils.checkpoint
from torch import nn
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPastAndCrossAttentions
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.auto import AutoModelForCausalLM
from .configuration_mplug_owl import MplugOwlConfig, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
The provided code snippet includes necessary dependencies for implementing the `get_ltor_masks_and_position_ids_from_embeddings` function. Write a Python function `def get_ltor_masks_and_position_ids_from_embeddings(data)` to solve the following problem:
Build masks and position id for left to right model.
Here is the function:
def get_ltor_masks_and_position_ids_from_embeddings(data):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
micro_batch_size, seq_length = data.size()[:2]
# Attention mask (lower triangular).
att_mask_batch = 1
attention_mask = torch.tril(torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)).view(
att_mask_batch, 1, seq_length, seq_length
)
# Loss mask.
loss_mask = torch.ones(data.size()[:2], dtype=torch.float, device=data.device)
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data[..., 0])
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
return attention_mask, loss_mask, position_ids | Build masks and position id for left to right model. |
178,231 | import math
from typing import Any, Optional, Tuple, Union
import torch
from dataclasses import dataclass
import torch.utils.checkpoint
from torch import nn
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPastAndCrossAttentions
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.auto import AutoModelForCausalLM
from .configuration_mplug_owl import MplugOwlConfig, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
def get_media_indices(my_list):
if isinstance(my_list, torch.Tensor):
my_list = my_list.cpu().tolist()
result = []
for i in range(len(my_list)):
if i == 0 and my_list[i] < 0:
result.append(i)
elif my_list[i] != my_list[i - 1] and my_list[i] < 0:
result.append(i)
return result | null |
178,232 | import math
from typing import Any, Optional, Tuple, Union
import torch
from dataclasses import dataclass
import torch.utils.checkpoint
from torch import nn
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPastAndCrossAttentions
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.auto import AutoModelForCausalLM
from .configuration_mplug_owl import MplugOwlConfig, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
logger = logging.get_logger(__name__)
def bloom_forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**deprecated_arguments,
) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
if deprecated_arguments.pop("position_ids", False) is not False:
# `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None`
warnings.warn(
"`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
" passing `position_ids`.",
FutureWarning,
)
if len(deprecated_arguments) > 0:
raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if past_key_values is None:
past_key_values = tuple([None] * len(self.h))
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape batch_size x num_heads x N x N
# head_mask has shape n_layer x batch x num_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
inputs_embeds = self.word_embeddings_layernorm(inputs_embeds)
hidden_states = inputs_embeds
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# Compute alibi tensor: check build_alibi_tensor documentation
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values[0] is not None:
past_key_values_length = past_key_values[0][0].shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
else:
attention_mask = attention_mask.to(hidden_states.device)
alibi = self.build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype)
causal_mask = self._prepare_attn_mask(
attention_mask,
input_shape=(batch_size, seq_length),
past_key_values_length=past_key_values_length,
)
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache=use_cache, output_attentions=output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
alibi,
causal_mask,
layer_past,
head_mask[i],
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=causal_mask,
head_mask=head_mask[i],
use_cache=use_cache,
output_attentions=output_attentions,
alibi=alibi,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
# Add last hidden state
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
) | null |
178,233 | import re
import torch
import torch.utils.checkpoint
from transformers.processing_utils import ProcessorMixin
from transformers.tokenization_utils_base import BatchEncoding
from transformers.models.clip.image_processing_clip import CLIPImageProcessor
The provided code snippet includes necessary dependencies for implementing the `detokenize_generations` function. Write a Python function `def detokenize_generations(tokens_gpu_tensor, lengths_gpu_tensor, return_segments, tokenizer)` to solve the following problem:
Detokenize the generated tokens.
Here is the function:
def detokenize_generations(tokens_gpu_tensor, lengths_gpu_tensor, return_segments, tokenizer):
"""Detokenize the generated tokens."""
prompts_plus_generations = []
if return_segments:
prompts_plus_generations_segments = []
tokens = tokens_gpu_tensor.cpu().numpy().tolist()
lengths = lengths_gpu_tensor.cpu().numpy().tolist()
for sequence_tokens, length in zip(tokens, lengths):
sequence_tokens = sequence_tokens[:length]
prompts_plus_generations.append(tokenizer.detokenize(sequence_tokens))
if return_segments:
from tokenizers.decoders import Metaspace
if hasattr(tokenizer, "tokenizer"):
if isinstance(tokenizer.tokenizer.decoder, Metaspace):
words = tokenizer.tokenizer.decode(sequence_tokens)
else:
words = []
for token in sequence_tokens:
word = tokenizer.tokenizer.decoder[token]
word = bytearray([tokenizer.tokenizer.byte_decoder[c] for c in word]).decode(
"utf-8", errors="replace"
)
words.append(word)
prompts_plus_generations_segments.append(words)
else:
words = tokenizer.detokenize(sequence_tokens)
# else:
# words = []
# for token in sequence_tokens:
# word = tokenizer.tokenizer.decoder[token]
# word = bytearray(
# [tokenizer.tokenizer.byte_decoder[c] for c in word]).decode(
# 'utf-8', errors='replace')
# words.append(word)
prompts_plus_generations_segments.append(words)
if return_segments:
return tokens, prompts_plus_generations, prompts_plus_generations_segments
return tokens, prompts_plus_generations | Detokenize the generated tokens. |
178,234 | import re
import torch
import torch.utils.checkpoint
from transformers.processing_utils import ProcessorMixin
from transformers.tokenization_utils_base import BatchEncoding
from transformers.models.clip.image_processing_clip import CLIPImageProcessor
def _tokenize_prompts_and_batch(prompts, tokens_to_generate, add_BOS, tokenizer, **kwargs):
"""Given a set of prompts and number of tokens to generate:
- tokenize prompts
- set the sequence length to be the max of length of prompts plus the number of tokens we would like to generate
- pad all the sequences to this length so we can convert them into a 2D tensor.
"""
# Tokenize all the prompts.
# if add_BOS:
# prompts_tokens = [[tokenizer.bos] + tokenizer.tokenize(prompt)
# for prompt in prompts]
# else:
# prompts_tokens = [tokenizer.tokenize(prompt) for prompt in prompts]
prompts_tokens = [_tokenize_prompt(prompt, tokenizer, add_BOS, **kwargs) for prompt in prompts]
# Now we have a list of list of tokens which each list has a different
# size. We want to extend this list to:
# - incorporate the tokens that need to be generated
# - make all the sequences equal length.
# Get the prompts length.
prompts_length = [len(prompt_tokens) for prompt_tokens in prompts_tokens]
# Get the max prompts length.
max_prompt_len = max(prompts_length)
# Number of tokens in the each sample of the batch.
samples_length = max_prompt_len + tokens_to_generate
# Now update the list of list to be of the same size: samples_length.
for prompt_tokens, prompt_length in zip(prompts_tokens, prompts_length):
padding_size = samples_length - prompt_length
prompt_tokens.extend([tokenizer.eos_token_id] * padding_size)
# Now we are in a structured format, we can convert to tensors.
prompts_tokens_tensor = torch.LongTensor(prompts_tokens)
prompts_length_tensor = torch.LongTensor(prompts_length)
attention_mask = torch.zeros(prompts_tokens_tensor.shape[:2])
for i, l in enumerate(prompts_length_tensor):
attention_mask[i, :l] = 1
return prompts_tokens_tensor, prompts_length_tensor, attention_mask
The provided code snippet includes necessary dependencies for implementing the `tokenize_prompts` function. Write a Python function `def tokenize_prompts( prompts=None, tokens_to_generate=None, add_BOS=None, rank=0, tokenizer=None, ignore_dist=False, **kwargs )` to solve the following problem:
Tokenize prompts and make them avaiable on all ranks.
Here is the function:
def tokenize_prompts(
prompts=None, tokens_to_generate=None, add_BOS=None, rank=0, tokenizer=None, ignore_dist=False, **kwargs
):
"""Tokenize prompts and make them avaiable on all ranks."""
# On all ranks set to None so we can pass them to functions
prompts_tokens_cuda_long_tensor = None
prompts_length_cuda_long_tensor = None
# On the specified rank, build the above.
attention_mask = None
if ignore_dist or torch.distributed.get_rank() == rank:
assert prompts is not None
assert tokens_to_generate is not None
# Tensor of tokens padded and their unpadded length.
prompts_tokens_cuda_long_tensor, prompts_length_cuda_long_tensor, attention_mask = _tokenize_prompts_and_batch(
prompts, tokens_to_generate, add_BOS, tokenizer, **kwargs
)
return {
"input_ids": prompts_tokens_cuda_long_tensor,
"attention_mask": attention_mask,
# "prompt_length": prompts_length_cuda_long_tensor,
} | Tokenize prompts and make them avaiable on all ranks. |
178,235 | import requests
import streamlit as st
def audio():
url = "https://www.w3schools.com/html/horse.ogg"
file = requests.get(url).content
st.audio(file) | null |
178,236 | import requests
import streamlit as st
def video():
url = "https://www.w3schools.com/html/mov_bbb.mp4"
file = requests.get(url).content
st.video(file) | null |
178,237 | import streamlit as st
st.set_page_config(
page_title="Heya, world?",
page_icon=":shark:",
layout="wide",
initial_sidebar_state="collapsed",
)
st.sidebar.button("Sidebar!")
st.markdown("Main!")
st.button("Balloons", on_click=show_balloons)
st.button("Double Set Page Config", on_click=double_set_page_config)
st.button("Single Set Page Config", on_click=single_set_page_config)
def show_balloons():
st.balloons() | null |
178,238 | import streamlit as st
st.set_page_config(
page_title="Heya, world?",
page_icon=":shark:",
layout="wide",
initial_sidebar_state="collapsed",
)
st.sidebar.button("Sidebar!")
st.markdown("Main!")
st.button("Balloons", on_click=show_balloons)
st.button("Double Set Page Config", on_click=double_set_page_config)
st.button("Single Set Page Config", on_click=single_set_page_config)
def double_set_page_config():
st.set_page_config(
page_title="Change 1",
page_icon=":shark:",
layout="wide",
initial_sidebar_state="collapsed",
)
st.set_page_config(
page_title="Change 2",
page_icon=":shark:",
layout="wide",
initial_sidebar_state="collapsed",
) | null |
178,239 | import streamlit as st
st.set_page_config(
page_title="Heya, world?",
page_icon=":shark:",
layout="wide",
initial_sidebar_state="collapsed",
)
st.sidebar.button("Sidebar!")
st.markdown("Main!")
st.button("Balloons", on_click=show_balloons)
st.button("Double Set Page Config", on_click=double_set_page_config)
st.button("Single Set Page Config", on_click=single_set_page_config)
def single_set_page_config():
st.set_page_config(
page_title="Change 3",
page_icon=":shark:",
layout="wide",
initial_sidebar_state="collapsed",
) | null |
178,240 | import streamlit as st
st.write(
"""
If this is failing locally, it could be because you have a browser with
Streamlit open. Close it and the test should pass.
"""
)
def cached_write(value):
st.write(value) | null |
178,241 | import streamlit as st
st.write(
"""
If this is failing locally, it could be because you have a browser with
Streamlit open. Close it and the test should pass.
"""
)
def cached_write_nowarn(value):
st.write(value) | null |
178,242 | import streamlit as st
st.write(
"""
If this is failing locally, it could be because you have a browser with
Streamlit open. Close it and the test should pass.
"""
)
def cached_widget(name):
st.button(name) | null |
178,243 | import io
import numpy as np
from PIL import Image, ImageDraw
import streamlit as st
def create_gif(size, frames=1):
# Create grayscale image.
im = Image.new("L", (size, size), "white")
images = []
# Make circle of a constant size with a number of frames, moving across the
# principal diagonal of a 64x64 image. The GIF will not loop and stops
# animating after frames x 100ms.
for i in range(0, frames):
frame = im.copy()
draw = ImageDraw.Draw(frame)
pos = (i, i)
circle_size = size / 2
draw.ellipse([pos, tuple(p + circle_size for p in pos)], "black")
images.append(frame.copy())
# Save the frames as an animated GIF
data = io.BytesIO()
images[0].save(
data,
format="GIF",
save_all=True,
append_images=images[1:],
duration=1,
)
return data.getvalue() | null |
178,244 | import io
import numpy as np
from PIL import Image, ImageDraw
import streamlit as st
img = np.repeat(0, 10000).reshape(100, 100)
def numpy_image():
st.image(img, caption="Black Square with no output format specified", width=100) | null |
178,245 | import io
import numpy as np
from PIL import Image, ImageDraw
import streamlit as st
def svg_image():
st.image(
"""
<svg>
<circle cx="50" cy="50" r="40" stroke="black" stroke-width="3" fill="red" />
</svg>
"""
) | null |
178,246 | import io
import numpy as np
from PIL import Image, ImageDraw
import streamlit as st
gif = create_gif(64, frames=32)
def gif_image():
st.image(gif, width=100) | null |
178,247 | import io
import numpy as np
from PIL import Image, ImageDraw
import streamlit as st
def url_image():
st.image("https://avatars.githubusercontent.com/anoctopus", width=200) | null |
178,248 | import streamlit as st
from streamlit import runtime
def on_checkbox_change(changed_checkbox_number):
if changed_checkbox_number == 1:
st.session_state.checkbox2 = False
elif changed_checkbox_number == 2:
st.session_state.checkbox1 = False | null |
178,249 | import io
from typing import TYPE_CHECKING, Any
import numpy as np
from PIL import Image, ImageDraw
import streamlit as st
def create_gif(size, frames=1):
# Create grayscale image.
im = Image.new("L", (size, size), "white")
images = []
# Make circle of a constant size with a number of frames, moving across the
# principal diagonal of a 64x64 image. The GIF will not loop and stops
# animating after frames x 100ms.
for i in range(0, frames):
frame = im.copy()
draw = ImageDraw.Draw(frame)
pos = (i, i)
circle_size = size / 2
draw.ellipse([pos, tuple(p + circle_size for p in pos)], "black")
images.append(frame.copy())
# Save the frames as an animated GIF
data = io.BytesIO()
images[0].save(
data,
format="GIF",
save_all=True,
append_images=images[1:],
duration=1,
)
return data.getvalue() | null |
178,250 | import streamlit as st
with st.sidebar:
st.text_input("This is a label", key="1")
draw_header_test(True)
st.text("Headers in single st.markdown")
st.text("Headers in multiple st.markdown")
st.text("Headers in columns")
st.text("Headers in columns with other elements above")
st.text("Headers in column beside widget")
st.text("End of page")
def draw_header_test(join_output):
strings = [
"# Header header",
"## Header header",
"### Header header",
"#### Header header",
"##### Header header",
"###### Header header",
"Quisque vel blandit mi. Fusce dignissim leo purus, in imperdiet lectus suscipit nec.",
]
if join_output:
st.write("\n\n".join(strings))
else:
for string in strings:
st.write(string) | null |
178,251 | import streamlit as st
def rerun_record():
return [0] | null |
178,252 | import numpy as np
import pandas as pd
import streamlit as st
The provided code snippet includes necessary dependencies for implementing the `color_negative_red` function. Write a Python function `def color_negative_red(val)` to solve the following problem:
Takes a scalar and returns a string with the css property `'color: red'` for negative strings, black otherwise.
Here is the function:
def color_negative_red(val):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
strings, black otherwise.
"""
color = "red" if val < 0 else "black"
return "color: %s" % color | Takes a scalar and returns a string with the css property `'color: red'` for negative strings, black otherwise. |
178,253 | import numpy as np
import pandas as pd
import streamlit as st
np.random.seed(24)
The provided code snippet includes necessary dependencies for implementing the `highlight_max` function. Write a Python function `def highlight_max(data, color="yellow")` to solve the following problem:
highlight the maximum in a Series or DataFrame
Here is the function:
def highlight_max(data, color="yellow"):
"""highlight the maximum in a Series or DataFrame"""
attr = "background-color: {}".format(color)
if data.ndim == 1: # Series from .apply(axis=0) or axis=1
is_max = data == data.max()
return [attr if v else "" for v in is_max]
else: # from .apply(axis=None)
is_max = data == data.max().max()
return pd.DataFrame(
np.where(is_max, attr, ""), index=data.index, columns=data.columns
) | highlight the maximum in a Series or DataFrame |
178,254 | import numpy as np
import streamlit as st
img = np.repeat(0, 10000).reshape(100, 100)
if st.checkbox("show image", True):
image()
def image():
st.image(img) | null |
178,255 | import streamlit as st
from streamlit import runtime
with st.form("foo"):
form_file = st.file_uploader("Inside form:", type=["txt"])
st.form_submit_button("Submit")
if form_file is None:
st.text("No upload")
else:
st.text(form_file.read())
def file_uploader_on_change():
st.session_state.counter += 1 | null |
178,256 | import streamlit as st
st.button("click to rerun")
side_effects = []
st.text(side_effects)
def foo():
side_effects.append("function ran")
r = st.radio("radio", ["foo", "bar", "baz", "qux"], index=1)
return r | null |
178,257 | from datetime import date
import streamlit as st
from streamlit import runtime
st.sidebar.write("Value A:", s1)
st.sidebar.write("Range Value A:", r1)
with st.sidebar.expander("Expander", expanded=True):
s2 = st.slider("Label B", 10000, 25000, 10000)
st.write("Value B:", s2)
r2 = st.slider("Range B", 10000, 25000, [10000, 25000])
st.write("Range Value B:", r2)
st.write("Value 1:", w1)
st.write("Value 2:", w2)
st.write("Value 3:", w3)
st.write("Value 4:", w4)
st.write("Value 5:", w5)
st.write("Value 6:", w6)
st.write("Value 7:", dates[0], dates[1])
def on_change():
st.session_state.slider_changed = True | null |
178,258 | import numpy as np
import pandas as pd
import streamlit as st
from streamlit import runtime
st.write("Value 1:", w1)
st.write("Value 2:", w2)
st.write("Value 3:", w3)
st.write("Value 4:", w4)
st.write("Value 5:", w5)
st.write("Value 6:", w6)
st.write("Value 7:", w7)
with st.expander("Expander", expanded=True):
w9 = st.select_slider(
label="Label 9",
options=["foo", "bar", "baz", "This is a very, very long option"],
value="This is a very, very long option",
)
st.write("Value 9:", w9)
def on_change():
st.session_state.select_slider_changed = True | null |
178,259 | import random
import streamlit as st
from streamlit import config
cache_was_hit = True
if cache_was_hit:
st.warning("You must clear your cache before you run this script!")
st.write(
"""
To clear the cache, press `C` then `Enter`. Then press `R` on this page
to rerun.
"""
)
else:
st.warning(
"""
IMPORTANT: You should test rerunning this script (to get a failing
test), then clearing the cache with the `C` shortcut and checking that
the test passes again.
"""
)
st.subheader("Test that basic caching works")
u = my_func(1, 2, dont_care=10)
v = my_func(1, 2, dont_care=10)
if u == v:
st.success("OK")
else:
st.error("Fail")
st.subheader("Test that when you change arguments it's a cache miss")
v = my_func(10, 2, dont_care=10)
if u != v:
st.success("OK")
else:
st.error("Fail")
st.subheader("Test that when you change **kwargs it's a cache miss")
v = my_func(10, 2, dont_care=100)
if u != v:
st.success("OK")
else:
st.error("Fail")
st.subheader("Test that you can turn off caching")
config.set_option("client.caching", False)
v = my_func(1, 2, dont_care=10)
if u != v:
st.success("OK")
else:
st.error("Fail")
st.subheader("Test that you can turn on caching")
config.set_option("client.caching", True)
# Redefine my_func because the st.cache-decorated function "remembers" the
# config option from when it was declared.
u = my_func(1, 2, dont_care=10)
v = my_func(1, 2, dont_care=10)
if u == v:
st.success("OK")
else:
st.error("Fail")
def check_if_cached():
global cache_was_hit
cache_was_hit = False | null |
178,260 | import random
import streamlit as st
from streamlit import config
def my_func(arg1, arg2=None, *args, **kwargs):
return random.randint(0, 2**32) | null |
178,262 | import os
import streamlit as st
VIDEO_EXTENSIONS = ["mp4", "ogv", "m4v", "webm"]
def get_video_files_in_dir(directory):
out = []
for item in os.listdir(directory):
try:
name, ext = item.split(".")
except:
continue
if name and ext:
if ext in VIDEO_EXTENSIONS:
out.append(item)
return out | null |
178,263 | import os
import streamlit as st
def shorten_vid_option(opt):
return opt.split("/")[-1] | null |
178,264 | import io
import os
import wave
import numpy as np
import streamlit as st
AUDIO_EXTENSIONS = ["wav", "flac", "mp3", "aac", "ogg", "oga", "m4a", "opus", "wma"]
def get_audio_files_in_dir(directory):
out = []
for item in os.listdir(directory):
try:
name, ext = item.split(".")
except:
continue
if name and ext:
if ext in AUDIO_EXTENSIONS:
out.append(item)
return out | null |
178,265 | import io
import os
import wave
import numpy as np
import streamlit as st
def note(freq, length, amp, rate):
t = np.linspace(0, length, length * rate)
data = np.sin(2 * np.pi * freq * t) * amp
return data.astype(np.int16) | null |
178,266 | import io
import os
import wave
import numpy as np
import streamlit as st
def shorten_audio_option(opt):
return opt.split("/")[-1] | null |
178,267 | from __future__ import annotations
import re
import textwrap
from typing import TYPE_CHECKING, Any, Final, cast
from streamlit.errors import StreamlitAPIException
The provided code snippet includes necessary dependencies for implementing the `decode_ascii` function. Write a Python function `def decode_ascii(string: bytes) -> str` to solve the following problem:
Decodes a string as ascii.
Here is the function:
def decode_ascii(string: bytes) -> str:
"""Decodes a string as ascii."""
return string.decode("ascii") | Decodes a string as ascii. |
178,268 | from __future__ import annotations
import re
import textwrap
from typing import TYPE_CHECKING, Any, Final, cast
from streamlit.errors import StreamlitAPIException
def is_emoji(text: str) -> bool:
class StreamlitAPIException(MarkdownFormattedException):
def __repr__(self) -> str:
def validate_emoji(maybe_emoji: str | None) -> str:
if maybe_emoji is None:
return ""
elif is_emoji(maybe_emoji):
return maybe_emoji
else:
raise StreamlitAPIException(
f'The value "{maybe_emoji}" is not a valid emoji. Shortcodes are not allowed, please use a single character instead.'
) | null |
178,269 | from __future__ import annotations
import re
import textwrap
from typing import TYPE_CHECKING, Any, Final, cast
from streamlit.errors import StreamlitAPIException
The provided code snippet includes necessary dependencies for implementing the `max_char_sequence` function. Write a Python function `def max_char_sequence(string: str, char: str) -> int` to solve the following problem:
Returns the count of the max sequence of a given char in a string.
Here is the function:
def max_char_sequence(string: str, char: str) -> int:
"""Returns the count of the max sequence of a given char in a string."""
max_sequence = 0
current_sequence = 0
for c in string:
if c == char:
current_sequence += 1
max_sequence = max(max_sequence, current_sequence)
else:
current_sequence = 0
return max_sequence | Returns the count of the max sequence of a given char in a string. |
178,270 | from __future__ import annotations
import re
import textwrap
from typing import TYPE_CHECKING, Any, Final, cast
from streamlit.errors import StreamlitAPIException
_RE_CONTAINS_HTML: Final = re.compile(r"(?:</[^<]+>)|(?:<[^<]+/>)")
The provided code snippet includes necessary dependencies for implementing the `probably_contains_html_tags` function. Write a Python function `def probably_contains_html_tags(s: str) -> bool` to solve the following problem:
Returns True if the given string contains what seem to be HTML tags. Note that false positives/negatives are possible, so this function should not be used in contexts where complete correctness is required.
Here is the function:
def probably_contains_html_tags(s: str) -> bool:
"""Returns True if the given string contains what seem to be HTML tags.
Note that false positives/negatives are possible, so this function should not be
used in contexts where complete correctness is required."""
return bool(_RE_CONTAINS_HTML.search(s)) | Returns True if the given string contains what seem to be HTML tags. Note that false positives/negatives are possible, so this function should not be used in contexts where complete correctness is required. |
178,271 | from __future__ import annotations
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.scriptrunner import get_script_run_ctx
The provided code snippet includes necessary dependencies for implementing the `post_parent_message` function. Write a Python function `def post_parent_message(message: str) -> None` to solve the following problem:
Sends a string message to the parent window (when host configuration allows).
Here is the function:
def post_parent_message(message: str) -> None:
    """Send a string message to the parent window (when host configuration allows)."""
    ctx = get_script_run_ctx()
    if ctx is None:
        # No active script run; there is nowhere to enqueue the message.
        return

    msg = ForwardMsg()
    msg.parent_message.message = message
    ctx.enqueue(msg)
178,272 | from __future__ import annotations
import os
from typing import Final, NoReturn
import streamlit as st
from streamlit import source_util
from streamlit.deprecation_util import make_deprecated_name_warning
from streamlit.errors import NoSessionContext, StreamlitAPIException
from streamlit.file_util import get_main_script_directory, normalize_path_join
from streamlit.logger import get_logger
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import RerunData, get_script_run_ctx
The provided code snippet includes necessary dependencies for implementing the `stop` function. Write a Python function `def stop() -> NoReturn` to solve the following problem:
Stops execution immediately. Streamlit will not run any statements after `st.stop()`. We recommend rendering a message to explain why the script has stopped. Example ------- >>> import streamlit as st >>> >>> name = st.text_input('Name') >>> if not name: >>> st.warning('Please input a name.') >>> st.stop() >>> st.success('Thank you for inputting a name.')
Here is the function:
def stop() -> NoReturn:  # type: ignore[misc]
    """Stops execution immediately.
    Streamlit will not run any statements after `st.stop()`.
    We recommend rendering a message to explain why the script has stopped.
    Example
    -------
    >>> import streamlit as st
    >>>
    >>> name = st.text_input('Name')
    >>> if not name:
    >>>   st.warning('Please input a name.')
    >>>   st.stop()
    >>> st.success('Thank you for inputting a name.')
    """
    ctx = get_script_run_ctx()
    if ctx is None or not ctx.script_requests:
        return
    ctx.script_requests.request_stop()
    # Force a yield point so the runner can stop
    st.empty()
178,273 | from __future__ import annotations
import os
from typing import Final, NoReturn
import streamlit as st
from streamlit import source_util
from streamlit.deprecation_util import make_deprecated_name_warning
from streamlit.errors import NoSessionContext, StreamlitAPIException
from streamlit.file_util import get_main_script_directory, normalize_path_join
from streamlit.logger import get_logger
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import RerunData, get_script_run_ctx
# Module logger; deprecation notices are logged here because a frontend
# warning would be interrupted by the subsequent rerun (see experimental_rerun).
_LOGGER: Final = get_logger(__name__)
def rerun() -> NoReturn:  # type: ignore[misc]
    """Rerun the script immediately.
    When ``st.rerun()`` is called, the script is halted - no more statements will
    be run, and the script will be queued to re-run from the top.
    """
    ctx = get_script_run_ctx()
    if ctx and ctx.script_requests:
        # Re-run the same page with the same query string.
        ctx.script_requests.request_rerun(
            RerunData(
                query_string=ctx.query_string,
                page_script_hash=ctx.page_script_hash,
            )
        )
        # Force a yield point so the runner can do the rerun
        st.empty()
def make_deprecated_name_warning(
    old_name: str,
    new_name: str,
    removal_date: str,
    extra_message: str | None = None,
    include_st_prefix: bool = True,
) -> str:
    """Build the markdown warning shown when a deprecated name is used."""
    if include_st_prefix:
        old_name, new_name = f"st.{old_name}", f"st.{new_name}"

    parts = [
        f"Please replace `{old_name}` with `{new_name}`.\n\n"
        f"`{old_name}` will be removed after {removal_date}."
    ]
    if extra_message:
        parts.append(f"\n\n{extra_message}")
    return "".join(parts)
The provided code snippet includes necessary dependencies for implementing the `experimental_rerun` function. Write a Python function `def experimental_rerun() -> NoReturn` to solve the following problem:
Rerun the script immediately. When ``st.experimental_rerun()`` is called, the script is halted - no more statements will be run, and the script will be queued to re-run from the top.
Here is the function:
def experimental_rerun() -> NoReturn:
    """Rerun the script immediately.
    When ``st.experimental_rerun()`` is called, the script is halted - no
    more statements will be run, and the script will be queued to re-run
    from the top.
    """
    # Log the deprecation warning before rerunning, or else it would be
    # interrupted by the rerun. We do not send a frontend warning because
    # it wouldn't be seen.
    _LOGGER.warning(
        make_deprecated_name_warning("experimental_rerun", "rerun", "2024-04-01")
    )
    rerun()
178,274 | from __future__ import annotations
import os
from typing import Final, NoReturn
import streamlit as st
from streamlit import source_util
from streamlit.deprecation_util import make_deprecated_name_warning
from streamlit.errors import NoSessionContext, StreamlitAPIException
from streamlit.file_util import get_main_script_directory, normalize_path_join
from streamlit.logger import get_logger
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import RerunData, get_script_run_ctx
class NoSessionContext(Error):
    """Raised when an operation requires an active script-run context but none exists."""
    pass
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.
    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)
    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """
    def __repr__(self) -> str:
        # Delegate to the shared util helper so Streamlit exceptions repr consistently.
        return util.repr_(self)
def normalize_path_join(*args):
    """Join the given path components and return the normalized result.

    Parameters
    ----------
    *args : str
        The path components to join.

    Returns
    -------
    str
        The normalized path of the joined components.
    """
    joined = os.path.join(*args)
    return os.path.normpath(joined)
def get_main_script_directory(main_script):
    """Return the full path to the directory containing the main script.

    Parameters
    ----------
    main_script : str
        The main script path; may be absolute or relative (relative paths
        are resolved against the current working directory).

    Returns
    -------
    str
        The full path to the main script's directory.
    """
    # Anchor relative paths at the CWD, normalize, then drop the filename.
    return os.path.dirname(normalize_path_join(os.getcwd(), main_script))
The provided code snippet includes necessary dependencies for implementing the `switch_page` function. Write a Python function `def switch_page(page: str) -> NoReturn` to solve the following problem:
Programmatically switch the current page in a multipage app. When ``st.switch_page`` is called, the current page execution stops and the specified page runs as if the user clicked on it in the sidebar navigation. The specified page must be recognized by Streamlit's multipage architecture (your main Python file or a Python file in a ``pages/`` folder). Arbitrary Python scripts cannot be passed to ``st.switch_page``. Parameters ---------- page: str The file path (relative to the main script) of the page to switch to. Example ------- Consider the following example given this file structure: >>> your-repository/ >>> ├── pages/ >>> │ ├── page_1.py >>> │ └── page_2.py >>> └── your_app.py >>> import streamlit as st >>> >>> if st.button("Home"): >>> st.switch_page("your_app.py") >>> if st.button("Page 1"): >>> st.switch_page("pages/page_1.py") >>> if st.button("Page 2"): >>> st.switch_page("pages/page_2.py") .. output :: https://doc-switch-page.streamlit.app/ height: 350px
Here is the function:
def switch_page(page: str) -> NoReturn:  # type: ignore[misc]
    """Programmatically switch the current page in a multipage app.

    When ``st.switch_page`` is called, the current page execution stops and
    the specified page runs as if the user clicked on it in the sidebar
    navigation. The specified page must be recognized by Streamlit's
    multipage architecture (your main Python file or a Python file in a
    ``pages/`` folder). Arbitrary Python scripts cannot be passed to
    ``st.switch_page``.

    Parameters
    ----------
    page: str
        The file path (relative to the main script) of the page to switch to.

    Example
    -------
    >>> import streamlit as st
    >>>
    >>> if st.button("Home"):
    >>>     st.switch_page("your_app.py")
    >>> if st.button("Page 1"):
    >>>     st.switch_page("pages/page_1.py")
    """
    ctx = get_script_run_ctx()
    if not ctx or not ctx.script_requests:
        # This should never be the case
        raise NoSessionContext()

    main_script_directory = get_main_script_directory(ctx.main_script_path)
    requested_page = os.path.realpath(normalize_path_join(main_script_directory, page))
    # Only pages registered by the multipage architecture are valid targets.
    matched = [
        p
        for p in source_util.get_pages(ctx.main_script_path).values()
        if p["script_path"] == requested_page
    ]
    if not matched:
        raise StreamlitAPIException(
            f"Could not find page: `{page}`. Must be the file path relative to the main script, from the directory: `{os.path.basename(main_script_directory)}`. Only the main app file and files in the `pages/` directory are supported."
        )

    ctx.script_requests.request_rerun(
        RerunData(
            query_string=ctx.query_string,
            page_script_hash=matched[0]["page_script_hash"],
        )
    )
    # Force a yield point so the runner can do the rerun
    st.empty()
178,275 | from __future__ import annotations
import random
from textwrap import dedent
from typing import TYPE_CHECKING, Final, Literal, Mapping, Union, cast
from typing_extensions import TypeAlias
from streamlit.elements import image
from streamlit.errors import StreamlitAPIException
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg as ForwardProto
from streamlit.proto.PageConfig_pb2 import PageConfig as PageConfigProto
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
from streamlit.string_util import is_emoji
from streamlit.url_util import is_url
from streamlit.util import lower_clean_dict_keys
# Favicon input: anything st.image accepts, or a str (emoji / shortcode / "random").
PageIcon: TypeAlias = Union[image.AtomicImage, str]
# Page-content layout options accepted by st.set_page_config.
Layout: TypeAlias = Literal["centered", "wide"]
# Initial sidebar visibility options accepted by st.set_page_config.
InitialSideBarState: TypeAlias = Literal["auto", "expanded", "collapsed"]
# Menu-item key -> URL/markdown; a None value hides the corresponding item.
MenuItems: TypeAlias = Mapping[MenuKey, Union[str, None]]
def _get_favicon_string(page_icon: PageIcon) -> str:
    """Translate ``page_icon`` into the string the frontend expects.

    ``"random"`` picks a random emoji, emoji strings pass through as-is,
    and anything else is converted to a URL via ``image.image_to_url``.
    If that conversion raises and ``page_icon`` is a string (e.g. an emoji
    shortcode like ":shark:"), the string is returned unmodified instead
    of re-raising.
    """
    if page_icon == "random":
        # Let the frontend display a randomly chosen emoji.
        return get_random_emoji()

    if isinstance(page_icon, str) and is_emoji(page_icon):
        return page_icon

    # Fall back to converting the icon to an image URL.
    try:
        return image.image_to_url(
            page_icon,
            width=-1,  # Always use full width for favicons
            clamp=False,
            channels="RGB",
            output_format="auto",
            image_id="favicon",
        )
    except Exception:
        if isinstance(page_icon, str):
            # Emoji shortcode strings (e.g. ":shark:") aren't valid
            # filenames, so image_to_url raises; pass them through untouched.
            return page_icon
        raise
def set_menu_items_proto(lowercase_menu_items, menu_items_proto) -> None:
    """Copy validated menu-item entries onto the PageConfig menu proto.

    A key mapped to None hides the corresponding menu item; a missing key
    leaves the frontend default in place.
    """
    if GET_HELP_KEY in lowercase_menu_items:
        url = lowercase_menu_items[GET_HELP_KEY]
        if url is None:
            menu_items_proto.hide_get_help = True
        else:
            menu_items_proto.get_help_url = url

    if REPORT_A_BUG_KEY in lowercase_menu_items:
        url = lowercase_menu_items[REPORT_A_BUG_KEY]
        if url is None:
            menu_items_proto.hide_report_a_bug = True
        else:
            menu_items_proto.report_a_bug_url = url

    if ABOUT_KEY in lowercase_menu_items:
        about_md = lowercase_menu_items[ABOUT_KEY]
        if about_md is not None:
            # Dedent so triple-quoted user markdown renders cleanly.
            menu_items_proto.about_section_md = dedent(about_md)
def validate_menu_items(menu_items: MenuItems) -> None:
    """Validate user-supplied ``menu_items`` for ``st.set_page_config``.

    Raises
    ------
    StreamlitAPIException
        If a key is not one of "Get help", "Report a bug", "About", or if
        a non-About value is not an http/https/mailto URL.
    """
    for k, v in menu_items.items():
        if not valid_menu_item_key(k):
            raise StreamlitAPIException(
                "We only accept the keys: "
                '"Get help", "Report a bug", and "About" '
                f'("{k}" is not a valid key.)'
            )
        if v is not None and (
            not is_url(v, ("http", "https", "mailto")) and k != ABOUT_KEY
        ):
            # Fixed grammar in the user-facing message ("is a not a" -> "is not a").
            raise StreamlitAPIException(f'"{v}" is not a valid URL!')
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.
    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)
    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """
    def __repr__(self) -> str:
        # Delegate to the shared util helper so Streamlit exceptions repr consistently.
        return util.repr_(self)
def lower_clean_dict_keys(dict: Mapping[_Key, _Value]) -> dict[str, _Value]:
    """Return a copy of *dict* with each key lowercased and stripped of surrounding whitespace."""
    cleaned = {}
    for key, value in dict.items():
        cleaned[key.lower().strip()] = value
    return cleaned
The provided code snippet includes necessary dependencies for implementing the `set_page_config` function. Write a Python function `def set_page_config( page_title: str | None = None, page_icon: PageIcon | None = None, layout: Layout = "centered", initial_sidebar_state: InitialSideBarState = "auto", menu_items: MenuItems | None = None, ) -> None` to solve the following problem:
Configures the default settings of the page. .. note:: This must be the first Streamlit command used on an app page, and must only be set once per page. Parameters ---------- page_title: str or None The page title, shown in the browser tab. If None, defaults to the filename of the script ("app.py" would show "app • Streamlit"). page_icon : Anything supported by st.image or str or None The page favicon. Besides the types supported by `st.image` (like URLs or numpy arrays), you can pass in an emoji as a string ("🦈") or a shortcode (":shark:"). If you're feeling lucky, try "random" for a random emoji! Emoji icons are courtesy of Twemoji and loaded from MaxCDN. layout: "centered" or "wide" How the page content should be laid out. Defaults to "centered", which constrains the elements into a centered column of fixed width; "wide" uses the entire screen. initial_sidebar_state: "auto", "expanded", or "collapsed" How the sidebar should start out. Defaults to "auto", which hides the sidebar on small devices and shows it otherwise. "expanded" shows the sidebar initially; "collapsed" hides it. In most cases, you should just use "auto", otherwise the app will look bad when embedded and viewed on mobile. menu_items: dict Configure the menu that appears on the top-right side of this app. The keys in this dict denote the menu item you'd like to configure: - "Get help": str or None The URL this menu item should point to. If None, hides this menu item. - "Report a Bug": str or None The URL this menu item should point to. If None, hides this menu item. - "About": str or None A markdown string to show in the About dialog. If None, only shows Streamlit's default About text. The URL may also refer to an email address e.g. ``mailto:john@example.com``. Example ------- >>> import streamlit as st >>> >>> st.set_page_config( ... page_title="Ex-stream-ly Cool App", ... page_icon="🧊", ... layout="wide", ... initial_sidebar_state="expanded", ... menu_items={ ... 
'Get Help': 'https://www.extremelycoolapp.com/help', ... 'Report a bug': "https://www.extremelycoolapp.com/bug", ... 'About': "# This is a header. This is an *extremely* cool app!" ... } ... )
Here is the function:
def set_page_config(
    page_title: str | None = None,
    page_icon: PageIcon | None = None,
    layout: Layout = "centered",
    initial_sidebar_state: InitialSideBarState = "auto",
    menu_items: MenuItems | None = None,
) -> None:
    """
    Configures the default settings of the page.

    .. note::
        This must be the first Streamlit command used on an app page, and must
        only be set once per page.

    Parameters
    ----------
    page_title: str or None
        The page title, shown in the browser tab. If None, defaults to the
        filename of the script ("app.py" would show "app • Streamlit").
    page_icon : Anything supported by st.image or str or None
        The page favicon. Besides the types supported by `st.image` (like
        URLs or numpy arrays), you can pass in an emoji as a string ("🦈")
        or a shortcode (":shark:"). If you're feeling lucky, try "random"
        for a random emoji!
    layout: "centered" or "wide"
        How the page content should be laid out. Defaults to "centered",
        which constrains the elements into a centered column of fixed width;
        "wide" uses the entire screen.
    initial_sidebar_state: "auto", "expanded", or "collapsed"
        How the sidebar should start out. Defaults to "auto", which hides
        the sidebar on small devices and shows it otherwise.
    menu_items: dict
        Configure the menu that appears on the top-right side of this app.
        Supported keys: "Get help" (URL or None to hide), "Report a Bug"
        (URL or None to hide), "About" (markdown string or None for the
        default About text). URLs may also be mailto links.

    Example
    -------
    >>> import streamlit as st
    >>>
    >>> st.set_page_config(
    ...     page_title="Ex-stream-ly Cool App",
    ...     page_icon="🧊",
    ...     layout="wide",
    ...     initial_sidebar_state="expanded",
    ...     menu_items={
    ...         'Get Help': 'https://www.extremelycoolapp.com/help',
    ...         'Report a bug': "https://www.extremelycoolapp.com/bug",
    ...         'About': "# This is a header. This is an *extremely* cool app!"
    ...     }
    ... )
    """
    msg = ForwardProto()

    if page_title is not None:
        msg.page_config_changed.title = page_title
    if page_icon is not None:
        msg.page_config_changed.favicon = _get_favicon_string(page_icon)

    # Map the user-facing layout string onto the proto enum.
    layout_values = {
        "centered": PageConfigProto.CENTERED,
        "wide": PageConfigProto.WIDE,
    }
    if layout not in layout_values:
        raise StreamlitAPIException(
            f'`layout` must be "centered" or "wide" (got "{layout}")'
        )
    msg.page_config_changed.layout = layout_values[layout]

    # Map the user-facing sidebar-state string onto the proto enum.
    sidebar_values = {
        "auto": PageConfigProto.AUTO,
        "expanded": PageConfigProto.EXPANDED,
        "collapsed": PageConfigProto.COLLAPSED,
    }
    if initial_sidebar_state not in sidebar_values:
        raise StreamlitAPIException(
            "`initial_sidebar_state` must be "
            '"auto" or "expanded" or "collapsed" '
            f'(got "{initial_sidebar_state}")'
        )
    msg.page_config_changed.initial_sidebar_state = sidebar_values[
        initial_sidebar_state
    ]

    if menu_items is not None:
        # Keys are matched case/whitespace-insensitively.
        lowercase_menu_items = cast(MenuItems, lower_clean_dict_keys(menu_items))
        validate_menu_items(lowercase_menu_items)
        set_menu_items_proto(lowercase_menu_items, msg.page_config_changed.menu_items)

    ctx = get_script_run_ctx()
    if ctx is not None:
        ctx.enqueue(msg)
'Get Help': 'https://www.extremelycoolapp.com/help', ... 'Report a bug': "https://www.extremelycoolapp.com/bug", ... 'About': "# This is a header. This is an *extremely* cool app!" ... } ... ) |
178,276 | from __future__ import annotations
import urllib.parse as parse
from typing import Any
from streamlit import util
from streamlit.constants import (
EMBED_OPTIONS_QUERY_PARAM,
EMBED_QUERY_PARAM,
EMBED_QUERY_PARAMS_KEYS,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
# Reserved query params: stripped from user-visible results and rejected when
# user code tries to set them.
EMBED_QUERY_PARAMS_KEYS = [EMBED_QUERY_PARAM, EMBED_OPTIONS_QUERY_PARAM]
The provided code snippet includes necessary dependencies for implementing the `get_query_params` function. Write a Python function `def get_query_params() -> dict[str, list[str]]` to solve the following problem:
Return the query parameters that is currently showing in the browser's URL bar. Returns ------- dict The current query parameters as a dict. "Query parameters" are the part of the URL that comes after the first "?". Example ------- Let's say the user's web browser is at `http://localhost:8501/?show_map=True&selected=asia&selected=america`. Then, you can get the query parameters using the following: >>> import streamlit as st >>> >>> st.experimental_get_query_params() {"show_map": ["True"], "selected": ["asia", "america"]} Note that the values in the returned dict are *always* lists. This is because we internally use Python's urllib.parse.parse_qs(), which behaves this way. And this behavior makes sense when you consider that every item in a query string is potentially a 1-element array.
Here is the function:
def get_query_params() -> dict[str, list[str]]:
    """Return the query parameters currently shown in the browser's URL bar.

    Returns
    -------
    dict
        The current query parameters as a dict mapping each key to a list
        of values. "Query parameters" are the part of the URL that comes
        after the first "?". Values are *always* lists because the
        underlying ``urllib.parse.parse_qs`` treats every item as a
        potential 1-element array.

    Example
    -------
    With the browser at
    `http://localhost:8501/?show_map=True&selected=asia&selected=america`:

    >>> import streamlit as st
    >>>
    >>> st.experimental_get_query_params()
    {"show_map": ["True"], "selected": ["asia", "america"]}
    """
    ctx = get_script_run_ctx()
    if ctx is None:
        return {}
    ctx.mark_experimental_query_params_used()

    parsed = parse.parse_qs(ctx.query_string, keep_blank_values=True)
    # Hide the reserved embed / embed_options params from user code.
    return util.exclude_keys_in_dict(parsed, keys_to_exclude=EMBED_QUERY_PARAMS_KEYS)
178,277 | from __future__ import annotations
import urllib.parse as parse
from typing import Any
from streamlit import util
from streamlit.constants import (
EMBED_OPTIONS_QUERY_PARAM,
EMBED_QUERY_PARAM,
EMBED_QUERY_PARAMS_KEYS,
)
from streamlit.errors import StreamlitAPIException
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner import get_script_run_ctx
def _ensure_no_embed_params(
    query_params: dict[str, list[str] | str], query_string: str
) -> str:
    """Reject reserved embed params and rebuild the query string.

    Raises StreamlitAPIException if ``query_params`` tries to set the
    embed/embed_options params (case-insensitive). Otherwise returns a new
    query string built from ``query_params``, preserving any embed params
    already present in ``query_string``.
    """
    # Setting embed params from user code is not allowed.
    if query_params != util.exclude_keys_in_dict(
        query_params, keys_to_exclude=EMBED_QUERY_PARAMS_KEYS
    ):
        raise StreamlitAPIException(
            "Query param embed and embed_options (case-insensitive) cannot be set using set_query_params method."
        )

    # Carry over whatever embed params the current URL already has.
    all_current_params = parse.parse_qs(query_string, keep_blank_values=True)
    current_embed_params = parse.urlencode(
        {
            EMBED_QUERY_PARAM: list(
                util.extract_key_query_params(
                    all_current_params, param_key=EMBED_QUERY_PARAM
                )
            ),
            EMBED_OPTIONS_QUERY_PARAM: list(
                util.extract_key_query_params(
                    all_current_params, param_key=EMBED_OPTIONS_QUERY_PARAM
                )
            ),
        },
        doseq=True,
    )

    new_query_string = parse.urlencode(query_params, doseq=True)
    if not new_query_string:
        return current_embed_params
    if not current_embed_params:
        return new_query_string
    return f"{new_query_string}&{current_embed_params}"
The provided code snippet includes necessary dependencies for implementing the `set_query_params` function. Write a Python function `def set_query_params(**query_params: Any) -> None` to solve the following problem:
Set the query parameters that are shown in the browser's URL bar. .. warning:: Query param `embed` cannot be set using this method. Parameters ---------- **query_params : dict The query parameters to set, as key-value pairs. Example ------- To point the user's web browser to something like "http://localhost:8501/?show_map=True&selected=asia&selected=america", you would do the following: >>> import streamlit as st >>> >>> st.experimental_set_query_params( ... show_map=True, ... selected=["asia", "america"], ... )
Here is the function:
def set_query_params(**query_params: Any) -> None:
    """Set the query parameters shown in the browser's URL bar.

    .. warning::
        The reserved query param `embed` cannot be set using this method.

    Parameters
    ----------
    **query_params : dict
        The query parameters to set, as key-value pairs.

    Example
    -------
    To point the browser to something like
    "http://localhost:8501/?show_map=True&selected=asia&selected=america":

    >>> import streamlit as st
    >>>
    >>> st.experimental_set_query_params(
    ...     show_map=True,
    ...     selected=["asia", "america"],
    ... )
    """
    ctx = get_script_run_ctx()
    if ctx is None:
        return
    ctx.mark_experimental_query_params_used()

    new_query_string = _ensure_no_embed_params(query_params, ctx.query_string)
    msg = ForwardMsg()
    msg.page_info_changed.query_string = new_query_string
    # Keep the context in sync so subsequent reads see the new params.
    ctx.query_string = new_query_string
    ctx.enqueue(msg)
178,278 | from __future__ import annotations
import functools
from typing import Any, Callable, Final, TypeVar, cast
import streamlit
from streamlit import config
from streamlit.logger import get_logger
# TypeVar that preserves the wrapped callable's type through decorators.
TFunc = TypeVar("TFunc", bound=Callable[..., Any])
def show_deprecation_warning(message: str) -> None:
    """Show a deprecation warning message."""
    # Only render the warning in the app when configuration allows it.
    if _should_show_deprecation_warning_in_browser():
        streamlit.warning(message)
    # We always log deprecation warnings
    _LOGGER.warning(message)
def make_deprecated_name_warning(
    old_name: str,
    new_name: str,
    removal_date: str,
    extra_message: str | None = None,
    include_st_prefix: bool = True,
) -> str:
    """Compose the markdown message warning that ``old_name`` is deprecated."""
    prefix = "st." if include_st_prefix else ""
    old_label = f"{prefix}{old_name}"
    new_label = f"{prefix}{new_name}"

    warning = (
        f"Please replace `{old_label}` with `{new_label}`.\n\n"
        f"`{old_label}` will be removed after {removal_date}."
    )
    if extra_message:
        warning += f"\n\n{extra_message}"
    return warning
The provided code snippet includes necessary dependencies for implementing the `deprecate_func_name` function. Write a Python function `def deprecate_func_name( func: TFunc, old_name: str, removal_date: str, extra_message: str | None = None, name_override: str | None = None, ) -> TFunc` to solve the following problem:
Wrap an `st` function whose name has changed. Wrapped functions will run as normal, but will also show an st.warning saying that the old name will be removed after removal_date. (We generally set `removal_date` to 3 months from the deprecation date.) Parameters ---------- func The `st.` function whose name has changed. old_name The function's deprecated name within __init__.py. removal_date A date like "2020-01-01", indicating the last day we'll guarantee support for the deprecated name. extra_message An optional extra message to show in the deprecation warning. name_override An optional name to use in place of func.__name__.
Here is the function:
def deprecate_func_name(
    func: TFunc,
    old_name: str,
    removal_date: str,
    extra_message: str | None = None,
    name_override: str | None = None,
) -> TFunc:
    """Wrap an `st` function whose name has changed.

    The wrapper behaves exactly like ``func``, but additionally shows a
    deprecation warning (via ``show_deprecation_warning``) on every call,
    telling users the old name goes away after ``removal_date``.

    Parameters
    ----------
    func
        The `st.` function whose name has changed.
    old_name
        The function's deprecated name within __init__.py.
    removal_date
        A date like "2020-01-01", indicating the last day we'll guarantee
        support for the deprecated name.
    extra_message
        An optional extra message to show in the deprecation warning.
    name_override
        An optional name to use in place of func.__name__.
    """

    @functools.wraps(func)
    def wrapped_func(*args, **kwargs):
        result = func(*args, **kwargs)
        warning = make_deprecated_name_warning(
            old_name, name_override or func.__name__, removal_date, extra_message
        )
        show_deprecation_warning(warning)
        return result

    # Make st.help / introspection report the deprecated alias, not the
    # wrapped function's real name.
    wrapped_func.__name__ = old_name
    wrapped_func.__doc__ = func.__doc__
    return cast(TFunc, wrapped_func)
178,279 | from __future__ import annotations
import os
import re
from datetime import timedelta
from typing import Any, Final, Literal, TypeVar, overload
from streamlit.connections import (
BaseConnection,
SnowflakeConnection,
SnowparkConnection,
SQLConnection,
)
from streamlit.deprecation_util import deprecate_obj_name
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching import cache_resource
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.secrets import secrets_singleton
def connection_factory(
    name: Literal["sql"],
    max_entries: int | None = None,
    ttl: float | timedelta | None = None,
    autocommit: bool = False,
    **kwargs,
) -> SQLConnection:
    # Typing stub: `st.connection("sql")` resolves to a SQLConnection.
    # (Presumably decorated with @overload in the original file -- the
    # decorator is not visible in this excerpt.)
    pass
178,280 | from __future__ import annotations
import os
import re
from datetime import timedelta
from typing import Any, Final, Literal, TypeVar, overload
from streamlit.connections import (
BaseConnection,
SnowflakeConnection,
SnowparkConnection,
SQLConnection,
)
from streamlit.deprecation_util import deprecate_obj_name
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching import cache_resource
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.secrets import secrets_singleton
def connection_factory(
    name: str,
    type: Literal["sql"],
    max_entries: int | None = None,
    ttl: float | timedelta | None = None,
    autocommit: bool = False,
    **kwargs,
) -> SQLConnection:
    # Typing stub: a custom name with type="sql" resolves to a SQLConnection.
    # (Presumably decorated with @overload in the original file.)
    pass
178,281 | from __future__ import annotations
import os
import re
from datetime import timedelta
from typing import Any, Final, Literal, TypeVar, overload
from streamlit.connections import (
BaseConnection,
SnowflakeConnection,
SnowparkConnection,
SQLConnection,
)
from streamlit.deprecation_util import deprecate_obj_name
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching import cache_resource
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.secrets import secrets_singleton
def connection_factory(
    name: Literal["snowflake"],
    max_entries: int | None = None,
    ttl: float | timedelta | None = None,
    autocommit: bool = False,
    **kwargs,
) -> SnowflakeConnection:
    # Typing stub: `st.connection("snowflake")` resolves to a
    # SnowflakeConnection. (Presumably decorated with @overload.)
    pass
178,282 | from __future__ import annotations
import os
import re
from datetime import timedelta
from typing import Any, Final, Literal, TypeVar, overload
from streamlit.connections import (
BaseConnection,
SnowflakeConnection,
SnowparkConnection,
SQLConnection,
)
from streamlit.deprecation_util import deprecate_obj_name
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching import cache_resource
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.secrets import secrets_singleton
def connection_factory(
    name: str,
    type: Literal["snowflake"],
    max_entries: int | None = None,
    ttl: float | timedelta | None = None,
    autocommit: bool = False,
    **kwargs,
) -> SnowflakeConnection:
    # Typing stub: a custom name with type="snowflake" resolves to a
    # SnowflakeConnection. (Presumably decorated with @overload.)
    pass
178,283 | from __future__ import annotations
import os
import re
from datetime import timedelta
from typing import Any, Final, Literal, TypeVar, overload
from streamlit.connections import (
BaseConnection,
SnowflakeConnection,
SnowparkConnection,
SQLConnection,
)
from streamlit.deprecation_util import deprecate_obj_name
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching import cache_resource
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.secrets import secrets_singleton
def connection_factory(
    name: Literal["snowpark"],
    max_entries: int | None = None,
    ttl: float | timedelta | None = None,
    **kwargs,
) -> SnowparkConnection:
    # Typing stub: `st.connection("snowpark")` resolves to a
    # SnowparkConnection. (Presumably decorated with @overload.)
    pass
178,284 | from __future__ import annotations
import os
import re
from datetime import timedelta
from typing import Any, Final, Literal, TypeVar, overload
from streamlit.connections import (
BaseConnection,
SnowflakeConnection,
SnowparkConnection,
SQLConnection,
)
from streamlit.deprecation_util import deprecate_obj_name
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching import cache_resource
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.secrets import secrets_singleton
def connection_factory(
    name: str,
    type: Literal["snowpark"],
    max_entries: int | None = None,
    ttl: float | timedelta | None = None,
    **kwargs,
) -> SnowparkConnection:
    # Typing stub: a custom name with type="snowpark" resolves to a
    # SnowparkConnection. (Presumably decorated with @overload.)
    pass
178,285 | from __future__ import annotations
import os
import re
from datetime import timedelta
from typing import Any, Final, Literal, TypeVar, overload
from streamlit.connections import (
BaseConnection,
SnowflakeConnection,
SnowparkConnection,
SQLConnection,
)
from streamlit.deprecation_util import deprecate_obj_name
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching import cache_resource
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.secrets import secrets_singleton
# TypeVar bounding user-supplied connection classes to BaseConnection
# subclasses; lets type checkers infer the exact return type below.
ConnectionClass = TypeVar("ConnectionClass", bound=BaseConnection[Any])
def connection_factory(
    name: str,
    type: type[ConnectionClass],
    max_entries: int | None = None,
    ttl: float | timedelta | None = None,
    **kwargs,
) -> ConnectionClass:
    # Typing stub: passing a class reference returns an instance of exactly
    # that class. (Presumably decorated with @overload.)
    pass
178,286 | from __future__ import annotations
import os
import re
from datetime import timedelta
from typing import Any, Final, Literal, TypeVar, overload
from streamlit.connections import (
BaseConnection,
SnowflakeConnection,
SnowparkConnection,
SQLConnection,
)
from streamlit.deprecation_util import deprecate_obj_name
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching import cache_resource
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.secrets import secrets_singleton
def connection_factory(
    name: str,
    type: str | None = None,
    max_entries: int | None = None,
    ttl: float | timedelta | None = None,
    **kwargs,
) -> BaseConnection[Any]:
    # Typing stub: catch-all signature (type given as arbitrary string or
    # looked up from secrets.toml). (Presumably decorated with @overload.)
    pass
178,287 | from __future__ import annotations
import os
import re
from datetime import timedelta
from typing import Any, Final, Literal, TypeVar, overload
from streamlit.connections import (
BaseConnection,
SnowflakeConnection,
SnowparkConnection,
SQLConnection,
)
from streamlit.deprecation_util import deprecate_obj_name
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching import cache_resource
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.secrets import secrets_singleton
# Keyword shorthands accepted by st.connection() mapped to the corresponding
# first-party connection classes.
FIRST_PARTY_CONNECTIONS = {
    "snowflake": SnowflakeConnection,
    "snowpark": SnowparkConnection,
    "sql": SQLConnection,
}
# Extracts the missing module name from ModuleNotFoundError messages of the
# form "No module named 'foo'".
MODULE_EXTRACTION_REGEX = re.compile(r"No module named \'(.+)\'")
# Maps an importable module name to the PyPI package that provides it; used
# to suggest the right `pip install` target in connection error messages.
MODULES_TO_PYPI_PACKAGES: Final[dict[str, str]] = {
    "MySQLdb": "mysqlclient",
    "psycopg2": "psycopg2-binary",
    "sqlalchemy": "sqlalchemy",
    "snowflake": "snowflake-connector-python",
    "snowflake.connector": "snowflake-connector-python",
    "snowflake.snowpark": "snowflake-snowpark-python",
}
def _create_connection(
    name: str,
    connection_class: type[ConnectionClass],
    max_entries: int | None = None,
    ttl: float | timedelta | None = None,
    **kwargs,
) -> ConnectionClass:
    """Create an instance of connection_class with the given name and kwargs.

    The weird implementation of this function with the @cache_resource annotated
    function defined internally is done to:

    * Always @gather_metrics on the call even if the return value is a cached one.
    * Allow the user to specify ttl and max_entries when calling st.connection.

    Parameters
    ----------
    name
        Connection name, forwarded as ``connection_name`` to the class.
    connection_class
        Must be a subclass of BaseConnection; validated below.
    max_entries, ttl
        Cache-sizing/expiry options forwarded to ``cache_resource``.
    **kwargs
        Extra kwargs forwarded to the connection's constructor.

    Raises
    ------
    StreamlitAPIException
        If connection_class is not a BaseConnection subclass.
    """
    # Inner helper that actually constructs the connection; it is wrapped
    # with @cache_resource below so repeated calls with identical arguments
    # reuse the same connection object.
    def __create_connection(
        name: str, connection_class: type[ConnectionClass], **kwargs
    ) -> ConnectionClass:
        return connection_class(connection_name=name, **kwargs)
    if not issubclass(connection_class, BaseConnection):
        raise StreamlitAPIException(
            f"{connection_class} is not a subclass of BaseConnection!"
        )
    # We modify our helper function's `__qualname__` here to work around default
    # `@st.cache_resource` behavior. Otherwise, `st.connection` being called with
    # different `ttl` or `max_entries` values will reset the cache with each call.
    ttl_str = str(ttl).replace(  # Avoid adding extra `.` characters to `__qualname__`
        ".", "_"
    )
    __create_connection.__qualname__ = (
        f"{__create_connection.__qualname__}_{ttl_str}_{max_entries}"
    )
    # Apply the cache decorator manually (rather than with @-syntax) so that
    # the runtime values of max_entries and ttl can be passed through.
    __create_connection = cache_resource(
        max_entries=max_entries,
        show_spinner="Running `st.connection(...)`.",
        ttl=ttl,
    )(__create_connection)
    return __create_connection(name, connection_class, **kwargs)
def _get_first_party_connection(connection_class: str):
    """Resolve a first-party connection keyword ("sql", "snowflake",
    "snowpark") to its connection class, raising for unknown keywords."""
    if connection_class not in FIRST_PARTY_CONNECTIONS:
        raise StreamlitAPIException(
            f"Invalid connection '{connection_class}'. "
            f"Supported connection classes: {FIRST_PARTY_CONNECTIONS}"
        )
    return FIRST_PARTY_CONNECTIONS[connection_class]
def deprecate_obj_name(
    obj: TObj,
    old_name: str,
    new_name: str,
    removal_date: str,
    include_st_prefix: bool = True,
) -> TObj:
    """Wrap an `st` object whose name has changed.

    Wrapped objects will behave as normal, but will also show an st.warning
    saying that the old name will be removed after `removal_date`.
    (We generally set `removal_date` to 3 months from the deprecation date.)

    Parameters
    ----------
    obj
        The `st.` object whose name has changed.
    old_name
        The object's deprecated name within __init__.py.
    new_name
        The object's new name within __init__.py.
    removal_date
        A date like "2020-01-01", indicating the last day we'll guarantee
        support for the deprecated name.
    include_st_prefix
        If False, does not prefix each of the object names in the deprecation
        message with `st.*`. Defaults to True.
    """
    # The warning is emitted lazily (on first use of the wrapped object) via
    # the callback passed to the wrapper factory.
    return _create_deprecated_obj_wrapper(
        obj,
        lambda: show_deprecation_warning(
            make_deprecated_name_warning(
                old_name, new_name, removal_date, include_st_prefix=include_st_prefix
            )
        ),
    )
secrets_singleton: Final = Secrets(SECRETS_FILE_LOCS)
The provided code snippet includes necessary dependencies for implementing the `connection_factory` function. Write a Python function `def connection_factory( name, type=None, max_entries=None, ttl=None, **kwargs, )` to solve the following problem:
Create a new connection to a data store or API, or return an existing one. Config options, credentials, secrets, etc. for connections are taken from various sources: - Any connection-specific configuration files. - An app's ``secrets.toml`` files. - The kwargs passed to this function. Parameters ---------- name : str The connection name used for secrets lookup in ``[connections.<name>]``. Type will be inferred from passing ``"sql"``, ``"snowflake"``, or ``"snowpark"``. type : str, connection class, or None The type of connection to create. It can be a keyword (``"sql"``, ``"snowflake"``, or ``"snowpark"``), a path to an importable class, or an imported class reference. All classes must extend ``st.connections.BaseConnection`` and implement the ``_connect()`` method. If the type kwarg is None, a ``type`` field must be set in the connection's section in ``secrets.toml``. max_entries : int or None The maximum number of connections to keep in the cache, or None for an unbounded cache. (When a new entry is added to a full cache, the oldest cached entry will be removed.) The default is None. ttl : float, timedelta, or None The maximum number of seconds to keep results in the cache, or None if cached results should not expire. The default is None. **kwargs : any Additional connection specific kwargs that are passed to the Connection's ``_connect()`` method. Learn more from the specific Connection's documentation. Returns ------- Connection object An initialized Connection object of the specified type. Examples -------- The easiest way to create a first-party (SQL, Snowflake, or Snowpark) connection is to use their default names and define corresponding sections in your ``secrets.toml`` file. >>> import streamlit as st >>> conn = st.connection("sql") # Config section defined in [connections.sql] in secrets.toml. Creating a SQLConnection with a custom name requires you to explicitly specify the type. 
If type is not passed as a kwarg, it must be set in the appropriate section of ``secrets.toml``. >>> import streamlit as st >>> conn1 = st.connection("my_sql_connection", type="sql") # Config section defined in [connections.my_sql_connection]. >>> conn2 = st.connection("my_other_sql_connection") # type must be set in [connections.my_other_sql_connection]. Passing the full module path to the connection class that you want to use can be useful, especially when working with a custom connection: >>> import streamlit as st >>> conn = st.connection("my_sql_connection", type="streamlit.connections.SQLConnection") Finally, you can pass the connection class to use directly to this function. Doing so allows static type checking tools such as ``mypy`` to infer the exact return type of ``st.connection``. >>> import streamlit as st >>> from streamlit.connections import SQLConnection >>> conn = st.connection("my_sql_connection", type=SQLConnection)
Here is the function:
def connection_factory(
    name,
    type=None,
    max_entries=None,
    ttl=None,
    **kwargs,
):
    """Create a new connection to a data store or API, or return an existing one.

    Config options, credentials, secrets, etc. for connections are taken from various
    sources:

    - Any connection-specific configuration files.
    - An app's ``secrets.toml`` files.
    - The kwargs passed to this function.

    Parameters
    ----------
    name : str
        The connection name used for secrets lookup in ``[connections.<name>]``.
        Type will be inferred from passing ``"sql"``, ``"snowflake"``, or ``"snowpark"``.
    type : str, connection class, or None
        The type of connection to create. It can be a keyword (``"sql"``, ``"snowflake"``,
        or ``"snowpark"``), a path to an importable class, or an imported class reference.
        All classes must extend ``st.connections.BaseConnection`` and implement the
        ``_connect()`` method. If the type kwarg is None, a ``type`` field must be set in
        the connection's section in ``secrets.toml``.
    max_entries : int or None
        The maximum number of connections to keep in the cache, or None
        for an unbounded cache. (When a new entry is added to a full cache,
        the oldest cached entry will be removed.) The default is None.
    ttl : float, timedelta, or None
        The maximum number of seconds to keep results in the cache, or
        None if cached results should not expire. The default is None.
    **kwargs : any
        Additional connection specific kwargs that are passed to the Connection's
        ``_connect()`` method. Learn more from the specific Connection's documentation.

    Returns
    -------
    Connection object
        An initialized Connection object of the specified type.

    Examples
    --------
    The easiest way to create a first-party (SQL, Snowflake, or Snowpark) connection is
    to use their default names and define corresponding sections in your ``secrets.toml``
    file.

    >>> import streamlit as st
    >>> conn = st.connection("sql") # Config section defined in [connections.sql] in secrets.toml.

    Creating a SQLConnection with a custom name requires you to explicitly specify the
    type. If type is not passed as a kwarg, it must be set in the appropriate section of
    ``secrets.toml``.

    >>> import streamlit as st
    >>> conn1 = st.connection("my_sql_connection", type="sql") # Config section defined in [connections.my_sql_connection].
    >>> conn2 = st.connection("my_other_sql_connection") # type must be set in [connections.my_other_sql_connection].

    Passing the full module path to the connection class that you want to use can be
    useful, especially when working with a custom connection:

    >>> import streamlit as st
    >>> conn = st.connection("my_sql_connection", type="streamlit.connections.SQLConnection")

    Finally, you can pass the connection class to use directly to this function. Doing
    so allows static type checking tools such as ``mypy`` to infer the exact return
    type of ``st.connection``.

    >>> import streamlit as st
    >>> from streamlit.connections import SQLConnection
    >>> conn = st.connection("my_sql_connection", type=SQLConnection)
    """
    # A name of the form "env:MY_VAR" means: read the real connection name
    # from the environment variable MY_VAR.
    USE_ENV_PREFIX = "env:"
    if name.startswith(USE_ENV_PREFIX):
        # It'd be nice to use str.removeprefix() here, but we won't be able to do that
        # until the minimum Python version we support is 3.9.
        envvar_name = name[len(USE_ENV_PREFIX) :]
        name = os.environ[envvar_name]
    if type is None:
        if name in FIRST_PARTY_CONNECTIONS:
            # We allow users to simply write `st.connection("sql")` instead of
            # `st.connection("sql", type="sql")`.
            type = _get_first_party_connection(name)
        else:
            # The user didn't specify a type, so we try to pull it out from their
            # secrets.toml file. NOTE: we're okay with any of the dict lookups below
            # exploding with a KeyError since, if type isn't explicitly specified here,
            # it must be the case that it's defined in secrets.toml and should raise an
            # Exception otherwise.
            secrets_singleton.load_if_toml_exists()
            type = secrets_singleton["connections"][name]["type"]
    # type is a nice kwarg name for the st.connection user but is annoying to work with
    # since it conflicts with the builtin function name and thus gets syntax
    # highlighted.
    connection_class = type
    if isinstance(connection_class, str):
        # We assume that a connection_class specified via string is either the fully
        # qualified name of a class (its module and exported classname) or the string
        # literal shorthand for one of our first party connections. In the former case,
        # connection_class will always contain a "." in its name.
        if "." in connection_class:
            parts = connection_class.split(".")
            classname = parts.pop()
            # Resolve the fully qualified class path at runtime; imported here
            # so the module is only loaded when this code path is taken.
            import importlib
            connection_module = importlib.import_module(".".join(parts))
            connection_class = getattr(connection_module, classname)
        else:
            connection_class = _get_first_party_connection(connection_class)
    # At this point, connection_class should be of type Type[ConnectionClass].
    try:
        conn = _create_connection(
            name, connection_class, max_entries=max_entries, ttl=ttl, **kwargs
        )
        # The "snowpark" keyword is deprecated in favor of "snowflake"; wrap
        # the returned object so a deprecation warning is shown on use.
        if isinstance(conn, SnowparkConnection):
            conn = deprecate_obj_name(
                conn,
                'connection("snowpark")',
                'connection("snowflake")',
                "2024-04-01",
            )
        return conn
    except ModuleNotFoundError as e:
        # Turn a bare import failure into an actionable message that names
        # the PyPI package (when known) required by this connection type.
        err_string = str(e)
        missing_module = re.search(MODULE_EXTRACTION_REGEX, err_string)
        extra_info = "You may be missing a dependency required to use this connection."
        if missing_module:
            pypi_package = MODULES_TO_PYPI_PACKAGES.get(missing_module.group(1))
            if pypi_package:
                extra_info = f"You need to install the '{pypi_package}' package to use this connection."
        raise ModuleNotFoundError(f"{str(e)}. {extra_info}")
If type is not passed as a kwarg, it must be set in the appropriate section of ``secrets.toml``. >>> import streamlit as st >>> conn1 = st.connection("my_sql_connection", type="sql") # Config section defined in [connections.my_sql_connection]. >>> conn2 = st.connection("my_other_sql_connection") # type must be set in [connections.my_other_sql_connection]. Passing the full module path to the connection class that you want to use can be useful, especially when working with a custom connection: >>> import streamlit as st >>> conn = st.connection("my_sql_connection", type="streamlit.connections.SQLConnection") Finally, you can pass the connection class to use directly to this function. Doing so allows static type checking tools such as ``mypy`` to infer the exact return type of ``st.connection``. >>> import streamlit as st >>> from streamlit.connections import SQLConnection >>> conn = st.connection("my_sql_connection", type=SQLConnection) |
178,288 | from __future__ import annotations
import collections
import threading
from typing import Final
from streamlit.logger import get_logger
from streamlit.runtime.media_file_storage import MediaFileKind, MediaFileStorage
The provided code snippet includes necessary dependencies for implementing the `_get_session_id` function. Write a Python function `def _get_session_id() -> str` to solve the following problem:
Get the active AppSession's session_id.
Here is the function:
def _get_session_id() -> str:
    """Get the active AppSession's session_id."""
    from streamlit.runtime.scriptrunner import get_script_run_ctx

    ctx = get_script_run_ctx()
    if ctx is not None:
        return ctx.session_id
    # ctx is only None when running "python myscript.py" rather than
    # "streamlit run myscript.py". In that mode there is only ever one
    # "session", so the ID doesn't matter and a constant suffices.
    return "dontcare"
178,289 | from __future__ import annotations
import itertools
from abc import abstractmethod
from typing import TYPE_CHECKING, NamedTuple, Protocol, runtime_checkable
class CacheStat(NamedTuple):
    """A single cache entry's bookkeeping record.

    Properties
    ----------
    category_name : str
        Human-readable name of the cache "category" the entry belongs to,
        e.g. "st.memo" or "session_state".
    cache_name : str
        Human-readable name of the cache instance the entry belongs to. For
        function-decorator caches this is typically the cached function's
        name; categories without distinct instances use the empty string.
    byte_length : int
        The entry's memory footprint in bytes.
    """

    category_name: str
    cache_name: str
    byte_length: int

    def to_metric_str(self) -> str:
        """Render this entry as an OpenMetrics-style text line."""
        labels = f'cache_type="{self.category_name}",cache="{self.cache_name}"'
        return f"cache_memory_bytes{{{labels}}} {self.byte_length}"

    def marshall_metric_proto(self, metric: MetricProto) -> None:
        """Fill an OpenMetrics `Metric` protobuf object."""
        for label_name, label_value in (
            ("cache_type", self.category_name),
            ("cache", self.cache_name),
        ):
            label = metric.labels.add()
            label.name = label_name
            label.value = label_value

        metric.metric_points.add().gauge_value.int_value = self.byte_length
The provided code snippet includes necessary dependencies for implementing the `group_stats` function. Write a Python function `def group_stats(stats: list[CacheStat]) -> list[CacheStat]` to solve the following problem:
Group a list of CacheStats by category_name and cache_name and sum byte_length
Here is the function:
def group_stats(stats: list[CacheStat]) -> list[CacheStat]:
    """Group a list of CacheStats by (category_name, cache_name) and sum
    byte_length, returning one CacheStat per group sorted by that key."""
    totals: dict[tuple[str, str], int] = {}
    for stat in stats:
        group_key = (stat.category_name, stat.cache_name)
        totals[group_key] = totals.get(group_key, 0) + stat.byte_length

    # Sorting by the (category, cache) key reproduces the ordering the
    # sort-then-groupby approach would yield.
    return [
        CacheStat(
            category_name=category,
            cache_name=cache,
            byte_length=total,
        )
        for (category, cache), total in sorted(totals.items())
    ]
178,290 | from __future__ import annotations
import hashlib
from typing import TYPE_CHECKING, Final, MutableMapping
from weakref import WeakKeyDictionary
from streamlit import config, util
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.stats import CacheStat, CacheStatsProvider, group_stats
from streamlit.util import HASHLIB_KWARGS
def populate_hash_if_needed(msg: ForwardMsg) -> str:
    """Computes and assigns the unique hash for a ForwardMsg.

    If the ForwardMsg already has a hash, this is a no-op.

    Parameters
    ----------
    msg : ForwardMsg

    Returns
    -------
    string
        The message's hash, returned here for convenience. (The hash
        will also be assigned to the ForwardMsg; callers do not need
        to do this.)
    """
    if msg.hash == "":
        # Move the message's metadata aside. It's not part of the
        # hash calculation.
        metadata = msg.metadata
        msg.ClearField("metadata")
        # MD5 is good enough for what we need, which is uniqueness.
        # NOTE(review): HASHLIB_KWARGS presumably passes
        # usedforsecurity=False for FIPS compatibility -- confirm in
        # streamlit.util.
        hasher = hashlib.md5(**HASHLIB_KWARGS)
        hasher.update(msg.SerializeToString())
        msg.hash = hasher.hexdigest()
        # Restore metadata.
        msg.metadata.CopyFrom(metadata)
    return msg.hash
The provided code snippet includes necessary dependencies for implementing the `create_reference_msg` function. Write a Python function `def create_reference_msg(msg: ForwardMsg) -> ForwardMsg` to solve the following problem:
Create a ForwardMsg that refers to the given message via its hash. The reference message will also get a copy of the source message's metadata. Parameters ---------- msg : ForwardMsg The ForwardMsg to create the reference to. Returns ------- ForwardMsg A new ForwardMsg that "points" to the original message via the ref_hash field.
Here is the function:
def create_reference_msg(msg: ForwardMsg) -> ForwardMsg:
    """Create a ForwardMsg that refers to the given message via its hash.

    The reference message will also get a copy of the source message's
    metadata.

    Parameters
    ----------
    msg : ForwardMsg
        The ForwardMsg to create the reference to.

    Returns
    -------
    ForwardMsg
        A new ForwardMsg that "points" to the original message via the
        ref_hash field.
    """
    ref_msg = ForwardMsg()
    ref_msg.ref_hash = populate_hash_if_needed(msg)
    # Metadata must be copied explicitly: populate_hash_if_needed excludes
    # metadata from the hash computation, so it isn't carried by ref_hash.
    ref_msg.metadata.CopyFrom(msg.metadata)
    return ref_msg
178,291 | from __future__ import annotations
import asyncio
import sys
import uuid
from enum import Enum
from typing import TYPE_CHECKING, Callable, Final
import streamlit.elements.exception as exception_utils
from streamlit import config, runtime, source_util
from streamlit.case_converters import to_snake_case
from streamlit.logger import get_logger
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.Common_pb2 import FileURLs, FileURLsRequest
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.GitInfo_pb2 import GitInfo
from streamlit.proto.NewSession_pb2 import (
Config,
CustomThemeConfig,
NewSession,
UserInfo,
)
from streamlit.proto.PagesChanged_pb2 import PagesChanged
from streamlit.runtime import caching, legacy_caching
from streamlit.runtime.forward_msg_queue import ForwardMsgQueue
from streamlit.runtime.metrics_util import Installation
from streamlit.runtime.script_data import ScriptData
from streamlit.runtime.scriptrunner import RerunData, ScriptRunner, ScriptRunnerEvent
from streamlit.runtime.scriptrunner.script_cache import ScriptCache
from streamlit.runtime.secrets import secrets_singleton
from streamlit.runtime.uploaded_file_manager import UploadedFileManager
from streamlit.version import STREAMLIT_VERSION_STRING
from streamlit.watcher import LocalSourcesWatcher
The provided code snippet includes necessary dependencies for implementing the `_generate_scriptrun_id` function. Write a Python function `def _generate_scriptrun_id() -> str` to solve the following problem:
Randomly generate a unique ID for a script execution.
Here is the function:
def _generate_scriptrun_id() -> str:
"""Randomly generate a unique ID for a script execution."""
return str(uuid.uuid4()) | Randomly generate a unique ID for a script execution. |
178,292 | from __future__ import annotations
import asyncio
import sys
import uuid
from enum import Enum
from typing import TYPE_CHECKING, Callable, Final
import streamlit.elements.exception as exception_utils
from streamlit import config, runtime, source_util
from streamlit.case_converters import to_snake_case
from streamlit.logger import get_logger
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.Common_pb2 import FileURLs, FileURLsRequest
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.GitInfo_pb2 import GitInfo
from streamlit.proto.NewSession_pb2 import (
Config,
CustomThemeConfig,
NewSession,
UserInfo,
)
from streamlit.proto.PagesChanged_pb2 import PagesChanged
from streamlit.runtime import caching, legacy_caching
from streamlit.runtime.forward_msg_queue import ForwardMsgQueue
from streamlit.runtime.metrics_util import Installation
from streamlit.runtime.script_data import ScriptData
from streamlit.runtime.scriptrunner import RerunData, ScriptRunner, ScriptRunnerEvent
from streamlit.runtime.scriptrunner.script_cache import ScriptCache
from streamlit.runtime.secrets import secrets_singleton
from streamlit.runtime.uploaded_file_manager import UploadedFileManager
from streamlit.version import STREAMLIT_VERSION_STRING
from streamlit.watcher import LocalSourcesWatcher
def _get_toolbar_mode() -> Config.ToolbarMode.ValueType:
    """Read client.toolbarMode from config and map it to the proto enum.

    Returns
    -------
    Config.ToolbarMode.ValueType
        The enum member matching the configured value (case-insensitive).

    Raises
    ------
    ValueError
        If the configured value does not name a ``Config.ToolbarMode`` member.
    """
    config_key = "client.toolbarMode"
    config_value = config.get_option(config_key)
    # BUGFIX: pass an explicit default. Without it, getattr raises
    # AttributeError for unknown values, so the friendly ValueError below
    # was unreachable dead code.
    enum_value: Config.ToolbarMode.ValueType | None = getattr(
        Config.ToolbarMode, config_value.upper(), None
    )
    if enum_value is None:
        allowed_values = ", ".join(k.lower() for k in Config.ToolbarMode.keys())
        raise ValueError(
            f"Config {config_key!r} expects to have one of "
            f"the following values: {allowed_values}. "
            f"Current value: {config_value}"
        )
    return enum_value
def _populate_config_msg(msg: Config) -> None:
    """Copy server/browser/UI config options onto the outgoing Config proto."""
    msg.gather_usage_stats = config.get_option("browser.gatherUsageStats")
    msg.max_cached_message_age = config.get_option("global.maxCachedMessageAge")
    msg.allow_run_on_save = config.get_option("server.allowRunOnSave")
    msg.hide_top_bar = config.get_option("ui.hideTopBar")
    # ui.hideSidebarNav is deprecated, will be removed in the future
    msg.hide_sidebar_nav = config.get_option("ui.hideSidebarNav")
    # Identity check instead of `== False`: the option is tri-state (None
    # when unset), and only an explicit False should force-hide the nav.
    if config.get_option("client.showSidebarNavigation") is False:
        msg.hide_sidebar_nav = True
    msg.toolbar_mode = _get_toolbar_mode()
178,293 | from __future__ import annotations
import asyncio
import sys
import uuid
from enum import Enum
from typing import TYPE_CHECKING, Callable, Final
import streamlit.elements.exception as exception_utils
from streamlit import config, runtime, source_util
from streamlit.case_converters import to_snake_case
from streamlit.logger import get_logger
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.Common_pb2 import FileURLs, FileURLsRequest
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.GitInfo_pb2 import GitInfo
from streamlit.proto.NewSession_pb2 import (
Config,
CustomThemeConfig,
NewSession,
UserInfo,
)
from streamlit.proto.PagesChanged_pb2 import PagesChanged
from streamlit.runtime import caching, legacy_caching
from streamlit.runtime.forward_msg_queue import ForwardMsgQueue
from streamlit.runtime.metrics_util import Installation
from streamlit.runtime.script_data import ScriptData
from streamlit.runtime.scriptrunner import RerunData, ScriptRunner, ScriptRunnerEvent
from streamlit.runtime.scriptrunner.script_cache import ScriptCache
from streamlit.runtime.secrets import secrets_singleton
from streamlit.runtime.uploaded_file_manager import UploadedFileManager
from streamlit.version import STREAMLIT_VERSION_STRING
from streamlit.watcher import LocalSourcesWatcher
_LOGGER: Final = get_logger(__name__)
def to_snake_case(camel_case_str: str) -> str:
    """Convert an UpperCamelCase or lowerCamelCase name to snake_case.

    Examples
    --------
    fooBar -> foo_bar
    BazBang -> baz_bang
    """
    # First insert "_" before each capitalized word, then before any
    # lower/digit-to-upper boundary, and finally lowercase everything.
    with_word_breaks = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", camel_case_str)
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", with_word_breaks).lower()
def _populate_theme_msg(msg: CustomThemeConfig) -> None:
    """Copy the [theme] config section onto the outgoing CustomThemeConfig proto.

    Plain string options are set via reflection (setattr with the snake_case
    name); the enum-encoded options ("base" and "font") are mapped explicitly,
    and invalid values are logged and left at their proto enum defaults.
    """
    enum_encoded_options = {"base", "font"}
    theme_opts = config.get_options_for_section("theme")
    if not any(theme_opts.values()):
        # No theme option is set at all -> leave the message untouched.
        return
    for option_name, option_val in theme_opts.items():
        if option_name not in enum_encoded_options and option_val is not None:
            setattr(msg, to_snake_case(option_name), option_val)
    # NOTE: If unset, base and font will default to the protobuf enum zero
    # values, which are BaseTheme.LIGHT and FontFamily.SANS_SERIF,
    # respectively. This is why we both don't handle the cases explicitly and
    # also only log a warning when receiving invalid base/font options.
    base_map = {
        "light": msg.BaseTheme.LIGHT,
        "dark": msg.BaseTheme.DARK,
    }
    base = theme_opts["base"]
    if base is not None:
        if base not in base_map:
            _LOGGER.warning(
                f'"{base}" is an invalid value for theme.base.'
                f" Allowed values include {list(base_map.keys())}."
                ' Setting theme.base to "light".'
            )
        else:
            msg.base = base_map[base]
    font_map = {
        "sans serif": msg.FontFamily.SANS_SERIF,
        "serif": msg.FontFamily.SERIF,
        "monospace": msg.FontFamily.MONOSPACE,
    }
    font = theme_opts["font"]
    if font is not None:
        if font not in font_map:
            _LOGGER.warning(
                f'"{font}" is an invalid value for theme.font.'
                f" Allowed values include {list(font_map.keys())}."
                ' Setting theme.font to "sans serif".'
            )
        else:
            msg.font = font_map[font]
178,294 | from __future__ import annotations
import asyncio
import sys
import uuid
from enum import Enum
from typing import TYPE_CHECKING, Callable, Final
import streamlit.elements.exception as exception_utils
from streamlit import config, runtime, source_util
from streamlit.case_converters import to_snake_case
from streamlit.logger import get_logger
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.Common_pb2 import FileURLs, FileURLsRequest
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.GitInfo_pb2 import GitInfo
from streamlit.proto.NewSession_pb2 import (
Config,
CustomThemeConfig,
NewSession,
UserInfo,
)
from streamlit.proto.PagesChanged_pb2 import PagesChanged
from streamlit.runtime import caching, legacy_caching
from streamlit.runtime.forward_msg_queue import ForwardMsgQueue
from streamlit.runtime.metrics_util import Installation
from streamlit.runtime.script_data import ScriptData
from streamlit.runtime.scriptrunner import RerunData, ScriptRunner, ScriptRunnerEvent
from streamlit.runtime.scriptrunner.script_cache import ScriptCache
from streamlit.runtime.secrets import secrets_singleton
from streamlit.runtime.uploaded_file_manager import UploadedFileManager
from streamlit.version import STREAMLIT_VERSION_STRING
from streamlit.watcher import LocalSourcesWatcher
class Installation:
    """Process-wide singleton holding the anonymous installation identifier."""

    _instance_lock = threading.Lock()
    _instance: Installation | None = None

    # NOTE(review): the call site `Installation.instance().installation_id`
    # requires `instance` to be a classmethod and `installation_id` to be a
    # property; the decorators were missing and are restored here.
    @classmethod
    def instance(cls) -> Installation:
        """Returns the singleton Installation"""
        # We use a double-checked locking optimization to avoid the overhead
        # of acquiring the lock in the common case:
        # https://en.wikipedia.org/wiki/Double-checked_locking
        if cls._instance is None:
            with cls._instance_lock:
                if cls._instance is None:
                    cls._instance = Installation()
        return cls._instance

    def __init__(self):
        # A stable UUID derived from the machine id, so repeated runs on the
        # same machine report the same installation.
        self.installation_id_v3 = str(
            uuid.uuid5(uuid.NAMESPACE_DNS, _get_machine_id_v3())
        )

    def __repr__(self) -> str:
        return util.repr_(self)

    @property
    def installation_id(self):
        return self.installation_id_v3
def _populate_user_info_msg(msg: UserInfo) -> None:
    """Fill the UserInfo proto with this machine's installation identifiers."""
    install = Installation.instance()
    msg.installation_id = install.installation_id
    msg.installation_id_v3 = install.installation_id_v3
178,295 | from __future__ import annotations
import asyncio
import sys
import uuid
from enum import Enum
from typing import TYPE_CHECKING, Callable, Final
import streamlit.elements.exception as exception_utils
from streamlit import config, runtime, source_util
from streamlit.case_converters import to_snake_case
from streamlit.logger import get_logger
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.Common_pb2 import FileURLs, FileURLsRequest
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.GitInfo_pb2 import GitInfo
from streamlit.proto.NewSession_pb2 import (
Config,
CustomThemeConfig,
NewSession,
UserInfo,
)
from streamlit.proto.PagesChanged_pb2 import PagesChanged
from streamlit.runtime import caching, legacy_caching
from streamlit.runtime.forward_msg_queue import ForwardMsgQueue
from streamlit.runtime.metrics_util import Installation
from streamlit.runtime.script_data import ScriptData
from streamlit.runtime.scriptrunner import RerunData, ScriptRunner, ScriptRunnerEvent
from streamlit.runtime.scriptrunner.script_cache import ScriptCache
from streamlit.runtime.secrets import secrets_singleton
from streamlit.runtime.uploaded_file_manager import UploadedFileManager
from streamlit.version import STREAMLIT_VERSION_STRING
from streamlit.watcher import LocalSourcesWatcher
def _populate_app_pages(msg: NewSession | PagesChanged, main_script_path: str) -> None:
    """Append one app_pages entry per page of the multipage app to the proto."""
    pages = source_util.get_pages(main_script_path)
    for script_hash, page_info in pages.items():
        page = msg.app_pages.add()
        page.page_script_hash = script_hash
        page.page_name = page_info["page_name"]
        page.icon = page_info["icon"]
178,296 | from __future__ import annotations
import contextlib
import inspect
import os
import sys
import threading
import time
import uuid
from collections.abc import Sized
from functools import wraps
from typing import Any, Callable, Final, TypeVar, cast, overload
from streamlit import config, util
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.PageProfile_pb2 import Argument, Command
_ETC_MACHINE_ID_PATH = "/etc/machine-id"
_DBUS_MACHINE_ID_PATH = "/var/lib/dbus/machine-id"
The provided code snippet includes necessary dependencies for implementing the `_get_machine_id_v3` function. Write a Python function `def _get_machine_id_v3() -> str` to solve the following problem:
Get the machine ID This is a unique identifier for a user for tracking metrics in Segment, that is broken in different ways in some Linux distros and Docker images. - at times just a hash of '', which means many machines map to the same ID - at times a hash of the same string, when running in a Docker container
Here is the function:
def _get_machine_id_v3() -> str:
    """Get the machine ID

    This is a unique identifier for a user for tracking metrics in Segment,
    that is broken in different ways in some Linux distros and Docker images.
    - at times just a hash of '', which means many machines map to the same ID
    - at times a hash of the same string, when running in a Docker container
    """
    # Prefer the OS-provided machine-id files; fall back to the MAC-derived
    # node id when neither exists.
    for id_path in (_ETC_MACHINE_ID_PATH, _DBUS_MACHINE_ID_PATH):
        if os.path.isfile(id_path):
            with open(id_path) as f:
                return f.read()
    return str(uuid.getnode())
178,297 | from __future__ import annotations
import contextlib
import inspect
import os
import sys
import threading
import time
import uuid
from collections.abc import Sized
from functools import wraps
from typing import Any, Callable, Final, TypeVar, cast, overload
from streamlit import config, util
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.PageProfile_pb2 import Argument, Command
F = TypeVar("F", bound=Callable[..., Any])
@overload
def gather_metrics(
    name: str,
    func: F,
) -> F:
    # Typing overload: decorating a function directly returns the wrapped
    # function. (The stub body is intentionally `...` per @overload usage.)
    ...
178,298 | from __future__ import annotations
import contextlib
import inspect
import os
import sys
import threading
import time
import uuid
from collections.abc import Sized
from functools import wraps
from typing import Any, Callable, Final, TypeVar, cast, overload
from streamlit import config, util
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.PageProfile_pb2 import Argument, Command
F = TypeVar("F", bound=Callable[..., Any])
@overload
def gather_metrics(
    name: str,
    func: None = None,
) -> Callable[[F], F]:
    # Typing overload: calling gather_metrics(name=...) with no function
    # returns a decorator. (The stub body is intentionally `...`.)
    ...
178,299 | from __future__ import annotations
import contextlib
import inspect
import os
import sys
import threading
import time
import uuid
from collections.abc import Sized
from functools import wraps
from typing import Any, Callable, Final, TypeVar, cast, overload
from streamlit import config, util
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.PageProfile_pb2 import Argument, Command
_LOGGER: Final = get_logger(__name__)
_MAX_TRACKED_COMMANDS: Final = 200
_MAX_TRACKED_PER_COMMAND: Final = 25
def _get_command_telemetry(
    _command_func: Callable[..., Any], _command_name: str, *args, **kwargs
) -> Command:
    """Get telemetry information for the given callable and its arguments.

    Builds a Command proto containing the (renamed, if external) command name
    and one Argument per positional/keyword argument, with type names and
    optional per-argument metadata.
    """
    arg_keywords = inspect.getfullargspec(_command_func).args
    self_arg: Any | None = None
    arguments: list[Argument] = []
    is_method = inspect.ismethod(_command_func)
    name = _command_name
    for i, arg in enumerate(args):
        # `pos` keeps the caller-visible position even when we shift `i`
        # to skip over `self` below.
        pos = i
        if is_method:
            # If func is a method, ignore the first argument (self)
            i = i + 1
        keyword = arg_keywords[i] if len(arg_keywords) > i else f"{i}"
        if keyword == "self":
            self_arg = arg
            continue
        argument = Argument(k=keyword, t=_get_type_name(arg), p=pos)
        arg_metadata = _get_arg_metadata(arg)
        if arg_metadata:
            argument.m = arg_metadata
        arguments.append(argument)
    for kwarg, kwarg_value in kwargs.items():
        argument = Argument(k=kwarg, t=_get_type_name(kwarg_value))
        arg_metadata = _get_arg_metadata(kwarg_value)
        if arg_metadata:
            argument.m = arg_metadata
        arguments.append(argument)
    top_level_module = _get_top_level_module(_command_func)
    if top_level_module != "streamlit":
        # If the gather_metrics decorator is used outside of streamlit library
        # we enforce a prefix to be added to the tracked command:
        name = f"external:{top_level_module}:{name}"
    # Custom components report under their component name rather than the
    # generic "create_instance".
    if (
        name == "create_instance"
        and self_arg
        and hasattr(self_arg, "name")
        and self_arg.name
    ):
        name = f"component:{self_arg.name}"
    return Command(name=name, args=arguments)
def to_microseconds(seconds: float) -> int:
    """Convert a duration in seconds to whole microseconds (truncated)."""
    micros_per_second = 1_000_000
    return int(seconds * micros_per_second)
F = TypeVar("F", bound=Callable[..., Any])
class RerunException(ScriptControlException):
    """Silently stop and rerun the user's script."""

    def __init__(self, rerun_data: RerunData):
        """Construct a RerunException

        Parameters
        ----------
        rerun_data : RerunData
            The RerunData that should be used to rerun the script
        """
        # Stored so the handler of this exception can restart the script
        # with the same rerun parameters.
        self.rerun_data = rerun_data

    def __repr__(self) -> str:
        return util.repr_(self)
The provided code snippet includes necessary dependencies for implementing the `gather_metrics` function. Write a Python function `def gather_metrics(name: str, func: F | None = None) -> Callable[[F], F] | F` to solve the following problem:
Function decorator to add telemetry tracking to commands. Parameters ---------- func : callable The function to track for telemetry. name : str or None Overwrite the function name with a custom name that is used for telemetry tracking. Example ------- >>> @st.gather_metrics ... def my_command(url): ... return url >>> @st.gather_metrics(name="custom_name") ... def my_command(url): ... return url
Here is the function:
def gather_metrics(name: str, func: F | None = None) -> Callable[[F], F] | F:
    """Function decorator to add telemetry tracking to commands.

    Parameters
    ----------
    func : callable
        The function to track for telemetry.

    name : str or None
        Overwrite the function name with a custom name that is used for telemetry tracking.

    Example
    -------
    >>> @st.gather_metrics
    ... def my_command(url):
    ...     return url

    >>> @st.gather_metrics(name="custom_name")
    ... def my_command(url):
    ...     return url
    """
    if not name:
        _LOGGER.warning("gather_metrics: name is empty")
        name = "undefined"

    if func is None:
        # Support passing the params via function decorator
        def wrapper(f: F) -> F:
            return gather_metrics(
                name=name,
                func=f,
            )

        return wrapper
    else:
        # To make mypy type narrow F | None -> F
        non_optional_func = func

    @wraps(non_optional_func)
    def wrapped_func(*args, **kwargs):
        from timeit import default_timer as timer

        exec_start = timer()
        # Local imports to prevent circular dependencies
        from streamlit.runtime.scriptrunner import get_script_run_ctx
        from streamlit.runtime.scriptrunner.script_runner import RerunException

        ctx = get_script_run_ctx(suppress_warning=True)

        # Only track when usage stats are on, we are not already inside a
        # tracked command, and the per-run command budget isn't exhausted.
        tracking_activated = (
            ctx is not None
            and ctx.gather_usage_stats
            and not ctx.command_tracking_deactivated
            and len(ctx.tracked_commands)
            < _MAX_TRACKED_COMMANDS  # Prevent too much memory usage
        )

        command_telemetry: Command | None = None

        if ctx and tracking_activated:
            try:
                command_telemetry = _get_command_telemetry(
                    non_optional_func, name, *args, **kwargs
                )

                if (
                    command_telemetry.name not in ctx.tracked_commands_counter
                    or ctx.tracked_commands_counter[command_telemetry.name]
                    < _MAX_TRACKED_PER_COMMAND
                ):
                    ctx.tracked_commands.append(command_telemetry)
                ctx.tracked_commands_counter.update([command_telemetry.name])
                # Deactivate tracking to prevent calls inside already tracked commands
                ctx.command_tracking_deactivated = True
            except Exception as ex:
                # Always capture all exceptions since we want to make sure that
                # the telemetry never causes any issues.
                _LOGGER.debug("Failed to collect command telemetry", exc_info=ex)
        try:
            result = non_optional_func(*args, **kwargs)
        except RerunException as ex:
            # Duplicated from below, because static analysis tools get confused
            # by deferring the rethrow.
            if tracking_activated and command_telemetry:
                command_telemetry.time = to_microseconds(timer() - exec_start)
            raise ex
        finally:
            # Activate tracking again if command executes without any exceptions
            if ctx:
                ctx.command_tracking_deactivated = False

        if tracking_activated and command_telemetry:
            # Set the execution time to the measured value
            command_telemetry.time = to_microseconds(timer() - exec_start)

        return result

    with contextlib.suppress(AttributeError):
        # Make this a well-behaved decorator by preserving important function
        # attributes.
        wrapped_func.__dict__.update(non_optional_func.__dict__)
        wrapped_func.__signature__ = inspect.signature(non_optional_func)  # type: ignore
    return cast(F, wrapped_func)
178,300 | from __future__ import annotations
import contextlib
import inspect
import os
import sys
import threading
import time
import uuid
from collections.abc import Sized
from functools import wraps
from typing import Any, Callable, Final, TypeVar, cast, overload
from streamlit import config, util
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.PageProfile_pb2 import Argument, Command
_ATTRIBUTIONS_TO_CHECK: Final = [
# DB Clients:
"pymysql",
"MySQLdb",
"mysql",
"pymongo",
"ibis",
"boto3",
"psycopg2",
"psycopg3",
"sqlalchemy",
"elasticsearch",
"pyodbc",
"pymssql",
"cassandra",
"azure",
"redis",
"sqlite3",
"neo4j",
"duckdb",
"opensearchpy",
"supabase",
# Dataframe Libraries:
"polars",
"dask",
"vaex",
"modin",
"pyspark",
"cudf",
"xarray",
"ray",
# ML & LLM Tools:
"mistralai",
"openai",
"langchain",
"llama_index",
"llama_cpp",
"anthropic",
"pyllamacpp",
"cohere",
"transformers",
"nomic",
"diffusers",
"semantic_kernel",
"replicate",
"huggingface_hub",
"wandb",
"torch",
"tensorflow",
"trubrics",
"comet_ml",
"clarifai",
"reka",
"hegel",
"fastchat",
"assemblyai",
"openllm",
"embedchain",
"haystack",
"vllm",
"alpa",
"jinaai",
"guidance",
"litellm",
"comet_llm",
"instructor",
# Workflow Tools:
"prefect",
"luigi",
"airflow",
"dagster",
# Vector Stores:
"pgvector",
"faiss",
"annoy",
"pinecone",
"chromadb",
"weaviate",
"qdrant_client",
"pymilvus",
"lancedb",
# Others:
"datasets",
"snowflake",
"streamlit_extras",
"streamlit_pydantic",
"pydantic",
"plost",
]
The provided code snippet includes necessary dependencies for implementing the `create_page_profile_message` function. Write a Python function `def create_page_profile_message( commands: list[Command], exec_time: int, prep_time: int, uncaught_exception: str | None = None, ) -> ForwardMsg` to solve the following problem:
Create and return the full PageProfile ForwardMsg.
Here is the function:
def create_page_profile_message(
    commands: list[Command],
    exec_time: int,
    prep_time: int,
    uncaught_exception: str | None = None,
) -> ForwardMsg:
    """Create and return the full PageProfile ForwardMsg.

    Parameters
    ----------
    commands : list[Command]
        The commands tracked during this script run.
    exec_time : int
        Script execution time (microseconds, per `to_microseconds` usage
        elsewhere in this module — TODO confirm).
    prep_time : int
        Preparation time before execution, same unit as exec_time.
    uncaught_exception : str or None
        Name of an uncaught exception, if the script run failed.
    """
    msg = ForwardMsg()
    msg.page_profile.commands.extend(commands)
    msg.page_profile.exec_time = exec_time
    msg.page_profile.prep_time = prep_time
    msg.page_profile.headless = config.get_option("server.headless")
    # Collect all config options that have been manually set
    config_options: set[str] = set()
    if config._config_options:
        for option_name in config._config_options.keys():
            if not config.is_manually_set(option_name):
                # We only care about manually defined options
                continue
            config_option = config._config_options[option_name]
            if config_option.is_default:
                # Manually set back to its default value — mark it as such.
                option_name = f"{option_name}:default"
            config_options.add(option_name)
    msg.page_profile.config.extend(config_options)
    # Check the predefined set of modules for attribution
    attributions: set[str] = {
        attribution
        for attribution in _ATTRIBUTIONS_TO_CHECK
        if attribution in sys.modules
    }
    msg.page_profile.os = str(sys.platform)
    msg.page_profile.timezone = str(time.tzname)
    msg.page_profile.attributions.extend(attributions)
    if uncaught_exception:
        msg.page_profile.uncaught_exception = uncaught_exception
    return msg
178,301 | from __future__ import annotations
import contextlib
import functools
import hashlib
import inspect
import math
import os
import pickle
import shutil
import threading
import time
from collections import namedtuple
from dataclasses import dataclass
from typing import Any, Callable, Final, Iterator, TypeVar, cast, overload
from cachetools import TTLCache
import streamlit as st
from streamlit import config, file_util, util
from streamlit.deprecation_util import show_deprecation_warning
from streamlit.elements.spinner import spinner
from streamlit.error_util import handle_uncaught_app_exception
from streamlit.errors import StreamlitAPIWarning
from streamlit.logger import get_logger
from streamlit.runtime.caching import CACHE_DOCS_URL
from streamlit.runtime.caching.cache_type import CacheType, get_decorator_api_name
from streamlit.runtime.legacy_caching.hashing import (
HashFuncsDict,
HashReason,
update_hash,
)
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.stats import CacheStat, CacheStatsProvider
from streamlit.util import HASHLIB_KWARGS
_cache_info = ThreadLocalCacheInfo()
def _show_cached_st_function_warning(
    dg: st.delta_generator.DeltaGenerator,
    st_func_name: str,
    cached_func: Callable[..., Any],
) -> None:
    """Render a CachedStFunctionWarning on `dg` for calling st.<st_func_name>
    from inside the cached function `cached_func`."""
    # Avoid infinite recursion by suppressing additional cached
    # function warnings from within the cached function warning.
    with suppress_cached_st_function_warning():
        e = CachedStFunctionWarning(st_func_name, cached_func)
        dg.exception(e)
The provided code snippet includes necessary dependencies for implementing the `maybe_show_cached_st_function_warning` function. Write a Python function `def maybe_show_cached_st_function_warning( dg: st.delta_generator.DeltaGenerator, st_func_name: str ) -> None` to solve the following problem:
If appropriate, warn about calling st.foo inside @cache. DeltaGenerator's @_with_element and @_widget wrappers use this to warn the user when they're calling st.foo() from within a function that is wrapped in @st.cache. Parameters ---------- dg : DeltaGenerator The DeltaGenerator to publish the warning to. st_func_name : str The name of the Streamlit function that was called.
Here is the function:
def maybe_show_cached_st_function_warning(
    dg: st.delta_generator.DeltaGenerator, st_func_name: str
) -> None:
    """Warn about calling st.foo inside @cache, when appropriate.

    DeltaGenerator's @_with_element and @_widget wrappers use this to warn
    the user when they're calling st.foo() from within a function that is
    wrapped in @st.cache.

    Parameters
    ----------
    dg : DeltaGenerator
        The DeltaGenerator to publish the warning to.
    st_func_name : str
        The name of the Streamlit function that was called.
    """
    cached_stack = _cache_info.cached_func_stack
    # Nothing to warn about unless we are currently inside a cached function.
    if not cached_stack:
        return
    # Warnings may be temporarily suppressed (e.g. while rendering a warning).
    if _cache_info.suppress_st_function_warning > 0:
        return
    _show_cached_st_function_warning(dg, st_func_name, cached_stack[-1])
178,302 | from __future__ import annotations
import contextlib
import functools
import hashlib
import inspect
import math
import os
import pickle
import shutil
import threading
import time
from collections import namedtuple
from dataclasses import dataclass
from typing import Any, Callable, Final, Iterator, TypeVar, cast, overload
from cachetools import TTLCache
import streamlit as st
from streamlit import config, file_util, util
from streamlit.deprecation_util import show_deprecation_warning
from streamlit.elements.spinner import spinner
from streamlit.error_util import handle_uncaught_app_exception
from streamlit.errors import StreamlitAPIWarning
from streamlit.logger import get_logger
from streamlit.runtime.caching import CACHE_DOCS_URL
from streamlit.runtime.caching.cache_type import CacheType, get_decorator_api_name
from streamlit.runtime.legacy_caching.hashing import (
HashFuncsDict,
HashReason,
update_hash,
)
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.stats import CacheStat, CacheStatsProvider
from streamlit.util import HASHLIB_KWARGS
def _clear_disk_cache() -> bool:
    """Remove the on-disk cache directory.

    Returns True when the directory existed and was deleted, False otherwise.
    """
    # TODO: Only delete disk cache for functions related to the user's current
    # script.
    cache_path = get_cache_path()
    if not os.path.isdir(cache_path):
        return False
    shutil.rmtree(cache_path)
    return True
def _clear_mem_cache() -> None:
    """Drop every entry from the in-memory caches."""
    _mem_caches.clear()
The provided code snippet includes necessary dependencies for implementing the `clear_cache` function. Write a Python function `def clear_cache() -> bool` to solve the following problem:
Clear the memoization cache. Returns ------- boolean True if the disk cache was cleared. False otherwise (e.g. cache file doesn't exist on disk).
Here is the function:
def clear_cache() -> bool:
    """Clear the memoization cache.

    Returns
    -------
    boolean
        True if the disk cache was cleared. False otherwise (e.g. cache file
        doesn't exist on disk).
    """
    # Clear memory first; the return value reflects the disk cache only.
    _clear_mem_cache()
    disk_was_cleared = _clear_disk_cache()
    return disk_was_cleared
178,303 | from __future__ import annotations
import contextlib
import functools
import hashlib
import inspect
import math
import os
import pickle
import shutil
import threading
import time
from collections import namedtuple
from dataclasses import dataclass
from typing import Any, Callable, Final, Iterator, TypeVar, cast, overload
from cachetools import TTLCache
import streamlit as st
from streamlit import config, file_util, util
from streamlit.deprecation_util import show_deprecation_warning
from streamlit.elements.spinner import spinner
from streamlit.error_util import handle_uncaught_app_exception
from streamlit.errors import StreamlitAPIWarning
from streamlit.logger import get_logger
from streamlit.runtime.caching import CACHE_DOCS_URL
from streamlit.runtime.caching.cache_type import CacheType, get_decorator_api_name
from streamlit.runtime.legacy_caching.hashing import (
HashFuncsDict,
HashReason,
update_hash,
)
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.stats import CacheStat, CacheStatsProvider
from streamlit.util import HASHLIB_KWARGS
The provided code snippet includes necessary dependencies for implementing the `_get_cached_func_name_md` function. Write a Python function `def _get_cached_func_name_md(func: Callable[..., Any]) -> str` to solve the following problem:
Get markdown representation of the function name.
Here is the function:
def _get_cached_func_name_md(func: Callable[..., Any]) -> str:
"""Get markdown representation of the function name."""
if hasattr(func, "__name__"):
return "`%s()`" % func.__name__
else:
return "a cached function" | Get markdown representation of the function name. |
178,304 | from __future__ import annotations
import collections
import enum
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import textwrap
import threading
import weakref
from typing import Any, Callable, Dict, Pattern, Type, Union
from streamlit import config, file_util, type_util, util
from streamlit.errors import MarkdownFormattedException, StreamlitAPIException
from streamlit.folder_black_list import FolderBlackList
from streamlit.runtime.uploaded_file_manager import UploadedFile
from streamlit.util import HASHLIB_KWARGS
Context = collections.namedtuple("Context", ["globals", "cells", "varnames"])
class _Cells:
    """
    Class which is basically a dict that allows us to push/pop frames of data.

    Python code objects are nested. In the following function:

    def func():
        production = [[x + y for x in range(3)] for y in range(5)]
        return production

    func.__code__ is a code object, and contains (inside
    func.__code__.co_consts) additional code objects for the list
    comprehensions. Those objects have their own co_freevars and co_cellvars.

    What we need to do as we're traversing this "tree" of code objects is to
    save each code object's vars, hash it, and then restore the original vars.
    """

    # Sentinel marking "this key did not exist before the frame was pushed",
    # so pop() knows to delete it rather than restore a value.
    _cell_delete_obj = object()

    def __init__(self):
        # values: current variable-name -> value mapping.
        # stack: (key, previous value or _cell_delete_obj) undo log.
        # frames: indexes into `stack` where each pushed frame begins.
        self.values = {}
        self.stack = []
        self.frames = []

    def __repr__(self) -> str:
        return util.repr_(self)

    def _set(self, key, value):
        """
        Sets a value and saves the old value so it can be restored when
        we pop the frame. A sentinel object, _cell_delete_obj, indicates that
        the key was previously empty and should just be deleted.
        """
        # save the old value (or mark that it didn't exist)
        self.stack.append((key, self.values.get(key, self._cell_delete_obj)))
        # write the new value
        self.values[key] = value

    def pop(self):
        """Pop off the last frame we created, and restore all the old values."""
        idx = self.frames.pop()
        for key, val in self.stack[idx:]:
            if val is self._cell_delete_obj:
                del self.values[key]
            else:
                self.values[key] = val
        # Truncate the undo log back to where this frame started.
        self.stack = self.stack[:idx]

    def push(self, code, func=None):
        """Create a new frame, and save all of `code`'s vars into it."""
        self.frames.append(len(self.stack))
        for var in code.co_cellvars:
            self._set(var, var)
        if code.co_freevars:
            if func is not None:
                assert len(code.co_freevars) == len(func.__closure__)
                for var, cell in zip(code.co_freevars, func.__closure__):
                    self._set(var, cell.cell_contents)
            else:
                # List comprehension code objects also have freevars, but they
                # don't have a surrounding closure. In these cases we just use the name.
                for var in code.co_freevars:
                    self._set(var, var)
def _get_context(func) -> Context:
    """Build the hashing Context (globals, cell tracker, known names) for `func`."""
    varnames = {}
    if inspect.ismethod(func):
        # For bound methods, expose the instance under the name "self" so the
        # hash accounts for instance state.
        varnames = {"self": func.__self__}
    return Context(globals=func.__globals__, cells=_Cells(), varnames=varnames)
178,305 | from __future__ import annotations
import collections
import enum
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import textwrap
import threading
import weakref
from typing import Any, Callable, Dict, Pattern, Type, Union
from streamlit import config, file_util, type_util, util
from streamlit.errors import MarkdownFormattedException, StreamlitAPIException
from streamlit.folder_black_list import FolderBlackList
from streamlit.runtime.uploaded_file_manager import UploadedFile
from streamlit.util import HASHLIB_KWARGS
def _int_to_bytes(i: int) -> bytes:
num_bytes = (i.bit_length() + 8) // 8
return i.to_bytes(num_bytes, "little", signed=True) | null |
178,306 | from __future__ import annotations
import collections
import enum
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import textwrap
import threading
import weakref
from typing import Any, Callable, Dict, Pattern, Type, Union
from streamlit import config, file_util, type_util, util
from streamlit.errors import MarkdownFormattedException, StreamlitAPIException
from streamlit.folder_black_list import FolderBlackList
from streamlit.runtime.uploaded_file_manager import UploadedFile
from streamlit.util import HASHLIB_KWARGS
class NoResult:
    """Sentinel type used where ``None`` is a meaningful return value.

    Returned (as the class itself, never instantiated) to signal "no
    usable result".
    """

    pass
The provided code snippet includes necessary dependencies for implementing the `_key` function. Write a Python function `def _key(obj: Any | None) -> Any` to solve the following problem:
Return key for memoization.
Here is the function:
def _key(obj: Any | None) -> Any:
"""Return key for memoization."""
if obj is None:
return None
def is_simple(obj):
return (
isinstance(obj, bytes)
or isinstance(obj, bytearray)
or isinstance(obj, str)
or isinstance(obj, float)
or isinstance(obj, int)
or isinstance(obj, bool)
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple):
if all(map(is_simple, obj)):
return obj
if isinstance(obj, list):
if all(map(is_simple, obj)):
return ("__l", tuple(obj))
if (
type_util.is_type(obj, "pandas.core.frame.DataFrame")
or type_util.is_type(obj, "numpy.ndarray")
or inspect.isbuiltin(obj)
or inspect.isroutine(obj)
or inspect.iscode(obj)
):
return id(obj)
return NoResult | Return key for memoization. |
178,307 | from __future__ import annotations
import collections
import enum
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import textwrap
import threading
import weakref
from typing import Any, Callable, Dict, Pattern, Type, Union
from streamlit import config, file_util, type_util, util
from streamlit.errors import MarkdownFormattedException, StreamlitAPIException
from streamlit.folder_black_list import FolderBlackList
from streamlit.runtime.uploaded_file_manager import UploadedFile
from streamlit.util import HASHLIB_KWARGS
Context = collections.namedtuple("Context", ["globals", "cells", "varnames"])
class UserHashError(StreamlitAPIException):
    """Raised when hashing fails inside user-provided code.

    Wraps the original exception (keeping its traceback via
    ``with_traceback`` so the error points at the user's code) and builds a
    Markdown-formatted message: one variant for a failing user ``hash_funcs``
    callable, another for a failure while hashing a cached code object.
    """

    def __init__(self, orig_exc, cached_func_or_code, hash_func=None, lineno=None):
        # Displayed in place of "UserHashError" so users see the real type.
        self.alternate_name = type(orig_exc).__name__
        if hash_func:
            # The user's custom hash function raised.
            msg = self._get_message_from_func(orig_exc, cached_func_or_code, hash_func)
        else:
            # Hashing the cached code object itself raised.
            msg = self._get_message_from_code(orig_exc, cached_func_or_code, lineno)
        super().__init__(msg)
        self.with_traceback(orig_exc.__traceback__)

    def _get_message_from_func(self, orig_exc, cached_func, hash_func):
        # Build the message for a failure inside a user-supplied hash_funcs entry.
        args = _get_error_message_args(orig_exc, cached_func)
        if hasattr(hash_func, "__name__"):
            args["hash_func_name"] = "`%s()`" % hash_func.__name__
        else:
            args["hash_func_name"] = "a function"
        return (
            """
%(orig_exception_desc)s
This error is likely due to a bug in %(hash_func_name)s, which is a
user-defined hash function that was passed into the `@st.cache` decorator of
%(object_desc)s.
%(hash_func_name)s failed when hashing an object of type
`%(failed_obj_type_str)s`.  If you don't know where that object is coming from,
try looking at the hash chain below for an object that you do recognize, then
pass that to `hash_funcs` instead:
```
%(hash_stack)s
```
If you think this is actually a Streamlit bug, please
[file a bug report here](https://github.com/streamlit/streamlit/issues/new/choose).
"""
            % args
        ).strip("\n")

    def _get_message_from_code(self, orig_exc: BaseException, cached_code, lineno: int):
        # Build the message for a failure while hashing the cached code object,
        # pointing the user at the source lines near where it happened.
        args = _get_error_message_args(orig_exc, cached_code)
        failing_lines = _get_failing_lines(cached_code, lineno)
        failing_lines_str = "".join(failing_lines)
        failing_lines_str = textwrap.dedent(failing_lines_str).strip("\n")
        args["failing_lines_str"] = failing_lines_str
        args["filename"] = cached_code.co_filename
        args["lineno"] = lineno
        # This needs to have zero indentation otherwise %(lines_str)s will
        # render incorrectly in Markdown.
        return (
            """
%(orig_exception_desc)s
Streamlit encountered an error while caching %(object_part)s %(object_desc)s.
This is likely due to a bug in `%(filename)s` near line `%(lineno)s`:
```
%(failing_lines_str)s
```
Please modify the code above to address this.
If you think this is actually a Streamlit bug, you may [file a bug report
here.] (https://github.com/streamlit/streamlit/issues/new/choose)
"""
            % args
        ).strip("\n")
def get_referenced_objects(code, context: Context) -> list[Any]:
    """Walk *code*'s bytecode and collect the external objects it references.

    Attribute chains (``foo.bar``) are resolved by simulating the relevant
    stack operations: the current "top of stack" object is tracked and
    attribute loads are applied to it. Resolution sources, in order, are
    *context*'s globals, closure cells, and known local-variable values.

    Raises
    ------
    UserHashError
        If handling an instruction fails; wraps the original exception with
        the nearest known line number for a friendly message.
    """
    # Top of the stack
    tos: Any = None
    lineno = None
    refs: list[Any] = []

    def set_tos(t):
        nonlocal tos
        if tos is not None:
            # Hash tos so we support reading multiple objects
            refs.append(tos)
        tos = t

    # Our goal is to find referenced objects. The problem is that co_names
    # does not have full qualified names in it. So if you access `foo.bar`,
    # co_names has `foo` and `bar` in it but it doesn't tell us that the
    # code reads `bar` of `foo`. We are going over the bytecode to resolve
    # from which object an attribute is requested.
    # Read more about bytecode at https://docs.python.org/3/library/dis.html

    import dis

    for op in dis.get_instructions(code):
        try:
            # Sometimes starts_line is None, in which case let's just remember the
            # previous start_line (if any). This way when there's an exception we at
            # least can point users somewhat near the line where the error stems from.
            if op.starts_line is not None:
                lineno = op.starts_line

            if op.opname in ["LOAD_GLOBAL", "LOAD_NAME"]:
                if op.argval in context.globals:
                    set_tos(context.globals[op.argval])
                else:
                    # Unknown global: fall back to tracking the bare name.
                    set_tos(op.argval)
            elif op.opname in ["LOAD_DEREF", "LOAD_CLOSURE"]:
                set_tos(context.cells.values[op.argval])
            elif op.opname == "IMPORT_NAME":
                try:
                    import importlib

                    set_tos(importlib.import_module(op.argval))
                except ImportError:
                    # Module unavailable here: track the name instead.
                    set_tos(op.argval)
            elif op.opname in ["LOAD_METHOD", "LOAD_ATTR", "IMPORT_FROM"]:
                if tos is None:
                    refs.append(op.argval)
                elif isinstance(tos, str):
                    # Extend a dotted-name string we're building up.
                    tos += "." + op.argval
                else:
                    tos = getattr(tos, op.argval)
            elif op.opname == "DELETE_FAST" and tos:
                del context.varnames[op.argval]
                tos = None
            elif op.opname == "STORE_FAST" and tos:
                # Remember the local's value so later LOAD_FASTs resolve it.
                context.varnames[op.argval] = tos
                tos = None
            elif op.opname == "LOAD_FAST" and op.argval in context.varnames:
                set_tos(context.varnames[op.argval])
            else:
                # For all other instructions, hash the current TOS.
                if tos is not None:
                    refs.append(tos)
                    tos = None
        except Exception as e:
            raise UserHashError(e, code, lineno=lineno)

    return refs
178,308 | from __future__ import annotations
import collections
import enum
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import textwrap
import threading
import weakref
from typing import Any, Callable, Dict, Pattern, Type, Union
from streamlit import config, file_util, type_util, util
from streamlit.errors import MarkdownFormattedException, StreamlitAPIException
from streamlit.folder_black_list import FolderBlackList
from streamlit.runtime.uploaded_file_manager import UploadedFile
from streamlit.util import HASHLIB_KWARGS
class HashReason(enum.Enum):
    """Why a hash is being computed; drives the wording of hashing error
    messages (consumed when building user-facing error text)."""

    CACHING_FUNC_ARGS = 0  # hashing a cached function's arguments
    CACHING_FUNC_BODY = 1  # hashing a cached function's body
    CACHING_FUNC_OUTPUT = 2  # hashing a cached function's return value
    CACHING_BLOCK = 3  # hashing a cached code block
hash_stacks = _HashStacks()
def _get_error_message_args(orig_exc: BaseException, failed_obj: Any) -> dict[str, Any]:
    """Assemble the format arguments shared by hashing error messages.

    Pulls the current hash reason/source off the active hash stack and
    derives human-readable descriptions of what was being hashed.
    """
    reason = hash_stacks.current.hash_reason
    source = hash_stacks.current.hash_source

    object_part = ""
    if source is None or reason is None:
        object_desc = "something"
    elif reason is HashReason.CACHING_BLOCK:
        object_desc = "a code block"
    else:
        object_desc = (
            f"`{source.__name__}()`" if hasattr(source, "__name__") else "a function"
        )
        # Which part of the cached function was being hashed.
        part_by_reason = {
            HashReason.CACHING_FUNC_ARGS: "the arguments of",
            HashReason.CACHING_FUNC_BODY: "the body of",
            HashReason.CACHING_FUNC_OUTPUT: "the return value of",
        }
        object_part = part_by_reason.get(reason, "")

    return {
        "orig_exception_desc": str(orig_exc),
        "failed_obj_type_str": type_util.get_fqn_type(failed_obj),
        "hash_stack": hash_stacks.current.pretty_print(),
        "object_desc": object_desc,
        "object_part": object_part,
    }
178,309 | from __future__ import annotations
import collections
import enum
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import textwrap
import threading
import weakref
from typing import Any, Callable, Dict, Pattern, Type, Union
from streamlit import config, file_util, type_util, util
from streamlit.errors import MarkdownFormattedException, StreamlitAPIException
from streamlit.folder_black_list import FolderBlackList
from streamlit.runtime.uploaded_file_manager import UploadedFile
from streamlit.util import HASHLIB_KWARGS
The provided code snippet includes necessary dependencies for implementing the `_get_failing_lines` function. Write a Python function `def _get_failing_lines(code, lineno: int) -> list[str]` to solve the following problem:
Get list of strings (lines of code) from lineno to lineno+3. Ideally we'd return the exact line where the error took place, but there are reasons why this is not possible without a lot of work, including playing with the AST. So for now we're returning 3 lines near where the error took place.
Here is the function:
def _get_failing_lines(code, lineno: int) -> list[str]:
"""Get list of strings (lines of code) from lineno to lineno+3.
Ideally we'd return the exact line where the error took place, but there
are reasons why this is not possible without a lot of work, including
playing with the AST. So for now we're returning 3 lines near where
the error took place.
"""
source_lines, source_lineno = inspect.getsourcelines(code)
start = lineno - source_lineno
end = min(start + 3, len(source_lines))
lines = source_lines[start:end]
return lines | Get list of strings (lines of code) from lineno to lineno+3. Ideally we'd return the exact line where the error took place, but there are reasons why this is not possible without a lot of work, including playing with the AST. So for now we're returning 3 lines near where the error took place. |
178,310 | from __future__ import annotations
import json
import os
import sys
import textwrap
from collections import namedtuple
from datetime import datetime
from typing import Final, NoReturn
from uuid import uuid4
from streamlit import cli_util, env_util, file_util, util
from streamlit.logger import get_logger
if env_util.IS_WINDOWS:
_CONFIG_FILE_PATH = r"%userprofile%/.streamlit/config.toml"
else:
_CONFIG_FILE_PATH = "~/.streamlit/config.toml"
def email_prompt() -> str:
    """Build the "Welcome to Streamlit" email prompt shown on the CLI.

    Returns the prompt text with CLI styling applied; the leading emoji is
    included only when stdout is UTF-8 capable (and, on Windows, only under
    Windows Terminal).
    """
    # Emoji can cause encoding errors on non-UTF-8 terminals
    # (See https://github.com/streamlit/streamlit/issues/2284.)
    # WT_SESSION is a Windows Terminal specific environment variable. If it exists,
    # we are on the latest Windows Terminal that supports emojis
    show_emoji = sys.stdout.encoding == "utf-8" and (
        not env_util.IS_WINDOWS or os.environ.get("WT_SESSION")
    )

    # IMPORTANT: Break the text below at 80 chars.
    return """
{0}%(welcome)s
If you’d like to receive helpful onboarding emails, news, offers, promotions,
and the occasional swag, please enter your email address below. Otherwise,
leave this field blank.
%(email)s""".format(
        "👋 " if show_emoji else ""
    ) % {
        "welcome": cli_util.style_for_cli("Welcome to Streamlit!", bold=True),
        "email": cli_util.style_for_cli("Email: ", fg="blue"),
    }
178,311 | from __future__ import annotations
import json
import os
import sys
import textwrap
from collections import namedtuple
from datetime import datetime
from typing import Final, NoReturn
from uuid import uuid4
from streamlit import cli_util, env_util, file_util, util
from streamlit.logger import get_logger
The provided code snippet includes necessary dependencies for implementing the `_send_email` function. Write a Python function `def _send_email(email: str) -> None` to solve the following problem:
Send the user's email to segment.io, if submitted
Here is the function:
def _send_email(email: str) -> None:
    """Send the user's email to segment.io, if submitted.

    Best-effort telemetry call: silently does nothing when the email is
    missing or not plausibly an address. Raises ``requests.HTTPError`` if
    segment.io responds with a non-2xx status.
    """
    # Local imports: only needed here, and `requests` is a heavy dependency.
    import requests
    from datetime import timezone

    if email is None or "@" not in email:
        return

    headers = {
        "authority": "api.segment.io",
        "accept": "*/*",
        "accept-language": "en-US,en;q=0.9",
        "content-type": "text/plain",
        "origin": "localhost:8501",
        "referer": "localhost:8501/",
    }

    # Timezone-aware UTC timestamp. datetime.utcnow() is deprecated since
    # Python 3.12; now(timezone.utc).isoformat() already carries "+00:00".
    dt = datetime.now(timezone.utc).isoformat()

    data = {
        "anonymous_id": None,
        "context": {
            "library": {"name": "analytics-python", "version": "2.2.2"},
        },
        "messageId": str(uuid4()),
        "timestamp": dt,
        "event": "submittedEmail",
        "traits": {
            "authoremail": email,
            "source": "provided_email",
        },
        "type": "track",
        "userId": email,
        "writeKey": "iCkMy7ymtJ9qYzQRXkQpnAJEq7D4NyMU",
    }

    response = requests.post(
        "https://api.segment.io/v1/t",
        headers=headers,
        data=json.dumps(data).encode(),
    )
    response.raise_for_status()
178,312 | from __future__ import annotations
import json
import os
import sys
import textwrap
from collections import namedtuple
from datetime import datetime
from typing import Final, NoReturn
from uuid import uuid4
from streamlit import cli_util, env_util, file_util, util
from streamlit.logger import get_logger
_LOGGER: Final = get_logger(__name__)
_Activation = namedtuple(
"_Activation",
[
"email", # str : the user's email.
"is_valid", # boolean : whether the email is valid.
],
)
The provided code snippet includes necessary dependencies for implementing the `_verify_email` function. Write a Python function `def _verify_email(email: str) -> _Activation` to solve the following problem:
Verify the user's email address. The email can either be an empty string (if the user chooses not to enter it), or a string with a single '@' somewhere in it. Parameters ---------- email : str Returns ------- _Activation An _Activation object. Its 'is_valid' property will be True only if the email was validated.
Here is the function:
def _verify_email(email: str) -> _Activation:
    """Validate the user's (optional) email address.

    An empty string is accepted (the user opted out). Otherwise the address
    must contain exactly one '@'. Validation is deliberately minimal since
    the address is never used to send email from here.

    Parameters
    ----------
    email : str

    Returns
    -------
    _Activation
        Its ``is_valid`` property is True only if the email was validated.
    """
    email = email.strip()

    # We deliberately use simple email validation here
    # since we do not use email address anywhere to send emails.
    looks_bogus = len(email) > 0 and email.count("@") != 1
    if looks_bogus:
        _LOGGER.error("That doesn't look like an email :(")
        return _Activation(None, False)

    return _Activation(email, True)
178,313 | from __future__ import annotations
import json
import os
import sys
import textwrap
from collections import namedtuple
from datetime import datetime
from typing import Final, NoReturn
from uuid import uuid4
from streamlit import cli_util, env_util, file_util, util
from streamlit.logger import get_logger
_LOGGER: Final = get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `_exit` function. Write a Python function `def _exit(message: str) -> NoReturn` to solve the following problem:
Exit program with error.
Here is the function:
def _exit(message: str) -> NoReturn:
    """Log *message* as an error and terminate the process with status -1."""
    _LOGGER.error(message)
    # Equivalent to sys.exit(-1).
    raise SystemExit(-1)
178,314 | from __future__ import annotations
from typing import Any
from streamlit.runtime.metrics_util import gather_metrics
The provided code snippet includes necessary dependencies for implementing the `transparent_write` function. Write a Python function `def transparent_write(*args: Any) -> Any` to solve the following problem:
The function that gets magic-ified into Streamlit apps. This is just st.write, but returns the arguments you passed to it.
Here is the function:
def transparent_write(*args: Any) -> Any:
    """The st.write variant that gets magic-ified into Streamlit apps.

    Renders *args* via ``st.write`` and passes them back (unwrapped when
    there is exactly one), so the surrounding expression keeps its value.
    """
    import streamlit as st

    st.write(*args)
    return args[0] if len(args) == 1 else args
178,315 | from __future__ import annotations
import collections
import threading
from dataclasses import dataclass, field
from typing import Callable, Counter, Dict, Final, Union
from urllib import parse
from typing_extensions import TypeAlias
from streamlit import runtime
from streamlit.errors import StreamlitAPIException
from streamlit.logger import get_logger
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.PageProfile_pb2 import Command
from streamlit.runtime.scriptrunner.script_requests import ScriptRequests
from streamlit.runtime.state import SafeSessionState
from streamlit.runtime.uploaded_file_manager import UploadedFileManager
class ScriptRunContext:
    """A context object that contains data for a "script run" - that is,
    data that's scoped to a single ScriptRunner execution (and therefore also
    scoped to a single connected "session").

    ScriptRunContext is used internally by virtually every `st.foo()` function.
    It is accessed only from the script thread that's created by ScriptRunner,
    or from app-created helper threads that have been "attached" to the
    ScriptRunContext via `add_script_run_ctx`.

    Streamlit code typically retrieves the active ScriptRunContext via the
    `get_script_run_ctx` function.
    """

    session_id: str  # ID of the session this run belongs to
    _enqueue: Callable[[ForwardMsg], None]  # callback that ships a ForwardMsg out
    query_string: str  # raw URL query string of the current run
    session_state: SafeSessionState
    uploaded_file_mgr: UploadedFileManager
    main_script_path: str
    page_script_hash: str
    user_info: UserInfo
    gather_usage_stats: bool = False
    command_tracking_deactivated: bool = False
    tracked_commands: list[Command] = field(default_factory=list)
    tracked_commands_counter: Counter[str] = field(default_factory=collections.Counter)
    # set_page_config is only legal as the first st command of a run.
    _set_page_config_allowed: bool = True
    _has_script_started: bool = False
    widget_ids_this_run: set[str] = field(default_factory=set)
    widget_user_keys_this_run: set[str] = field(default_factory=set)
    form_ids_this_run: set[str] = field(default_factory=set)
    cursors: dict[int, "streamlit.cursor.RunningCursor"] = field(default_factory=dict)
    script_requests: ScriptRequests | None = None

    # TODO(willhuang1997): Remove this variable when experimental query params are removed
    _experimental_query_params_used = False
    _production_query_params_used = False

    def reset(self, query_string: str = "", page_script_hash: str = "") -> None:
        # Clear all per-run state ahead of a rerun, then re-seed the
        # session's query params from the (possibly new) query string.
        self.cursors = {}
        self.widget_ids_this_run = set()
        self.widget_user_keys_this_run = set()
        self.form_ids_this_run = set()
        self.query_string = query_string
        self.page_script_hash = page_script_hash
        # Permit set_page_config when the ScriptRunContext is reused on a rerun
        self._set_page_config_allowed = True
        self._has_script_started = False
        self.command_tracking_deactivated: bool = False
        self.tracked_commands = []
        self.tracked_commands_counter = collections.Counter()

        parsed_query_params = parse.parse_qs(query_string, keep_blank_values=True)
        with self.session_state.query_params() as qp:
            qp.clear_with_no_forward_msg()
            for key, val in parsed_query_params.items():
                if len(val) == 0:
                    qp.set_with_no_forward_msg(key, val="")
                elif len(val) == 1:
                    # Single-valued param: store the scalar, not a 1-list.
                    qp.set_with_no_forward_msg(key, val=val[-1])
                else:
                    qp.set_with_no_forward_msg(key, val)

    def on_script_start(self) -> None:
        self._has_script_started = True

    def enqueue(self, msg: ForwardMsg) -> None:
        """Enqueue a ForwardMsg for this context's session."""
        if msg.HasField("page_config_changed") and not self._set_page_config_allowed:
            raise StreamlitAPIException(
                "`set_page_config()` can only be called once per app page, "
                + "and must be called as the first Streamlit command in your script.\n\n"
                + "For more information refer to the [docs]"
                + "(https://docs.streamlit.io/library/api-reference/utilities/st.set_page_config)."
            )

        # We want to disallow set_page config if one of the following occurs:
        # - set_page_config was called on this message
        # - The script has already started and a different st call occurs (a delta)
        if msg.HasField("page_config_changed") or (
            msg.HasField("delta") and self._has_script_started
        ):
            self._set_page_config_allowed = False

        # Pass the message up to our associated ScriptRunner.
        self._enqueue(msg)

    def ensure_single_query_api_used(self):
        # The old experimental query-param API and the new st.query_params
        # API must not be mixed within one session.
        if self._experimental_query_params_used and self._production_query_params_used:
            raise StreamlitAPIException(
                "Using `st.query_params` together with either `st.experimental_get_query_params` "
                + "or `st.experimental_set_query_params` is not supported. Please convert your app "
                + "to only use `st.query_params`"
            )

    def mark_experimental_query_params_used(self):
        self._experimental_query_params_used = True
        self.ensure_single_query_api_used()

    def mark_production_query_params_used(self):
        self._production_query_params_used = True
        self.ensure_single_query_api_used()
SCRIPT_RUN_CONTEXT_ATTR_NAME: Final = "streamlit_script_run_ctx"
def get_script_run_ctx(suppress_warning: bool = False) -> ScriptRunContext | None:
    """Return the ScriptRunContext attached to the current thread, if any.

    Parameters
    ----------
    suppress_warning : bool
        If True, don't log a warning if there's no ScriptRunContext.

    Returns
    -------
    ScriptRunContext | None
        The current thread's ScriptRunContext, or None if it doesn't have one.
    """
    current = threading.current_thread()
    ctx: ScriptRunContext | None = getattr(current, SCRIPT_RUN_CONTEXT_ATTR_NAME, None)

    if ctx is None and runtime.exists() and not suppress_warning:
        # A missing context is only noteworthy when we were started via
        # `streamlit run`. Otherwise, the user is likely running a script
        # "bare" and the warning would just be noise.
        _LOGGER.warning("Thread '%s': missing ScriptRunContext", current.name)

    return ctx
import streamlit
The provided code snippet includes necessary dependencies for implementing the `add_script_run_ctx` function. Write a Python function `def add_script_run_ctx( thread: threading.Thread | None = None, ctx: ScriptRunContext | None = None )` to solve the following problem:
Adds the current ScriptRunContext to a newly-created thread. This should be called from this thread's parent thread, before the new thread starts. Parameters ---------- thread : threading.Thread The thread to attach the current ScriptRunContext to. ctx : ScriptRunContext or None The ScriptRunContext to add, or None to use the current thread's ScriptRunContext. Returns ------- threading.Thread The same thread that was passed in, for chaining.
Here is the function:
def add_script_run_ctx(
    thread: threading.Thread | None = None, ctx: ScriptRunContext | None = None
):
    """Attach a ScriptRunContext to a thread.

    This should be called from the thread's parent thread, before the new
    thread starts.

    Parameters
    ----------
    thread : threading.Thread
        The thread to attach the ScriptRunContext to; defaults to the
        calling thread.
    ctx : ScriptRunContext or None
        The ScriptRunContext to add, or None to use the calling thread's
        ScriptRunContext.

    Returns
    -------
    threading.Thread
        The same thread that was passed in, for chaining.
    """
    target = thread if thread is not None else threading.current_thread()
    context = ctx if ctx is not None else get_script_run_ctx()
    if context is not None:
        setattr(target, SCRIPT_RUN_CONTEXT_ATTR_NAME, context)
    return target
178,316 | from __future__ import annotations
import gc
import sys
import threading
import types
from contextlib import contextmanager
from enum import Enum
from timeit import default_timer as timer
from typing import Callable, Final
from blinker import Signal
from streamlit import config, runtime, source_util, util
from streamlit.error_util import handle_uncaught_app_exception
from streamlit.logger import get_logger
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.scriptrunner.script_cache import ScriptCache
from streamlit.runtime.scriptrunner.script_requests import (
RerunData,
ScriptRequests,
ScriptRequestType,
)
from streamlit.runtime.scriptrunner.script_run_context import (
ScriptRunContext,
add_script_run_ctx,
get_script_run_ctx,
)
from streamlit.runtime.state import (
SCRIPT_RUN_WITHOUT_ERRORS_KEY,
SafeSessionState,
SessionState,
)
from streamlit.runtime.uploaded_file_manager import UploadedFileManager
from streamlit.vendor.ipython.modified_sys_path import modified_sys_path
The provided code snippet includes necessary dependencies for implementing the `_clean_problem_modules` function. Write a Python function `def _clean_problem_modules() -> None` to solve the following problem:
Some modules are stateful, so we have to clear their state.
Here is the function:
def _clean_problem_modules() -> None:
"""Some modules are stateful, so we have to clear their state."""
if "keras" in sys.modules:
try:
keras = sys.modules["keras"]
keras.backend.clear_session()
except Exception:
# We don't want to crash the app if we can't clear the Keras session.
pass
if "matplotlib.pyplot" in sys.modules:
try:
plt = sys.modules["matplotlib.pyplot"]
plt.close("all")
except Exception:
# We don't want to crash the app if we can't close matplotlib
pass | Some modules are stateful, so we have to clear their state. |
178,317 | from __future__ import annotations
import gc
import sys
import threading
import types
from contextlib import contextmanager
from enum import Enum
from timeit import default_timer as timer
from typing import Callable, Final
from blinker import Signal
from streamlit import config, runtime, source_util, util
from streamlit.error_util import handle_uncaught_app_exception
from streamlit.logger import get_logger
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.scriptrunner.script_cache import ScriptCache
from streamlit.runtime.scriptrunner.script_requests import (
RerunData,
ScriptRequests,
ScriptRequestType,
)
from streamlit.runtime.scriptrunner.script_run_context import (
ScriptRunContext,
add_script_run_ctx,
get_script_run_ctx,
)
from streamlit.runtime.state import (
SCRIPT_RUN_WITHOUT_ERRORS_KEY,
SafeSessionState,
SessionState,
)
from streamlit.runtime.uploaded_file_manager import UploadedFileManager
from streamlit.vendor.ipython.modified_sys_path import modified_sys_path
_LOGGER: Final = get_logger(__name__)
def _log_if_error(fn: Callable[[], None]) -> None:
try:
fn()
except Exception as e:
_LOGGER.warning(e) | null |
178,318 | from __future__ import annotations
import ast
from typing import Any, Final
from streamlit import config
def _modify_ast_subtree(
    tree: Any,
    body_attr: str = "body",
    is_root: bool = False,
    file_ends_in_semicolon: bool = False,
):
    """Parses magic commands and modifies the given AST (sub)tree.

    Recursively rewrites standalone expression statements into st.write
    calls, descending into compound statements. When *is_root* is True, an
    import of Streamlit is inserted so the rewritten calls resolve, and
    location info is fixed up before returning the (mutated) tree.
    """
    body = getattr(tree, body_attr)

    for i, node in enumerate(body):
        node_type = type(node)

        # Recursively parses the content of the statements
        # `with`, `for` and `while`, as well as function definitions.
        # Also covers their async counterparts
        if (
            node_type is ast.FunctionDef
            or node_type is ast.With
            or node_type is ast.For
            or node_type is ast.While
            or node_type is ast.AsyncFunctionDef
            or node_type is ast.AsyncWith
            or node_type is ast.AsyncFor
        ):
            _modify_ast_subtree(node)

        # Recursively parses methods in a class.
        elif node_type is ast.ClassDef:
            for inner_node in node.body:
                if type(inner_node) in {ast.FunctionDef, ast.AsyncFunctionDef}:
                    _modify_ast_subtree(inner_node)

        # Recursively parses the contents of try statements,
        # all their handlers (except and else) and the finally body
        elif node_type is ast.Try:
            for j, inner_node in enumerate(node.handlers):
                node.handlers[j] = _modify_ast_subtree(inner_node)
            finally_node = _modify_ast_subtree(node, body_attr="finalbody")
            node.finalbody = finally_node.finalbody
            _modify_ast_subtree(node)

        # Recursively parses if blocks, as well as their else/elif blocks
        # (else/elif are both mapped to orelse)
        # it intentionally does not parse the test expression.
        elif node_type is ast.If:
            _modify_ast_subtree(node)
            _modify_ast_subtree(node, "orelse")

        # Convert standalone expression nodes to st.write
        elif node_type is ast.Expr:
            value = _get_st_write_from_expr(
                node,
                i,
                parent_type=type(tree),
                is_root=is_root,
                is_last_expr=(i == len(body) - 1),
                file_ends_in_semicolon=file_ends_in_semicolon,
            )
            if value is not None:
                node.value = value

    if is_root:
        # Import Streamlit so we can use it in the new_value above.
        _insert_import_statement(tree)

    ast.fix_missing_locations(tree)

    return tree
def _does_file_end_in_semicolon(tree, code: str) -> bool:
    """Report whether *code*'s last top-level statement ends with ';'.

    Skipped entirely (returns False) unless the
    magic.displayLastExprIfNoSemicolon config option is set, to avoid the
    string work when the result would be unused.
    """
    if not config.get_option("magic.displayLastExprIfNoSemicolon"):
        return False

    last_line_num = getattr(tree.body[-1], "end_lineno", None)
    if last_line_num is None:
        return False

    last_line: str = code.split("\n")[last_line_num - 1]
    return last_line.strip(" ").endswith(";")
The provided code snippet includes necessary dependencies for implementing the `add_magic` function. Write a Python function `def add_magic(code: str, script_path: str) -> Any` to solve the following problem:
Modifies the code to support magic Streamlit commands. Parameters ---------- code : str The Python code. script_path : str The path to the script file. Returns ------- ast.Module The syntax tree for the code.
Here is the function:
def add_magic(code: str, script_path: str) -> Any:
    """Modifies the code to support magic Streamlit commands.

    Parameters
    ----------
    code : str
        The Python code.
    script_path : str
        The path to the script file (passed to the parser so errors carry
        a useful filename).

    Returns
    -------
    ast.Module
        The syntax tree for the code.
    """
    # Pass script_path so we get pretty exceptions.
    tree = ast.parse(code, script_path, "exec")
    ends_in_semicolon = _does_file_end_in_semicolon(tree, code)
    return _modify_ast_subtree(
        tree, is_root=True, file_ends_in_semicolon=ends_in_semicolon
    )
178,319 | from __future__ import annotations
from typing import Any
from streamlit import config
from streamlit.errors import MarkdownFormattedException
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.forward_msg_cache import populate_hash_if_needed
The provided code snippet includes necessary dependencies for implementing the `is_cacheable_msg` function. Write a Python function `def is_cacheable_msg(msg: ForwardMsg) -> bool` to solve the following problem:
True if the given message qualifies for caching.
Here is the function:
def is_cacheable_msg(msg: ForwardMsg) -> bool:
    """True if the given message qualifies for caching."""
    # ref_hash / initialize messages never get cached.
    if msg.WhichOneof("type") in {"ref_hash", "initialize"}:
        return False

    min_cache_size = int(config.get_option("global.minCachedMessageSize"))
    return msg.ByteSize() >= min_cache_size
178,320 | from __future__ import annotations
from typing import Any
from streamlit import config
from streamlit.errors import MarkdownFormattedException
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.forward_msg_cache import populate_hash_if_needed
class MessageSizeError(MarkdownFormattedException):
    """Exception raised when a websocket message is larger than the configured limit."""

    def __init__(self, failed_msg_str: Any):
        msg = self._get_message(failed_msg_str)
        super().__init__(msg)

    def _get_message(self, failed_msg_str: Any) -> str:
        # Build the user-facing Markdown message, reporting both the actual
        # message size and the configured limit in MB.
        # This needs to have zero indentation otherwise the markdown will render incorrectly.
        return (
            (
                """
**Data of size {message_size_mb:.1f} MB exceeds the message size limit of {message_size_limit_mb} MB.**
This is often caused by a large chart or dataframe. Please decrease the amount of data sent
to the browser, or increase the limit by setting the config option `server.maxMessageSize`.
[Click here to learn more about config options](https://docs.streamlit.io/library/advanced-features/configuration#set-configuration-options).
_Note that increasing the limit may lead to long loading times and large memory consumption
of the client's browser and the Streamlit server._
"""
            )
            .format(
                message_size_mb=len(failed_msg_str) / 1e6,
                message_size_limit_mb=(get_max_message_size_bytes() / 1e6),
            )
            .strip("\n")
        )
def get_max_message_size_bytes() -> int:
    """Return the max websocket message size in bytes.

    The config option (expressed in MB) is read once and memoized in the
    module-level `_max_message_size_bytes` for subsequent calls.
    """
    global _max_message_size_bytes

    if _max_message_size_bytes is None:
        configured_mb = config.get_option("server.maxMessageSize")
        _max_message_size_bytes = configured_mb * 1_000_000

    return _max_message_size_bytes
def populate_hash_if_needed(msg: ForwardMsg) -> str:
    """Compute and assign the unique hash for a ForwardMsg.

    No-op when the message already carries a hash.

    Parameters
    ----------
    msg : ForwardMsg

    Returns
    -------
    string
        The message's hash, returned here for convenience. (The hash
        will also be assigned to the ForwardMsg; callers do not need
        to do this.)
    """
    if msg.hash != "":
        return msg.hash

    # Metadata is excluded from the hash computation: stash it aside,
    # hash the remainder, then restore it.
    metadata = msg.metadata
    msg.ClearField("metadata")

    # MD5 is good enough for what we need, which is uniqueness.
    digest = hashlib.md5(**HASHLIB_KWARGS)
    digest.update(msg.SerializeToString())
    msg.hash = digest.hexdigest()

    msg.metadata.CopyFrom(metadata)
    return msg.hash
The provided code snippet includes necessary dependencies for implementing the `serialize_forward_msg` function. Write a Python function `def serialize_forward_msg(msg: ForwardMsg) -> bytes` to solve the following problem:
Serialize a ForwardMsg to send to a client. If the message is too large, it will be converted to an exception message instead.
Here is the function:
def serialize_forward_msg(msg: ForwardMsg) -> bytes:
    """Serialize a ForwardMsg to send to a client.

    If the serialized payload exceeds the configured size limit, the message's
    delta is replaced with a MessageSizeError exception element and the
    message is serialized again.
    """
    populate_hash_if_needed(msg)
    serialized = msg.SerializeToString()

    if len(serialized) <= get_max_message_size_bytes():
        return serialized

    import streamlit.elements.exception as exception

    # Overwrite the offending ForwardMsg.delta with an error to display.
    # This assumes that the size limit wasn't exceeded due to metadata.
    exception.marshall(msg.delta.new_element.exception, MessageSizeError(serialized))
    return msg.SerializeToString()
178,321 | from __future__ import annotations
import pickle
import threading
import types
from datetime import timedelta
from typing import Any, Callable, Final, Literal, TypeVar, Union, cast, overload
from typing_extensions import TypeAlias
import streamlit as st
from streamlit import runtime
from streamlit.deprecation_util import show_deprecation_warning
from streamlit.errors import StreamlitAPIException
from streamlit.logger import get_logger
from streamlit.runtime.caching.cache_errors import CacheError, CacheKeyNotFoundError
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.cache_utils import (
Cache,
CachedFuncInfo,
make_cached_func_wrapper,
ttl_to_seconds,
)
from streamlit.runtime.caching.cached_message_replay import (
CachedMessageReplayContext,
CachedResult,
ElementMsgData,
MsgData,
MultiCacheResults,
)
from streamlit.runtime.caching.hashing import HashFuncsDict
from streamlit.runtime.caching.storage import (
CacheStorage,
CacheStorageContext,
CacheStorageError,
CacheStorageKeyNotFoundError,
CacheStorageManager,
)
from streamlit.runtime.caching.storage.cache_storage_protocol import (
InvalidCacheStorageContext,
)
from streamlit.runtime.caching.storage.dummy_cache_storage import (
MemoryCacheStorageManager,
)
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.runtime.scriptrunner.script_run_context import get_script_run_ctx
from streamlit.runtime.stats import CacheStat, CacheStatsProvider, group_stats
_data_caches = DataCaches()
class CacheStatsProvider(Protocol):
    """Structural interface for objects that can report cache statistics."""
    def get_stats(self) -> list[CacheStat]:
        # Implementers must return a snapshot of their current cache stats.
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `get_data_cache_stats_provider` function. Write a Python function `def get_data_cache_stats_provider() -> CacheStatsProvider` to solve the following problem:
Return the StatsProvider for all @st.cache_data functions.
Here is the function:
def get_data_cache_stats_provider() -> CacheStatsProvider:
    """Return the StatsProvider for all @st.cache_data functions."""
    # `_data_caches` is the module-level DataCaches singleton; it satisfies
    # the CacheStatsProvider protocol.
    return _data_caches
178,322 | from __future__ import annotations
import types
from typing import Any
from streamlit import type_util
from streamlit.errors import (
MarkdownFormattedException,
StreamlitAPIException,
StreamlitAPIWarning,
)
from streamlit.runtime.caching.cache_type import CacheType, get_decorator_api_name
def get_cached_func_name_md(func: Any) -> str:
    """Get a markdown (backtick-quoted) representation of the function's name.

    Falls back to the name of *func*'s type when *func* itself has no
    ``__name__`` (i.e. it is an arbitrary object rather than a function).
    """
    if hasattr(func, "__name__"):
        # Use an f-string for consistency: the original mixed %-formatting
        # here with f-strings in the other branches.
        return f"`{func.__name__}()`"
    elif hasattr(type(func), "__name__"):
        return f"`{type(func).__name__}`"
    # Defensive fallback; in practice every type object has a __name__.
    return f"`{type(func)}`"
def get_return_value_type(return_value: Any) -> str:
    """Return a markdown description of *return_value*'s fully-qualified type."""
    has_module = hasattr(return_value, "__module__")
    has_type_name = hasattr(type(return_value), "__name__")
    if has_module and has_type_name:
        return f"`{return_value.__module__}.{type(return_value).__name__}`"
    # Fall back to the function-name formatting helper.
    return get_cached_func_name_md(return_value)
178,323 | from __future__ import annotations
import functools
import hashlib
import inspect
import math
import threading
import time
import types
from abc import abstractmethod
from collections import defaultdict
from datetime import timedelta
from typing import Any, Callable, Final, Literal, overload
from streamlit import type_util
from streamlit.elements.spinner import spinner
from streamlit.logger import get_logger
from streamlit.runtime.caching.cache_errors import (
BadTTLStringError,
CacheError,
CacheKeyNotFoundError,
UnevaluatedDataFrameError,
UnhashableParamError,
UnhashableTypeError,
UnserializableReturnValueError,
get_cached_func_name_md,
)
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.cached_message_replay import (
CachedMessageReplayContext,
CachedResult,
MsgData,
replay_cached_messages,
)
from streamlit.runtime.caching.hashing import HashFuncsDict, update_hash
from streamlit.util import HASHLIB_KWARGS
def ttl_to_seconds(
    ttl: float | timedelta | str | None, *, coerce_none_to_inf: Literal[False]
) -> float | None:
    # Typing overload stub (the @overload decorator is not visible in this
    # chunk): with coerce_none_to_inf=False, a None ttl yields None.
    ...
178,324 | from __future__ import annotations
import functools
import hashlib
import inspect
import math
import threading
import time
import types
from abc import abstractmethod
from collections import defaultdict
from datetime import timedelta
from typing import Any, Callable, Final, Literal, overload
from streamlit import type_util
from streamlit.elements.spinner import spinner
from streamlit.logger import get_logger
from streamlit.runtime.caching.cache_errors import (
BadTTLStringError,
CacheError,
CacheKeyNotFoundError,
UnevaluatedDataFrameError,
UnhashableParamError,
UnhashableTypeError,
UnserializableReturnValueError,
get_cached_func_name_md,
)
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.cached_message_replay import (
CachedMessageReplayContext,
CachedResult,
MsgData,
replay_cached_messages,
)
from streamlit.runtime.caching.hashing import HashFuncsDict, update_hash
from streamlit.util import HASHLIB_KWARGS
def ttl_to_seconds(ttl: float | timedelta | str | None) -> float:
    # Typing overload stub (the @overload decorator is not visible in this
    # chunk): without coerce_none_to_inf, the result is always a float.
    ...
178,325 | from __future__ import annotations
import functools
import hashlib
import inspect
import math
import threading
import time
import types
from abc import abstractmethod
from collections import defaultdict
from datetime import timedelta
from typing import Any, Callable, Final, Literal, overload
from streamlit import type_util
from streamlit.elements.spinner import spinner
from streamlit.logger import get_logger
from streamlit.runtime.caching.cache_errors import (
BadTTLStringError,
CacheError,
CacheKeyNotFoundError,
UnevaluatedDataFrameError,
UnhashableParamError,
UnhashableTypeError,
UnserializableReturnValueError,
get_cached_func_name_md,
)
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.cached_message_replay import (
CachedMessageReplayContext,
CachedResult,
MsgData,
replay_cached_messages,
)
from streamlit.runtime.caching.hashing import HashFuncsDict, update_hash
from streamlit.util import HASHLIB_KWARGS
class BadTTLStringError(StreamlitAPIException):
    """Raised when a bad ttl= argument string is passed."""

    def __init__(self, ttl: str):
        # Calls MarkdownFormattedException.__init__ directly rather than
        # super().__init__ (preserved from the original).
        #
        # Bug fix: the two adjacent string literals concatenated without a
        # space, producing "formatted as`'1d2h34m'`" in the user-facing
        # message. A trailing space is added to the first literal.
        MarkdownFormattedException.__init__(
            self,
            "TTL string doesn't look right. It should be formatted as "
            f"`'1d2h34m'` or `2 days`, for example. Got: {ttl}",
        )
The provided code snippet includes necessary dependencies for implementing the `ttl_to_seconds` function. Write a Python function `def ttl_to_seconds( ttl: float | timedelta | str | None, *, coerce_none_to_inf: bool = True ) -> float | None` to solve the following problem:
Convert a ttl value to a float representing "number of seconds".
Here is the function:
def ttl_to_seconds(
    ttl: float | timedelta | str | None, *, coerce_none_to_inf: bool = True
) -> float | None:
    """Convert a ttl value to a float number of seconds.

    None maps to +inf (unless coerce_none_to_inf is False), timedeltas are
    converted exactly, and strings are parsed via pandas.Timedelta.
    """
    if ttl is None:
        return math.inf if coerce_none_to_inf else None
    if isinstance(ttl, timedelta):
        return ttl.total_seconds()
    if isinstance(ttl, str):
        # Parse strings like "1d2h34m" or "2 days" lazily via pandas.
        import numpy as np
        import pandas as pd

        try:
            seconds: float = pd.Timedelta(ttl).total_seconds()
        except ValueError as ex:
            raise BadTTLStringError(ttl) from ex
        if np.isnan(seconds):
            raise BadTTLStringError(ttl)
        return seconds
    # Already a plain number of seconds.
    return ttl
178,326 | from __future__ import annotations
import functools
import hashlib
import inspect
import math
import threading
import time
import types
from abc import abstractmethod
from collections import defaultdict
from datetime import timedelta
from typing import Any, Callable, Final, Literal, overload
from streamlit import type_util
from streamlit.elements.spinner import spinner
from streamlit.logger import get_logger
from streamlit.runtime.caching.cache_errors import (
BadTTLStringError,
CacheError,
CacheKeyNotFoundError,
UnevaluatedDataFrameError,
UnhashableParamError,
UnhashableTypeError,
UnserializableReturnValueError,
get_cached_func_name_md,
)
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.cached_message_replay import (
CachedMessageReplayContext,
CachedResult,
MsgData,
replay_cached_messages,
)
from streamlit.runtime.caching.hashing import HashFuncsDict, update_hash
from streamlit.util import HASHLIB_KWARGS
class CachedFuncInfo:
    """Encapsulates data for a cached function instance.

    CachedFuncInfo instances are scoped to a single script run - they're not
    persistent.
    """
    def __init__(
        self,
        func: types.FunctionType,
        show_spinner: bool | str,
        allow_widgets: bool,
        hash_funcs: HashFuncsDict | None,
    ):
        # The user function being cached.
        self.func = func
        # Either a bool (show a default spinner message) or a custom message.
        self.show_spinner = show_spinner
        # Whether widget-producing code is permitted inside the cached function.
        self.allow_widgets = allow_widgets
        # Optional user-supplied overrides for hashing specific types.
        self.hash_funcs = hash_funcs
    # NOTE(review): callers (e.g. CachedFunc) access `cache_type` and
    # `cached_message_replay_ctx` as attributes, not calls, so these are
    # presumably decorated with @property upstream and the decorators were
    # lost in this excerpt — confirm against the original source.
    def cache_type(self) -> CacheType:
        raise NotImplementedError
    def cached_message_replay_ctx(self) -> CachedMessageReplayContext:
        raise NotImplementedError
    def get_function_cache(self, function_key: str) -> Cache:
        """Get or create the function cache for the given key."""
        raise NotImplementedError
class CachedFunc:
    """Callable that wraps a user function with cache lookup/compute logic.

    On call: build a value key from the arguments, return the cached result
    on a hit (replaying any recorded messages), or compute, store, and
    return the value on a miss.
    """
    def __init__(self, info: CachedFuncInfo):
        self._info = info
        # Key derived from the function's identity + source; stable across reruns.
        self._function_key = _make_function_key(info.cache_type, info.func)
    def __call__(self, *args, **kwargs) -> Any:
        """The wrapper. We'll only call our underlying function on a cache miss."""
        name = self._info.func.__qualname__
        # Pick the spinner message: a default built from the function name
        # when show_spinner is a bool, or the user-supplied string itself.
        if isinstance(self._info.show_spinner, bool):
            if len(args) == 0 and len(kwargs) == 0:
                message = f"Running `{name}()`."
            else:
                message = f"Running `{name}(...)`."
        else:
            message = self._info.show_spinner
        if self._info.show_spinner or isinstance(self._info.show_spinner, str):
            with spinner(message, _cache=True):
                return self._get_or_create_cached_value(args, kwargs)
        else:
            return self._get_or_create_cached_value(args, kwargs)
    def _get_or_create_cached_value(
        self, func_args: tuple[Any, ...], func_kwargs: dict[str, Any]
    ) -> Any:
        """Look up the value for these args; compute and store it on a miss."""
        # Retrieve the function's cache object. We must do this "just-in-time"
        # (as opposed to in the constructor), because caches can be invalidated
        # at any time.
        cache = self._info.get_function_cache(self._function_key)
        # Generate the key for the cached value. This is based on the
        # arguments passed to the function.
        value_key = _make_value_key(
            cache_type=self._info.cache_type,
            func=self._info.func,
            func_args=func_args,
            func_kwargs=func_kwargs,
            hash_funcs=self._info.hash_funcs,
        )
        try:
            cached_result = cache.read_result(value_key)
            return self._handle_cache_hit(cached_result)
        except CacheKeyNotFoundError:
            pass
        return self._handle_cache_miss(cache, value_key, func_args, func_kwargs)
    def _handle_cache_hit(self, result: CachedResult) -> Any:
        """Handle a cache hit: replay the result's cached messages, and return its value."""
        replay_cached_messages(
            result,
            self._info.cache_type,
            self._info.func,
        )
        return result.value
    def _handle_cache_miss(
        self,
        cache: Cache,
        value_key: str,
        func_args: tuple[Any, ...],
        func_kwargs: dict[str, Any],
    ) -> Any:
        """Handle a cache miss: compute a new cached value, write it back to the cache,
        and return that newly-computed value.
        """
        # Implementation notes:
        # - We take a "compute_value_lock" before computing our value. This ensures that
        #   multiple sessions don't try to compute the same value simultaneously.
        #
        # - We use a different lock for each value_key, as opposed to a single lock for
        #   the entire cache, so that unrelated value computations don't block on each other.
        #
        # - When retrieving a cache entry that may not yet exist, we use a "double-checked locking"
        #   strategy: first we try to retrieve the cache entry without taking a value lock. (This
        #   happens in `_get_or_create_cached_value()`.) If that fails because the value hasn't
        #   been computed yet, we take the value lock and then immediately try to retrieve cache entry
        #   *again*, while holding the lock. If the cache entry exists at this point, it means that
        #   another thread computed the value before us.
        #
        # This means that the happy path ("cache entry exists") is a wee bit faster because
        # no lock is acquired. But the unhappy path ("cache entry needs to be recomputed") is
        # a wee bit slower, because we do two lookups for the entry.
        with cache.compute_value_lock(value_key):
            # We've acquired the lock - but another thread may have acquired it first
            # and already computed the value. So we need to test for a cache hit again,
            # before computing.
            try:
                cached_result = cache.read_result(value_key)
                # Another thread computed the value before us. Early exit!
                return self._handle_cache_hit(cached_result)
            except CacheKeyNotFoundError:
                pass
            # We acquired the lock before any other thread. Compute the value!
            with self._info.cached_message_replay_ctx.calling_cached_function(
                self._info.func, self._info.allow_widgets
            ):
                computed_value = self._info.func(*func_args, **func_kwargs)
            # We've computed our value, and now we need to write it back to the cache
            # along with any "replay messages" that were generated during value computation.
            messages = self._info.cached_message_replay_ctx._most_recent_messages
            try:
                cache.write_result(value_key, computed_value, messages)
                return computed_value
            except (CacheError, RuntimeError):
                # An exception was thrown while we tried to write to the cache. Report it to the user.
                # (We catch `RuntimeError` here because it will be raised by Apache Spark if we do not
                # collect dataframe before using `st.cache_data`.)
                if True in [
                    type_util.is_type(computed_value, type_name)
                    for type_name in UNEVALUATED_DATAFRAME_TYPES
                ]:
                    raise UnevaluatedDataFrameError(
                        f"""
                        The function {get_cached_func_name_md(self._info.func)} is decorated with `st.cache_data` but it returns an unevaluated dataframe
                        of type `{type_util.get_fqn_type(computed_value)}`. Please call `collect()` or `to_pandas()` on the dataframe before returning it,
                        so `st.cache_data` can serialize and cache it."""
                    )
                raise UnserializableReturnValueError(
                    return_value=computed_value, func=self._info.func
                )
    def clear(self):
        """Clear the wrapped function's associated cache."""
        cache = self._info.get_function_cache(self._function_key)
        cache.clear()
The provided code snippet includes necessary dependencies for implementing the `make_cached_func_wrapper` function. Write a Python function `def make_cached_func_wrapper(info: CachedFuncInfo) -> Callable[..., Any]` to solve the following problem:
Create a callable wrapper around a CachedFunctionInfo. Calling the wrapper will return the cached value if it's already been computed, and will call the underlying function to compute and cache the value otherwise. The wrapper also has a `clear` function that can be called to clear all of the wrapper's cached values.
Here is the function:
def make_cached_func_wrapper(info: CachedFuncInfo) -> Callable[..., Any]:
    """Create a callable wrapper around a CachedFunctionInfo.

    Calling the wrapper returns the cached value when it has already been
    computed, and calls the underlying function to compute and cache the
    value otherwise. The wrapper also exposes a `clear` attribute that
    drops all of its cached values.
    """
    cached_func = CachedFunc(info)

    # We can't simply return `cached_func` (which is already a Callable):
    # applying `functools.update_wrapper` to the CachedFunc instance itself
    # errors out when the caching decorators wrap member functions.
    # (See https://github.com/streamlit/streamlit/issues/6109)
    @functools.wraps(info.func)
    def wrapper(*args, **kwargs):
        return cached_func(*args, **kwargs)

    # Expose cache clearing on the wrapper.
    # (The attribute assignment trips a spurious mypy error.)
    wrapper.clear = cached_func.clear  # type: ignore
    return wrapper
178,327 | from __future__ import annotations
import functools
import hashlib
import inspect
import math
import threading
import time
import types
from abc import abstractmethod
from collections import defaultdict
from datetime import timedelta
from typing import Any, Callable, Final, Literal, overload
from streamlit import type_util
from streamlit.elements.spinner import spinner
from streamlit.logger import get_logger
from streamlit.runtime.caching.cache_errors import (
BadTTLStringError,
CacheError,
CacheKeyNotFoundError,
UnevaluatedDataFrameError,
UnhashableParamError,
UnhashableTypeError,
UnserializableReturnValueError,
get_cached_func_name_md,
)
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.cached_message_replay import (
CachedMessageReplayContext,
CachedResult,
MsgData,
replay_cached_messages,
)
from streamlit.runtime.caching.hashing import HashFuncsDict, update_hash
from streamlit.util import HASHLIB_KWARGS
_LOGGER: Final = get_logger(__name__)
def _get_positional_arg_name(func: types.FunctionType, arg_index: int) -> str | None:
"""Return the name of a function's positional argument.
If arg_index is out of range, or refers to a parameter that is not a
named positional argument (e.g. an *args, **kwargs, or keyword-only param),
return None instead.
"""
if arg_index < 0:
return None
params: list[inspect.Parameter] = list(inspect.signature(func).parameters.values())
if arg_index >= len(params):
return None
if params[arg_index].kind in (
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY,
):
return params[arg_index].name
return None
class UnhashableTypeError(Exception):
    # Internal marker raised when a value cannot be hashed; callers such as
    # _make_value_key convert it into the user-facing UnhashableParamError.
    pass
class UnhashableParamError(StreamlitAPIException):
    """Raised when a cached function receives an argument that can't be hashed."""

    def __init__(
        self,
        cache_type: CacheType,
        func: types.FunctionType,
        arg_name: str | None,
        arg_value: Any,
        orig_exc: BaseException,
    ):
        msg = self._create_message(cache_type, func, arg_name, arg_value)
        super().__init__(msg)
        # Preserve the original hashing failure's traceback on this exception.
        self.with_traceback(orig_exc.__traceback__)

    # Bug fix: `_create_message` takes no `self` parameter but was declared
    # as a plain method, so calling it via `self._create_message(...)` would
    # pass the instance as `cache_type` (and one argument too many).
    # Declaring it @staticmethod makes the call match the signature.
    @staticmethod
    def _create_message(
        cache_type: CacheType,
        func: types.FunctionType,
        arg_name: str | None,
        arg_value: Any,
    ) -> str:
        arg_name_str = arg_name if arg_name is not None else "(unnamed)"
        arg_type = type_util.get_fqn_type(arg_value)
        func_name = func.__name__
        arg_replacement_name = f"_{arg_name}" if arg_name is not None else "_arg"
        return (
            f"""
Cannot hash argument '{arg_name_str}' (of type `{arg_type}`) in '{func_name}'.
To address this, you can tell Streamlit not to hash this argument by adding a
leading underscore to the argument's name in the function signature:
```
def {func_name}({arg_replacement_name}, ...):
    ...
```
"""
        ).strip("\n")
class CacheType(enum.Enum):
    """The function cache types we implement."""
    # Each member's value mirrors its name, giving a stable string form.
    DATA = "DATA"
    RESOURCE = "RESOURCE"
HashFuncsDict: TypeAlias = Dict[Union[str, Type[Any]], Callable[[Any], Any]]
def update_hash(
    val: Any,
    hasher,
    cache_type: CacheType,
    hash_source: Callable[..., Any] | None = None,
    hash_funcs: HashFuncsDict | None = None,
) -> None:
    """Updates a hashlib hasher with the hash of val.

    This is the main entrypoint to hashing.py.

    Parameters
    ----------
    val : Any
        The value to fold into ``hasher``.
    hasher
        A hashlib-style hasher object (must support ``.update(bytes)``).
    cache_type : CacheType
        Which cache (DATA or RESOURCE) the hash is computed for.
    hash_source : Callable, optional
        The function that triggered this hashing; recorded on the current
        hash stack.
    hash_funcs : HashFuncsDict, optional
        User-supplied overrides mapping types to custom hash functions.
    """
    # Record the triggering function on the current hash stack, then
    # delegate the actual work to the cache-aware hasher.
    hash_stacks.current.hash_source = hash_source
    ch = _CacheFuncHasher(cache_type, hash_funcs)
    ch.update(hasher, val)
HASHLIB_KWARGS: dict[str, Any] = (
{"usedforsecurity": False} if sys.version_info >= (3, 9) else {}
)
The provided code snippet includes necessary dependencies for implementing the `_make_value_key` function. Write a Python function `def _make_value_key( cache_type: CacheType, func: types.FunctionType, func_args: tuple[Any, ...], func_kwargs: dict[str, Any], hash_funcs: HashFuncsDict | None, ) -> str` to solve the following problem:
Create the key for a value within a cache. This key is generated from the function's arguments. All arguments will be hashed, except for those named with a leading "_". Raises ------ StreamlitAPIException Raised (with a nicely-formatted explanation message) if we encounter an un-hashable arg.
Here is the function:
def _make_value_key(
    cache_type: CacheType,
    func: types.FunctionType,
    func_args: tuple[Any, ...],
    func_kwargs: dict[str, Any],
    hash_funcs: HashFuncsDict | None,
) -> str:
    """Create the key for a value within a cache.

    This key is generated from the function's arguments. All arguments
    will be hashed, except for those named with a leading "_".

    Returns
    -------
    str
        An MD5 hex digest over the (name, value) pairs of the hashed args.

    Raises
    ------
    StreamlitAPIException
        Raised (with a nicely-formatted explanation message) if we encounter
        an un-hashable arg.
    """
    # Create a (name, value) list of all *args and **kwargs passed to the
    # function.
    arg_pairs: list[tuple[str | None, Any]] = []
    for arg_idx in range(len(func_args)):
        # Positional args that aren't named positional params get name=None.
        arg_name = _get_positional_arg_name(func, arg_idx)
        arg_pairs.append((arg_name, func_args[arg_idx]))
    for kw_name, kw_val in func_kwargs.items():
        # **kwargs ordering is preserved, per PEP 468
        # https://www.python.org/dev/peps/pep-0468/, so this iteration is
        # deterministic.
        arg_pairs.append((kw_name, kw_val))
    # Create the hash from each arg value, except for those args whose name
    # starts with "_". (Underscore-prefixed args are deliberately excluded from
    # hashing.)
    args_hasher = hashlib.new("md5", **HASHLIB_KWARGS)
    for arg_name, arg_value in arg_pairs:
        if arg_name is not None and arg_name.startswith("_"):
            _LOGGER.debug("Not hashing %s because it starts with _", arg_name)
            continue
        try:
            update_hash(
                arg_name,
                hasher=args_hasher,
                cache_type=cache_type,
                hash_source=func,
            )
            # we call update_hash twice here, first time for `arg_name`
            # without `hash_funcs`, and second time for `arg_value` with hash_funcs
            # to evaluate user defined `hash_funcs` only for computing `arg_value` hash.
            update_hash(
                arg_value,
                hasher=args_hasher,
                cache_type=cache_type,
                hash_funcs=hash_funcs,
                hash_source=func,
            )
        except UnhashableTypeError as exc:
            # Surface a user-friendly error naming the offending argument.
            raise UnhashableParamError(cache_type, func, arg_name, arg_value, exc)
    value_key = args_hasher.hexdigest()
    _LOGGER.debug("Cache key: %s", value_key)
    return value_key
178,328 | from __future__ import annotations
import functools
import hashlib
import inspect
import math
import threading
import time
import types
from abc import abstractmethod
from collections import defaultdict
from datetime import timedelta
from typing import Any, Callable, Final, Literal, overload
from streamlit import type_util
from streamlit.elements.spinner import spinner
from streamlit.logger import get_logger
from streamlit.runtime.caching.cache_errors import (
BadTTLStringError,
CacheError,
CacheKeyNotFoundError,
UnevaluatedDataFrameError,
UnhashableParamError,
UnhashableTypeError,
UnserializableReturnValueError,
get_cached_func_name_md,
)
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.caching.cached_message_replay import (
CachedMessageReplayContext,
CachedResult,
MsgData,
replay_cached_messages,
)
from streamlit.runtime.caching.hashing import HashFuncsDict, update_hash
from streamlit.util import HASHLIB_KWARGS
_LOGGER: Final = get_logger(__name__)
class CacheType(enum.Enum):
"""The function cache types we implement."""
DATA = "DATA"
RESOURCE = "RESOURCE"
def update_hash(
val: Any,
hasher,
cache_type: CacheType,
hash_source: Callable[..., Any] | None = None,
hash_funcs: HashFuncsDict | None = None,
) -> None:
"""Updates a hashlib hasher with the hash of val.
This is the main entrypoint to hashing.py.
"""
hash_stacks.current.hash_source = hash_source
ch = _CacheFuncHasher(cache_type, hash_funcs)
ch.update(hasher, val)
HASHLIB_KWARGS: dict[str, Any] = (
{"usedforsecurity": False} if sys.version_info >= (3, 9) else {}
)
The provided code snippet includes necessary dependencies for implementing the `_make_function_key` function. Write a Python function `def _make_function_key(cache_type: CacheType, func: types.FunctionType) -> str` to solve the following problem:
Create the unique key for a function's cache. A function's key is stable across reruns of the app, and changes when the function's source code changes.
Here is the function:
def _make_function_key(cache_type: CacheType, func: types.FunctionType) -> str:
    """Create the unique key for a function's cache.

    A function's key is stable across reruns of the app, and changes when
    the function's source code changes.
    """
    func_hasher = hashlib.new("md5", **HASHLIB_KWARGS)

    # Include the function's __module__ and __qualname__ strings in the hash.
    # This means that two identical functions in different modules
    # will not share a hash; it also means that two identical *nested*
    # functions in the same module will not share a hash.
    update_hash(
        (func.__module__, func.__qualname__),
        hasher=func_hasher,
        cache_type=cache_type,
        hash_source=func,
    )

    # Include the function's source code in its hash. If the source code can't
    # be retrieved, fall back to the function's bytecode instead.
    source_code: str | bytes
    try:
        source_code = inspect.getsource(func)
    except OSError as e:
        # Bug fix: the original used a str.format-style "{0}" placeholder,
        # but logging interpolates lazily with %-style args, so the error
        # value was never rendered into the message. Use "%s" instead.
        _LOGGER.debug(
            "Failed to retrieve function's source code when building its key; falling back to bytecode. err=%s",
            e,
        )
        source_code = func.__code__.co_code

    update_hash(
        source_code, hasher=func_hasher, cache_type=cache_type, hash_source=func
    )

    cache_key = func_hasher.hexdigest()
    return cache_key
178,329 | from __future__ import annotations
import collections
import dataclasses
import datetime
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import threading
import uuid
import weakref
from enum import Enum
from typing import Any, Callable, Dict, Final, Pattern, Type, Union
from typing_extensions import TypeAlias
from streamlit import type_util, util
from streamlit.errors import StreamlitAPIException
from streamlit.runtime.caching.cache_errors import UnhashableTypeError
from streamlit.runtime.caching.cache_type import CacheType
from streamlit.runtime.uploaded_file_manager import UploadedFile
from streamlit.util import HASHLIB_KWARGS
def _int_to_bytes(i: int) -> bytes:
num_bytes = (i.bit_length() + 8) // 8
return i.to_bytes(num_bytes, "little", signed=True) | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.