id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
178,530 | import sys
import types as Types
import warnings
import weakref as Weakref
from inspect import isbuiltin, isclass, iscode, isframe, isfunction, ismethod, ismodule
from math import log
from os import curdir, linesep
from struct import calcsize
t = hasattr(sys, "gettotalrefcount")
del t
from gc import get_objects as _getobjects
from gc import get_referents as _getreferents
def _objs_opts_x(where, objs, all=None, **opts):
"""Return the given or 'all' objects plus
the remaining options and exclude flag
"""
if objs: # given objects
t, x = objs, False
elif all in (False, None):
t, x = (), True
elif all is True: # 'all' objects
t, x = _getobjects(), True
else:
raise _OptionError(where, all=all)
return t, opts, x
t = (_kind_static, _kind_dynamic, _kind_derived, _kind_ignored, _kind_inferred) = (
i("static"),
i("dynamic"),
i("derived"),
i("ignored"),
i("inferred"),
)
from array import array as _array
s = [_items({}), _keys({}), _values({})]
try: # reversed list and tuples iterators
s.extend([reversed([]), reversed(())])
except NameError: # missing
pass
for t in _values(_typedefs):
if t.type and t.leng:
try: # create an (empty) instance
s.append(t.type())
except TypeError:
pass
for t in s:
try:
i = iter(t)
_typedef_both(type(i), leng=_len_iter, refs=_iter_refs, item=0) # no itemsize!
except (KeyError, TypeError): # ignore non-iterables, duplicates, etc.
pass
_asizer = Asizer()
The provided code snippet includes necessary dependencies for implementing the `asizeof` function. Write a Python function `def asizeof(*objs, **opts)` to solve the following problem:
Return the combined size (in bytes) of all objects passed as positional arguments. The available options and defaults are: *above=0* -- threshold for largest objects stats *align=8* -- size alignment *clip=80* -- clip ``repr()`` strings *code=False* -- incl. (byte)code size *cutoff=10* -- limit large objects or profiles stats *derive=False* -- derive from super type *frames=False* -- ignore stack frame objects *ignored=True* -- ignore certain types *infer=False* -- try to infer types *limit=100* -- recursion limit *stats=0* -- print statistics Set *align* to a power of 2 to align sizes. Any value less than 2 avoids size alignment. If *all* is True and if no positional arguments are supplied. size all current gc objects, including module, global and stack frame objects. A positive *clip* value truncates all repr() strings to at most *clip* characters. The (byte)code size of callable objects like functions, methods, classes, etc. is included only if *code* is True. If *derive* is True, new types are handled like an existing (super) type provided there is one and only of those. By default certain base types like object, super, etc. are ignored. Set *ignored* to False to include those. If *infer* is True, new types are inferred from attributes (only implemented for dict types on callable attributes as get, has_key, items, keys and values). Set *limit* to a positive value to accumulate the sizes of the referents of each object, recursively up to the limit. Using *limit=0* returns the sum of the flat sizes of the given objects. High *limit* values may cause runtime errors and miss objects for sizing. A positive value for *stats* prints up to 9 statistics, (1) a summary of the number of objects sized and seen and a list of the largests objects with size over *above* bytes, (2) a simple profile of the sized objects by type and (3+) up to 6 tables showing the static, dynamic, derived, ignored, inferred and dict types used, found respectively installed. 
The fractional part of the *stats* value (x 100) is the number of largest objects shown for (*stats*=1.+) or the cutoff percentage for simple profiles for (*stats*=2.+). For example, *stats=1.10* shows the summary and the 10 largest objects, also the default. See this module documentation for the definition of flat size.
Here is the function:
def asizeof(*objs, **opts):
    """Return the combined size (in bytes) of all objects passed
    as positional arguments.

    The available options and defaults are:

    *above=0* -- threshold for largest objects stats
    *align=8* -- size alignment
    *clip=80* -- clip ``repr()`` strings
    *code=False* -- incl. (byte)code size
    *cutoff=10* -- limit large objects or profiles stats
    *derive=False* -- derive from super type
    *frames=False* -- ignore stack frame objects
    *ignored=True* -- ignore certain types
    *infer=False* -- try to infer types
    *limit=100* -- recursion limit
    *stats=0* -- print statistics

    Set *align* to a power of 2 to align sizes. Any value less
    than 2 avoids size alignment.

    If *all* is True and if no positional arguments are supplied,
    size all current gc objects, including module, global and stack
    frame objects.

    A positive *clip* value truncates all repr() strings to at
    most *clip* characters.

    The (byte)code size of callable objects like functions,
    methods, classes, etc. is included only if *code* is True.

    If *derive* is True, new types are handled like an existing
    (super) type provided there is one and only one of those.

    By default certain base types like object, super, etc. are
    ignored. Set *ignored* to False to include those.

    If *infer* is True, new types are inferred from attributes
    (only implemented for dict types on callable attributes
    as get, has_key, items, keys and values).

    Set *limit* to a positive value to accumulate the sizes of
    the referents of each object, recursively up to the limit.
    Using *limit=0* returns the sum of the flat sizes of the
    given objects. High *limit* values may cause runtime errors
    and miss objects for sizing.

    A positive value for *stats* prints up to 9 statistics, (1)
    a summary of the number of objects sized and seen and a list
    of the largest objects with size over *above* bytes, (2) a
    simple profile of the sized objects by type and (3+) up to 6
    tables showing the static, dynamic, derived, ignored, inferred
    and dict types used, found respectively installed.

    The fractional part of the *stats* value (x 100) is the number
    of largest objects shown for (*stats*=1.+) or the cutoff
    percentage for simple profiles for (*stats*=2.+). For example,
    *stats=1.10* shows the summary and the 10 largest objects,
    also the default.

    See this module documentation for the definition of flat size.
    """
    # Resolve the objects to size, the remaining sizer options and
    # whether the objects tuple itself must be excluded from sizing.
    t, p, x = _objs_opts_x(asizeof, objs, **opts)
    _asizer.reset(**p)
    if t:
        if x:  # don't size, profile or rank _getobjects tuple
            _asizer.exclude_objs(t)
        s = _asizer.asizeof(*t)
        _asizer.print_stats(objs=t, opts=opts)  # show opts as _kwdstr
        _asizer._clear()
    else:
        s = 0  # no objects given and 'all' not requested
return s | Return the combined size (in bytes) of all objects passed as positional arguments. The available options and defaults are: *above=0* -- threshold for largest objects stats *align=8* -- size alignment *clip=80* -- clip ``repr()`` strings *code=False* -- incl. (byte)code size *cutoff=10* -- limit large objects or profiles stats *derive=False* -- derive from super type *frames=False* -- ignore stack frame objects *ignored=True* -- ignore certain types *infer=False* -- try to infer types *limit=100* -- recursion limit *stats=0* -- print statistics Set *align* to a power of 2 to align sizes. Any value less than 2 avoids size alignment. If *all* is True and if no positional arguments are supplied. size all current gc objects, including module, global and stack frame objects. A positive *clip* value truncates all repr() strings to at most *clip* characters. The (byte)code size of callable objects like functions, methods, classes, etc. is included only if *code* is True. If *derive* is True, new types are handled like an existing (super) type provided there is one and only of those. By default certain base types like object, super, etc. are ignored. Set *ignored* to False to include those. If *infer* is True, new types are inferred from attributes (only implemented for dict types on callable attributes as get, has_key, items, keys and values). Set *limit* to a positive value to accumulate the sizes of the referents of each object, recursively up to the limit. Using *limit=0* returns the sum of the flat sizes of the given objects. High *limit* values may cause runtime errors and miss objects for sizing. A positive value for *stats* prints up to 9 statistics, (1) a summary of the number of objects sized and seen and a list of the largests objects with size over *above* bytes, (2) a simple profile of the sized objects by type and (3+) up to 6 tables showing the static, dynamic, derived, ignored, inferred and dict types used, found respectively installed. 
The fractional part of the *stats* value (x 100) is the number of largest objects shown for (*stats*1.+) or the cutoff percentage for simple profiles for (*stats*=2.+). For example, *stats=1.10* shows the summary and the 10 largest objects, also the default. See this module documentation for the definition of flat size. |
178,531 | import sys
import types as Types
import warnings
import weakref as Weakref
from inspect import isbuiltin, isclass, iscode, isframe, isfunction, ismethod, ismodule
from math import log
from os import curdir, linesep
from struct import calcsize
t = hasattr(sys, "gettotalrefcount")
del t
from gc import get_objects as _getobjects
from gc import get_referents as _getreferents
t = (_kind_static, _kind_dynamic, _kind_derived, _kind_ignored, _kind_inferred) = (
i("static"),
i("dynamic"),
i("derived"),
i("ignored"),
i("inferred"),
)
from array import array as _array
for t in _values(_typedefs):
if t.type and t.leng:
try: # create an (empty) instance
s.append(t.type())
except TypeError:
pass
for t in s:
try:
i = iter(t)
_typedef_both(type(i), leng=_len_iter, refs=_iter_refs, item=0) # no itemsize!
except (KeyError, TypeError): # ignore non-iterables, duplicates, etc.
pass
_asizer = Asizer()
The provided code snippet includes necessary dependencies for implementing the `asizesof` function. Write a Python function `def asizesof(*objs, **opts)` to solve the following problem:
Return a tuple containing the size (in bytes) of all objects passed as positional arguments. The available options and defaults are: *above=1024* -- threshold for largest objects stats *align=8* -- size alignment *clip=80* -- clip ``repr()`` strings *code=False* -- incl. (byte)code size *cutoff=10* -- limit large objects or profiles stats *derive=False* -- derive from super type *frames=False* -- ignore stack frame objects *ignored=True* -- ignore certain types *infer=False* -- try to infer types *limit=100* -- recursion limit *stats=0* -- print statistics See function **asizeof** for a description of the options. The length of the returned tuple equals the number of given objects. The size of duplicate and ignored objects will be zero.
Here is the function:
def asizesof(*objs, **opts):
    """Return a tuple containing the size (in bytes) of all objects
    passed as positional arguments.

    The available options and defaults are:

    *above=1024* -- threshold for largest objects stats
    *align=8* -- size alignment
    *clip=80* -- clip ``repr()`` strings
    *code=False* -- incl. (byte)code size
    *cutoff=10* -- limit large objects or profiles stats
    *derive=False* -- derive from super type
    *frames=False* -- ignore stack frame objects
    *ignored=True* -- ignore certain types
    *infer=False* -- try to infer types
    *limit=100* -- recursion limit
    *stats=0* -- print statistics

    See function **asizeof** for a description of the options.

    The length of the returned tuple equals the number of given
    objects.

    The size of duplicate and ignored objects will be zero.
    """
    _asizer.reset(**opts)
    if objs:
        # Size each object individually; duplicates/ignored come back 0.
        t = _asizer.asizesof(*objs)
        _asizer.print_stats(objs, opts=opts, sizes=t)  # show opts as _kwdstr
        _asizer._clear()
    else:
        t = ()  # no objects given: empty result tuple
return t | Return a tuple containing the size (in bytes) of all objects passed as positional arguments. The available options and defaults are: *above=1024* -- threshold for largest objects stats *align=8* -- size alignment *clip=80* -- clip ``repr()`` strings *code=False* -- incl. (byte)code size *cutoff=10* -- limit large objects or profiles stats *derive=False* -- derive from super type *frames=False* -- ignore stack frame objects *ignored=True* -- ignore certain types *infer=False* -- try to infer types *limit=100* -- recursion limit *stats=0* -- print statistics See function **asizeof** for a description of the options. The length of the returned tuple equals the number of given objects. The size of duplicate and ignored objects will be zero. |
178,532 | import sys
import types as Types
import warnings
import weakref as Weakref
from inspect import isbuiltin, isclass, iscode, isframe, isfunction, ismethod, ismodule
from math import log
from os import curdir, linesep
from struct import calcsize
t = hasattr(sys, "gettotalrefcount")
del t
from gc import get_objects as _getobjects
from gc import get_referents as _getreferents
t = (_kind_static, _kind_dynamic, _kind_derived, _kind_ignored, _kind_inferred) = (
i("static"),
i("dynamic"),
i("derived"),
i("ignored"),
i("inferred"),
)
from array import array as _array
for t in _values(_typedefs):
if t.type and t.leng:
try: # create an (empty) instance
s.append(t.type())
except TypeError:
pass
for t in s:
try:
i = iter(t)
_typedef_both(type(i), leng=_len_iter, refs=_iter_refs, item=0) # no itemsize!
except (KeyError, TypeError): # ignore non-iterables, duplicates, etc.
pass
def _typedefof(obj, save=False, **opts):
    """Return the typedef for *obj*, creating a new one on demand.

    A newly created typedef is stored in the module-level typedef
    registry only when *save* is true.
    """
    key = _objkey(obj)
    typedef = _typedefs.get(key, None)
    if typedef:  # already known type
        return typedef
    typedef = _typedef(obj, **opts)  # build a fresh typedef
    if save:
        _typedefs[key] = typedef  # remember it for next time
    return typedef
The provided code snippet includes necessary dependencies for implementing the `basicsize` function. Write a Python function `def basicsize(obj, **opts)` to solve the following problem:
Return the basic size of an object (in bytes). The available options and defaults are: *derive=False* -- derive type from super type *infer=False* -- try to infer types *save=False* -- save the object's type definition if new See this module documentation for the definition of *basic size*.
Here is the function:
def basicsize(obj, **opts):
    """Return the basic size of an object (in bytes).

    The available options and defaults are:

    *derive=False* -- derive type from super type
    *infer=False* -- try to infer types
    *save=False* -- save the object's type definition if new

    See this module documentation for the definition of *basic size*.
    """
    # If no typedef is found, the falsy typedef value itself is returned
    # unchanged (i.e. the result is not a byte count in that case).
    b = t = _typedefof(obj, **opts)
    if t:
        b = t.base  # basic size in bytes, taken from the typedef
return b | Return the basic size of an object (in bytes). The available options and defaults are: *derive=False* -- derive type from super type *infer=False* -- try to infer types *save=False* -- save the object's type definition if new See this module documentation for the definition of *basic size*. |
178,533 | import sys
import types as Types
import warnings
import weakref as Weakref
from inspect import isbuiltin, isclass, iscode, isframe, isfunction, ismethod, ismodule
from math import log
from os import curdir, linesep
from struct import calcsize
t = hasattr(sys, "gettotalrefcount")
del t
from gc import get_objects as _getobjects
from gc import get_referents as _getreferents
def _OptionError(where, Error=ValueError, **options):
    """Build an *Error* instance describing the invalid *options*
    passed to callable *where*.
    """
    plural = _plural(len(options))
    name = _nameof(where)
    kwds = _kwdstr(**options)
    return Error("invalid option%s: %s(%s)" % (plural, name, kwds))
t = (_kind_static, _kind_dynamic, _kind_derived, _kind_ignored, _kind_inferred) = (
i("static"),
i("dynamic"),
i("derived"),
i("ignored"),
i("inferred"),
)
from array import array as _array
for t in _values(_typedefs):
if t.type and t.leng:
try: # create an (empty) instance
s.append(t.type())
except TypeError:
pass
for t in s:
try:
i = iter(t)
_typedef_both(type(i), leng=_len_iter, refs=_iter_refs, item=0) # no itemsize!
except (KeyError, TypeError): # ignore non-iterables, duplicates, etc.
pass
def _typedefof(obj, save=False, **opts):
    """Look up (or lazily create) the typedef describing *obj*'s type.

    When *save* is True a newly created typedef is cached in the
    module-level typedef registry for reuse.
    """
    k = _objkey(obj)
    td = _typedefs.get(k, None)
    if not td:  # unseen type: derive a typedef on the fly
        td = _typedef(obj, **opts)
        if save:
            _typedefs[k] = td
    return td
The provided code snippet includes necessary dependencies for implementing the `flatsize` function. Write a Python function `def flatsize(obj, align=0, **opts)` to solve the following problem:
Return the flat size of an object (in bytes), optionally aligned to the given power-of-2. See function **basicsize** for a description of other available options. See this module documentation for the definition of *flat size*.
Here is the function:
def flatsize(obj, align=0, **opts):
    """Return the flat size of an object (in bytes), optionally aligned
    to the given power-of-2.

    See function **basicsize** for a description of other available options.
    See this module documentation for the definition of *flat size*.
    """
    # If no typedef is found, the falsy typedef value itself is returned.
    f = t = _typedefof(obj, **opts)
    if t:
        if align > 1:
            m = align - 1
            if m & align:  # align is not a power of 2
                raise _OptionError(flatsize, align=align)
        else:
            m = 0  # no alignment requested
        f = t.flat(obj, mask=m)  # flat size, aligned via the bit mask
return f | Return the flat size of an object (in bytes), optionally aligned to the given power-of-2. See function **basicsize** for a description of other available options. See this module documentation for the definition of *flat size*. |
178,534 | import sys
import types as Types
import warnings
import weakref as Weakref
from inspect import isbuiltin, isclass, iscode, isframe, isfunction, ismethod, ismodule
from math import log
from os import curdir, linesep
from struct import calcsize
from gc import get_objects as _getobjects
from gc import get_referents as _getreferents
from array import array as _array
def _typedefof(obj, save=False, **opts):
    """Get the (possibly cached) typedef for an object.

    A typedef missing from the registry is built from *obj*; it is
    added to the registry only when *save* is true.
    """
    key = _objkey(obj)
    found = _typedefs.get(key, None)
    if found:
        return found
    made = _typedef(obj, **opts)
    if save:  # cache the newly built typedef
        _typedefs[key] = made
    return made
def refs(obj, **opts):
    """Return (a generator for) specific *referents* of an object.

    See function **basicsize** for a description of the available options.
    """
    td = _typedefof(obj, **opts)
    if not td:  # unknown type: pass the falsy value through
        return td
    r = td.refs
    if r and callable(r):
        r = r(obj, False)  # False: un-named referents are fine here
    return r
The provided code snippet includes necessary dependencies for implementing the `named_refs` function. Write a Python function `def named_refs(obj, **opts)` to solve the following problem:
Return all named **referents** of an object (re-using functionality from **asizeof**). Does not return un-named *referents*, e.g. objects in a list. See function **basicsize** for a description of the available options.
Here is the function:
def named_refs(obj, **opts):
    """Return all named **referents** of an object (re-using
    functionality from **asizeof**).

    Does not return un-named *referents*, e.g. objects in a list.

    See function **basicsize** for a description of the available options.
    """
    rs = []
    v = _typedefof(obj, **opts)
    if v:
        v = v.refs
        if v and callable(v):
            for r in v(obj, True):  # True: request named references
                try:
                    rs.append((r.name, r.ref))
                except AttributeError:  # un-named referent: skip it
                    pass
return rs | Return all named **referents** of an object (re-using functionality from **asizeof**). Does not return un-named *referents*, e.g. objects in a list. See function **basicsize** for a description of the available options. |
178,535 | from __future__ import annotations
import time
from enum import Enum
from typing import TYPE_CHECKING, Any, NamedTuple
from langchain.callbacks.base import ( # type: ignore[import-not-found, unused-ignore]
BaseCallbackHandler,
)
from langchain.schema import ( # type: ignore[import-not-found, unused-ignore]
AgentAction,
AgentFinish,
LLMResult,
)
from streamlit.runtime.metrics_util import gather_metrics
The provided code snippet includes necessary dependencies for implementing the `_convert_newlines` function. Write a Python function `def _convert_newlines(text: str) -> str` to solve the following problem:
Convert newline characters to markdown newline sequences (space, space, newline).
Here is the function:
def _convert_newlines(text: str) -> str:
    """Convert newline characters to markdown newline sequences
    (space, space, newline).

    Two trailing spaces before a newline render as a hard line break
    in markdown, preserving the original line structure of *text*.
    """
return text.replace("\n", " \n") | Convert newline characters to markdown newline sequences (space, space, newline). |
178,536 | from __future__ import annotations
import inspect
import os
from types import FrameType
from streamlit.components.types.base_component_registry import BaseComponentRegistry
from streamlit.components.v1.custom_component import CustomComponent
from streamlit.runtime import get_instance
def _get_module_name(caller_frame: FrameType) -> str:
# Get the caller's module name. `__name__` gives us the module's
# fully-qualified name, which includes its package.
module = inspect.getmodule(caller_frame)
assert module is not None
module_name = module.__name__
# If the caller was the main module that was executed (that is, if the
# user executed `python my_component.py`), then this name will be
# "__main__" instead of the actual package name. In this case, we use
# the main module's filename, sans `.py` extension, as the component name.
if module_name == "__main__":
file_path = inspect.getfile(caller_frame)
filename = os.path.basename(file_path)
module_name, _ = os.path.splitext(filename)
return module_name
class CustomComponent(BaseCustomComponent):
    """A Custom Component declaration.

    Instances are callable: invoking one (or ``create_instance``)
    marshalls the given arguments into a ``component_instance`` element
    and enqueues it into the running Streamlit app.
    """

    def __call__(
        self,
        *args,
        default: Any = None,
        key: str | None = None,
        **kwargs,
    ) -> Any:
        """An alias for create_instance."""
        return self.create_instance(*args, default=default, key=key, **kwargs)

    def create_instance(
        self,
        *args,
        default: Any = None,
        key: str | None = None,
        **kwargs,
    ) -> Any:
        """Create a new instance of the component.

        Parameters
        ----------
        *args
            Must be empty; all args must be named. (This parameter exists to
            enforce correct use of the function.)
        default: any or None
            The default return value for the component. This is returned when
            the component's frontend hasn't yet specified a value with
            `setComponentValue`.
        key: str or None
            If not None, this is the user key we use to generate the
            component's "widget ID".
        **kwargs
            Keyword args to pass to the component.

        Returns
        -------
        any or None
            The component's widget value.
        """
        if len(args) > 0:
            raise MarshallComponentException(f"Argument '{args[0]}' needs a label")
        try:
            # pyarrow is an optional dependency, needed below to serialize
            # dataframe-like arguments for transport to the frontend.
            import pyarrow
            from streamlit.components.v1 import component_arrow
        except ImportError:
            raise StreamlitAPIException(
                """To use Custom Components in Streamlit, you need to install
PyArrow. To do so locally:
`pip install pyarrow`
And if you're using Streamlit Cloud, add "pyarrow" to your requirements.txt."""
            )
        # In addition to the custom kwargs passed to the component, we also
        # send the special 'default' and 'key' params to the component
        # frontend.
        all_args = dict(kwargs, **{"default": default, "key": key})
        # Split the args into JSON-serializable values and "special" args
        # (bytes and dataframes), which travel as separate proto fields.
        json_args = {}
        special_args = []
        for arg_name, arg_val in all_args.items():
            if type_util.is_bytes_like(arg_val):
                bytes_arg = SpecialArg()
                bytes_arg.key = arg_name
                bytes_arg.bytes = to_bytes(arg_val)
                special_args.append(bytes_arg)
            elif type_util.is_dataframe_like(arg_val):
                dataframe_arg = SpecialArg()
                dataframe_arg.key = arg_name
                component_arrow.marshall(dataframe_arg.arrow_dataframe.data, arg_val)
                special_args.append(dataframe_arg)
            else:
                json_args[arg_name] = arg_val
        try:
            serialized_json_args = json.dumps(json_args)
        except Exception as ex:
            raise MarshallComponentException(
                "Could not convert component args to JSON", ex
            )

        def marshall_component(
            dg: DeltaGenerator, element: Element
        ) -> Any | type[NoValue]:
            # Fill in the component_instance proto, register the widget and
            # return the widget's current value (or NoValue).
            element.component_instance.component_name = self.name
            element.component_instance.form_id = current_form_id(dg)
            if self.url is not None:
                element.component_instance.url = self.url
            # Normally, a widget's element_hash (which determines
            # its identity across multiple runs of an app) is computed
            # by hashing its arguments. This means that, if any of the arguments
            # to the widget are changed, Streamlit considers it a new widget
            # instance and it loses its previous state.
            #
            # However! If a *component* has a `key` argument, then the
            # component's hash identity is determined by entirely by
            # `component_name + url + key`. This means that, when `key`
            # exists, the component will maintain its identity even when its
            # other arguments change, and the component's iframe won't be
            # remounted on the frontend.
            def marshall_element_args():
                element.component_instance.json_args = serialized_json_args
                element.component_instance.special_args.extend(special_args)

            ctx = get_script_run_ctx()
            if key is None:
                # No user key: args participate in the widget identity hash.
                marshall_element_args()
                computed_id = compute_widget_id(
                    "component_instance",
                    user_key=key,
                    name=self.name,
                    form_id=current_form_id(dg),
                    url=self.url,
                    key=key,
                    json_args=serialized_json_args,
                    special_args=special_args,
                    page=ctx.page_script_hash if ctx else None,
                )
            else:
                # Keyed: identity is independent of the args (see note above).
                computed_id = compute_widget_id(
                    "component_instance",
                    user_key=key,
                    name=self.name,
                    form_id=current_form_id(dg),
                    url=self.url,
                    key=key,
                    page=ctx.page_script_hash if ctx else None,
                )
            element.component_instance.id = computed_id

            def deserialize_component(ui_value, widget_id=""):
                # ui_value is an object from json, an ArrowTable proto, or a bytearray
                return ui_value

            component_state = register_widget(
                element_type="component_instance",
                element_proto=element.component_instance,
                user_key=key,
                widget_func_name=self.name,
                deserializer=deserialize_component,
                serializer=lambda x: x,
                ctx=ctx,
            )
            widget_value = component_state.value
            if key is not None:
                # Keyed components marshall args only after the id is computed.
                marshall_element_args()
            if widget_value is None:
                widget_value = default
            elif isinstance(widget_value, ArrowTableProto):
                widget_value = component_arrow.arrow_proto_to_dataframe(widget_value)
            # widget_value will be either None or whatever the component's most
            # recent setWidgetValue value is. We coerce None -> NoValue,
            # because that's what DeltaGenerator._enqueue expects.
            return widget_value if widget_value is not None else NoValue

        # We currently only support writing to st._main, but this will change
        # when we settle on an improved API in a post-layout world.
        dg = _main
        element = Element()
        return_value = marshall_component(dg, element)
        result = dg._enqueue(
            "component_instance", element.component_instance, return_value
        )
        return result

    def __eq__(self, other) -> bool:
        """Equality operator."""
        return (
            isinstance(other, CustomComponent)
            and self.name == other.name
            and self.path == other.path
            and self.url == other.url
            and self.module_name == other.module_name
        )

    def __ne__(self, other) -> bool:
        """Inequality operator."""
        # we have to use "not X == Y" here because if we use "X != Y" we call __ne__ again and end up in recursion
        return not self == other

    def __str__(self) -> str:
        return f"'{self.name}': {self.path if self.path is not None else self.url}"
def get_instance() -> Runtime:
    """Return the singleton Runtime instance.

    Raises an Error if the Runtime hasn't been created yet.
    """
    return Runtime.instance()
The provided code snippet includes necessary dependencies for implementing the `declare_component` function. Write a Python function `def declare_component( name: str, path: str | None = None, url: str | None = None, ) -> CustomComponent` to solve the following problem:
Create and register a custom component. Parameters ---------- name: str A short, descriptive name for the component. Like, "slider". path: str or None The path to serve the component's frontend files from. Either `path` or `url` must be specified, but not both. url: str or None The URL that the component is served from. Either `path` or `url` must be specified, but not both. Returns ------- CustomComponent A CustomComponent that can be called like a function. Calling the component will create a new instance of the component in the Streamlit app.
Here is the function:
def declare_component(
    name: str,
    path: str | None = None,
    url: str | None = None,
) -> CustomComponent:
    """Create and register a custom component.

    Parameters
    ----------
    name: str
        A short, descriptive name for the component. Like, "slider".
    path: str or None
        The path to serve the component's frontend files from. Either
        `path` or `url` must be specified, but not both.
    url: str or None
        The URL that the component is served from. Either `path` or `url`
        must be specified, but not both.

    Returns
    -------
    CustomComponent
        A CustomComponent that can be called like a function.
        Calling the component will create a new instance of the component
        in the Streamlit app.
    """
    # Get our stack frame.
    current_frame: FrameType | None = inspect.currentframe()
    assert current_frame is not None
    # Get the stack frame of our calling function.
    caller_frame = current_frame.f_back
    assert caller_frame is not None
    # Derive the caller's module name so the component name is namespaced
    # per module and won't collide with same-named components elsewhere.
    module_name = _get_module_name(caller_frame)
    # Build the component name.
    component_name = f"{module_name}.{name}"
    # Create our component object, and register it.
    component = CustomComponent(
        name=component_name, path=path, url=url, module_name=module_name
    )
    get_instance().component_registry.register_component(component)
return component | Create and register a custom component. Parameters ---------- name: str A short, descriptive name for the component. Like, "slider". path: str or None The path to serve the component's frontend files from. Either `path` or `url` must be specified, but not both. url: str or None The URL that the component is served from. Either `path` or `url` must be specified, but not both. Returns ------- CustomComponent A CustomComponent that can be called like a function. Calling the component will create a new instance of the component in the Streamlit app. |
178,537 | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from streamlit import type_util
from streamlit.elements.lib import pandas_styler_utils
from streamlit.proto.Components_pb2 import ArrowTable as ArrowTableProto
def _marshall_index(proto: ArrowTableProto, index: Index) -> None:
    """Serialize a pandas.DataFrame index into the ``index`` field of
    an ArrowTable proto.

    Parameters
    ----------
    proto : proto.ArrowTable
        Output. The protobuf for a Streamlit ArrowTable proto.
    index : pd.Index
        Index to use for resulting frame.
        Will default to RangeIndex (0, 1, 2, ..., n) if no index is provided.
    """
    import pandas as pd

    # Tuple labels (e.g. from a MultiIndex) must become lists before
    # Arrow serialization.
    values = [type_util.maybe_tuple_to_list(v) for v in index.values]
    proto.index = type_util.data_frame_to_bytes(pd.DataFrame(values))
def _marshall_columns(proto: ArrowTableProto, columns: Series) -> None:
    """Serialize pandas.DataFrame column labels into the ``columns``
    field of an ArrowTable proto.

    Parameters
    ----------
    proto : proto.ArrowTable
        Output. The protobuf for a Streamlit ArrowTable proto.
    columns : Series
        Column labels to use for resulting frame.
        Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
    """
    import pandas as pd

    # Tuple labels (e.g. from a MultiIndex) must become lists before
    # Arrow serialization.
    labels = [type_util.maybe_tuple_to_list(c) for c in columns.values]
    proto.columns = type_util.data_frame_to_bytes(pd.DataFrame(labels))
def _marshall_data(proto: ArrowTableProto, df: DataFrame) -> None:
    """Serialize a dataframe's values into the ``data`` field of an
    ArrowTable proto.

    Parameters
    ----------
    proto : proto.ArrowTable
        Output. The protobuf for a Streamlit ArrowTable proto.
    df : pandas.DataFrame
        A dataframe to marshall.
    """
    proto.data = type_util.data_frame_to_bytes(df)
The provided code snippet includes necessary dependencies for implementing the `marshall` function. Write a Python function `def marshall( proto: ArrowTableProto, data: Any, default_uuid: str | None = None ) -> None` to solve the following problem:
Marshall data into an ArrowTable proto. Parameters ---------- proto : proto.ArrowTable Output. The protobuf for a Streamlit ArrowTable proto. data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Something that is or can be converted to a dataframe.
Here is the function:
def marshall(
    proto: ArrowTableProto, data: Any, default_uuid: str | None = None
) -> None:
    """Marshall data into an ArrowTable proto.

    Parameters
    ----------
    proto : proto.ArrowTable
        Output. The protobuf for a Streamlit ArrowTable proto.
    data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
        Something that is or can be converted to a dataframe.
    default_uuid : str or None
        UUID passed through to the Styler marshalling, if *data* is a Styler.
    """
    if type_util.is_pandas_styler(data):
        # Styler styles are marshalled separately; the underlying frame
        # is still converted and marshalled below.
        pandas_styler_utils.marshall_styler(proto, data, default_uuid)  # type: ignore
    df = type_util.convert_anything_to_df(data)
    _marshall_index(proto, df.index)
    _marshall_columns(proto, df.columns)
_marshall_data(proto, df) | Marshall data into an ArrowTable proto. Parameters ---------- proto : proto.ArrowTable Output. The protobuf for a Streamlit ArrowTable proto. data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Something that is or can be converted to a dataframe. |
178,538 | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from streamlit import type_util
from streamlit.elements.lib import pandas_styler_utils
from streamlit.proto.Components_pb2 import ArrowTable as ArrowTableProto
The provided code snippet includes necessary dependencies for implementing the `arrow_proto_to_dataframe` function. Write a Python function `def arrow_proto_to_dataframe(proto: ArrowTableProto) -> DataFrame` to solve the following problem:
Convert ArrowTable proto to pandas.DataFrame. Parameters ---------- proto : proto.ArrowTable Output. pandas.DataFrame
Here is the function:
def arrow_proto_to_dataframe(proto: ArrowTableProto) -> DataFrame:
    """Convert an ArrowTable proto back into a pandas.DataFrame.

    Parameters
    ----------
    proto : proto.ArrowTable
        The protobuf to deserialize.

    Returns
    -------
    pandas.DataFrame
        Dataframe rebuilt from the proto's data, index and columns payloads.

    Raises
    ------
    RuntimeError
        If the installed pyarrow version is older than 14.0.1.
    """
    # Older pyarrow releases are not supported by this component.
    if type_util.is_pyarrow_version_less_than("14.0.1"):
        raise RuntimeError(
            "The installed pyarrow version is not compatible with this component. "
            "Please upgrade to 14.0.1 or higher: pip install -U pyarrow"
        )

    import pandas as pd

    body = type_util.bytes_to_data_frame(proto.data)
    index_frame = type_util.bytes_to_data_frame(proto.index)
    columns_frame = type_util.bytes_to_data_frame(proto.columns)

    # Index/columns were stored column-wise; transpose back to label lists.
    return pd.DataFrame(
        body.values,
        index=index_frame.values.T.tolist(),
        columns=columns_frame.values.T.tolist(),
    )
178,539 | from __future__ import annotations
import logging
import sys
from typing import Final
_loggers: dict[str, logging.Logger] = {}
_global_log_level = logging.INFO
def get_logger(name: str) -> logging.Logger:
    """Return a cached logger, creating and configuring it on first use.

    Parameters
    ----------
    name : str
        The name of the logger to use. You should just pass in __name__.

    Returns
    -------
    Logger
    """
    # Fast path: reuse the already-configured logger.
    # (Membership test on the dict itself — `.keys()` is redundant.)
    if name in _loggers:
        return _loggers[name]

    # "root" is mapped to the package-level "streamlit" logger.
    logger = logging.getLogger("streamlit" if name == "root" else name)
    logger.setLevel(_global_log_level)
    # Don't let records bubble up to Python's root logger.
    logger.propagate = False
    setup_formatter(logger)

    _loggers[name] = logger
    return logger
The provided code snippet includes necessary dependencies for implementing the `set_log_level` function. Write a Python function `def set_log_level(level: str | int) -> None` to solve the following problem:
Set log level.
Here is the function:
def set_log_level(level: str | int) -> None:
    """Set the log level for all Streamlit loggers.

    Parameters
    ----------
    level : str or int
        A level name ("critical", "error", "warning", "info", "debug",
        case-insensitive) or the corresponding ``logging`` constant.

    Exits the process with status 1 if *level* is not recognized,
    matching the original behavior.
    """
    logger = get_logger(__name__)

    # Table of accepted levels replaces the long if/elif chain.
    name_to_level = {
        "CRITICAL": logging.CRITICAL,
        "ERROR": logging.ERROR,
        "WARNING": logging.WARNING,
        "INFO": logging.INFO,
        "DEBUG": logging.DEBUG,
    }

    if isinstance(level, str):
        level = level.upper()
        log_level = name_to_level.get(level)
    else:
        # Integer input must be exactly one of the known level constants.
        log_level = level if level in name_to_level.values() else None

    if log_level is None:
        msg = 'undefined log level "%s"' % level
        logger.critical(msg)
        sys.exit(1)

    for log in _loggers.values():
        log.setLevel(log_level)

    # Remember the level for loggers created after this call.
    global _global_log_level
    _global_log_level = log_level
178,540 | from __future__ import annotations
import logging
import sys
from typing import Final
_loggers: dict[str, logging.Logger] = {}
def setup_formatter(logger: logging.Logger) -> None:
    """Set up the console formatter for a given logger.

    Replaces any console handler installed by a previous call, so calling
    this repeatedly (e.g. after a config change) stays idempotent.
    """
    # Deregister any previous console loggers.
    if hasattr(logger, "streamlit_console_handler"):
        logger.removeHandler(logger.streamlit_console_handler)
    # The handler is stashed on the logger object itself so the next call
    # can find and remove exactly this handler.
    logger.streamlit_console_handler = logging.StreamHandler()  # type: ignore[attr-defined]
    # Import here to avoid circular imports
    from streamlit import config
    if config._config_options:
        # logger is required in ConfigOption.set_value
        # Getting the config option before the config file has been parsed
        # can create an infinite loop
        message_format = config.get_option("logger.messageFormat")
    else:
        # Config not parsed yet: fall back to the module-level default format.
        message_format = DEFAULT_LOG_MESSAGE
    formatter = logging.Formatter(fmt=message_format)
    # Render timestamps with millisecond precision.
    formatter.default_msec_format = "%s.%03d"
    logger.streamlit_console_handler.setFormatter(formatter)  # type: ignore[attr-defined]
    # Register the new console logger.
    logger.addHandler(logger.streamlit_console_handler)  # type: ignore[attr-defined]
def update_formatter() -> None:
    """Re-apply the console formatter to every logger created so far."""
    for cached_logger in _loggers.values():
        setup_formatter(cached_logger)
178,541 | from __future__ import annotations
import logging
import sys
from typing import Final
def get_logger(name: str) -> logging.Logger:
    """Return a logger.

    Loggers are cached: repeated calls with the same name return the same
    configured instance.

    Parameters
    ----------
    name : str
        The name of the logger to use. You should just pass in __name__.

    Returns
    -------
    Logger
    """
    # Serve the cached, already-configured logger when one exists.
    if name in _loggers.keys():
        return _loggers[name]
    # "root" is mapped to the package-level "streamlit" logger.
    if name == "root":
        logger = logging.getLogger("streamlit")
    else:
        logger = logging.getLogger(name)
    logger.setLevel(_global_log_level)
    # Don't let records bubble up to Python's root logger.
    logger.propagate = False
    setup_formatter(logger)
    _loggers[name] = logger
    return logger
The provided code snippet includes necessary dependencies for implementing the `init_tornado_logs` function. Write a Python function `def init_tornado_logs() -> None` to solve the following problem:
Set Tornado log levels. This function does not import any Tornado code, so it's safe to call even when Server is not running.
Here is the function:
def init_tornado_logs() -> None:
    """Set Tornado log levels.

    This function does not import any Tornado code, so it's safe to call
    even when Server is not running.
    """
    # http://www.tornadoweb.org/en/stable/log.html
    # Touching each well-known Tornado logger through get_logger applies
    # our level and formatter to it.
    tornado_logger_names = ("tornado.access", "tornado.application", "tornado.general")
    for logger_name in tornado_logger_names:
        get_logger(logger_name)
178,542 | from __future__ import annotations
import contextlib
import errno
import io
import os
from pathlib import Path
from streamlit import env_util, util
from streamlit.string_util import is_binary_string
def is_binary_string(inp: bytes) -> bool:
    """Guess if an input bytesarray can be encoded as a string."""
    # From https://stackoverflow.com/a/7392391
    # Strip out every "texty" byte; anything left over suggests binary data.
    non_text_bytes = inp.translate(None, TEXTCHARS)
    return len(non_text_bytes) > 0
The provided code snippet includes necessary dependencies for implementing the `get_encoded_file_data` function. Write a Python function `def get_encoded_file_data( data: bytes, encoding: str = "auto" ) -> io.StringIO | io.BytesIO` to solve the following problem:
Coerce bytes to a BytesIO or a StringIO. Parameters ---------- data : bytes encoding : str Returns ------- BytesIO or StringIO If the file's data is in a well-known textual format (or if the encoding parameter is set), return a StringIO. Otherwise, return BytesIO.
Here is the function:
def get_encoded_file_data(
    data: bytes, encoding: str = "auto"
) -> io.StringIO | io.BytesIO:
    """Coerce bytes to a BytesIO or a StringIO.

    Parameters
    ----------
    data : bytes
    encoding : str

    Returns
    -------
    BytesIO or StringIO
        If the file's data is in a well-known textual format (or if the
        encoding parameter is set), return a StringIO. Otherwise, return
        BytesIO.
    """
    if encoding != "auto":
        data_encoding = encoding
    elif is_binary_string(data):
        # Looks like pure binary content: no sensible text decoding.
        data_encoding = None
    else:
        # If the file does not look like a pure binary file, assume
        # it's utf-8. It would be great if we could guess it a little
        # more smartly here, but it is what it is!
        data_encoding = "utf-8"

    if data_encoding:
        return io.StringIO(data.decode(data_encoding))
    return io.BytesIO(data)
178,543 | from __future__ import annotations
import contextlib
import errno
import io
import os
from pathlib import Path
from streamlit import env_util, util
from streamlit.string_util import is_binary_string
The provided code snippet includes necessary dependencies for implementing the `get_static_dir` function. Write a Python function `def get_static_dir() -> str` to solve the following problem:
Get the folder where static HTML/JS/CSS files live.
Here is the function:
def get_static_dir() -> str:
    """Get the folder where static HTML/JS/CSS files live."""
    # Static assets live in a "static" folder next to this module.
    here = os.path.normpath(__file__)
    static_path = os.path.join(os.path.dirname(here), "static")
    return os.path.normpath(static_path)
178,544 | from __future__ import annotations
import contextlib
import errno
import io
import os
from pathlib import Path
from streamlit import env_util, util
from streamlit.string_util import is_binary_string
CONFIG_FOLDER_NAME = ".streamlit"
The provided code snippet includes necessary dependencies for implementing the `get_project_streamlit_file_path` function. Write a Python function `def get_project_streamlit_file_path(*filepath)` to solve the following problem:
Return the full path to a filepath in ${CWD}/.streamlit. This doesn't guarantee that the file (or its directory) exists.
Here is the function:
def get_project_streamlit_file_path(*filepath):
    """Return the full path to a filepath in ${CWD}/.streamlit.

    This doesn't guarantee that the file (or its directory) exists.
    """
    project_config_dir = os.path.join(os.getcwd(), CONFIG_FOLDER_NAME)
    return os.path.join(project_config_dir, *filepath)
178,545 | from __future__ import annotations
import contextlib
import errno
import io
import os
from pathlib import Path
from streamlit import env_util, util
from streamlit.string_util import is_binary_string
def file_is_in_folder_glob(filepath: str, folderpath_glob: str) -> bool:
    """Test whether a file is in some folder with globbing support.

    Parameters
    ----------
    filepath : str
        A file path.
    folderpath_glob: str
        A path to a folder that may include globbing.
    """
    import fnmatch

    # Make the glob always end with "/*" so we match files inside subfolders
    # of folderpath_glob.
    if not folderpath_glob.endswith("*"):
        folderpath_glob += "*" if folderpath_glob.endswith("/") else "/*"

    containing_dir = os.path.dirname(filepath) + "/"
    return fnmatch.fnmatch(containing_dir, folderpath_glob)
The provided code snippet includes necessary dependencies for implementing the `file_in_pythonpath` function. Write a Python function `def file_in_pythonpath(filepath: str) -> bool` to solve the following problem:
Test whether a filepath is in the same folder of a path specified in the PYTHONPATH env variable. Parameters ---------- filepath : str An absolute file path. Returns ------- boolean True if contained in PYTHONPATH, False otherwise. False if PYTHONPATH is not defined or empty.
Here is the function:
def file_in_pythonpath(filepath: str) -> bool:
    """Test whether a filepath is in the same folder of a path specified in the PYTHONPATH env variable.

    Parameters
    ----------
    filepath : str
        An absolute file path.

    Returns
    -------
    bool
        True if contained in PYTHONPATH, False otherwise. False if
        PYTHONPATH is not defined or empty.
    """
    pythonpath = os.environ.get("PYTHONPATH", "")
    # Truthiness check replaces len(...) == 0.
    if not pythonpath:
        return False

    # PYTHONPATH entries may be relative; normalize them before matching.
    absolute_paths = (os.path.abspath(path) for path in pythonpath.split(os.pathsep))
    # Hoisted out of the loop: normpath of the target is loop-invariant.
    normalized = os.path.normpath(filepath)
    return any(file_is_in_folder_glob(normalized, path) for path in absolute_paths)
178,546 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
ArrayValueFieldName: TypeAlias = Literal[
"double_array_value",
"int_array_value",
"string_array_value",
]
ARRAY_VALUE_FIELD_NAMES: Final = frozenset(
cast(
"tuple[ArrayValueFieldName, ...]",
# NOTE: get_args is not recursive, so this only works as long as
# ArrayValueFieldName remains flat.
get_args(ArrayValueFieldName),
)
)
def is_array_value_field_name(obj: object) -> TypeGuard[ArrayValueFieldName]:
    """Return True when *obj* is one of the known array-value field names."""
    is_known_field = obj in ARRAY_VALUE_FIELD_NAMES
    return is_known_field
178,547 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
_BYTES_LIKE_TYPES: Final[tuple[type, ...]] = (
bytes,
bytearray,
)
BytesLike: TypeAlias = Union[bytes, bytearray]
The provided code snippet includes necessary dependencies for implementing the `is_bytes_like` function. Write a Python function `def is_bytes_like(obj: object) -> TypeGuard[BytesLike]` to solve the following problem:
True if the type is considered bytes-like for the purposes of protobuf data marshalling.
Here is the function:
def is_bytes_like(obj: object) -> TypeGuard[BytesLike]:
    """True if the type is considered bytes-like for the purposes of
    protobuf data marshalling.
    """
    bytes_like = isinstance(obj, _BYTES_LIKE_TYPES)
    return bytes_like
178,548 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
def is_type(
obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
...
def is_type(
obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
    """Check type without importing expensive modules.

    Parameters
    ----------
    obj : object
        The object to type-check.
    fqn_type_pattern : str or regex
        The fully-qualified type string or a regular expression.
        Regexes should start with `^` and end with `$`.

    Example
    -------
    To check whether something is a Matplotlib Figure without importing
    matplotlib, use:

    >>> is_type(foo, 'matplotlib.figure.Figure')
    """
    fqn = get_fqn_type(obj)
    if isinstance(fqn_type_pattern, str):
        # Exact fully-qualified-name comparison.
        return fqn == fqn_type_pattern
    # Regex pattern: match against the fully-qualified name.
    return fqn_type_pattern.match(fqn) is not None
_SYMPY_RE: Final = re.compile(r"^sympy.*$")
The provided code snippet includes necessary dependencies for implementing the `is_sympy_expession` function. Write a Python function `def is_sympy_expession(obj: object) -> TypeGuard[sympy.Expr]` to solve the following problem:
True if input is a SymPy expression.
Here is the function:
def is_sympy_expession(obj: object) -> TypeGuard[sympy.Expr]:
    """True if input is a SymPy expression."""
    # Cheap name-based pre-filter so sympy is only imported when plausible.
    if not is_type(obj, _SYMPY_RE):
        return False

    try:
        import sympy
    except ImportError:
        return False
    return isinstance(obj, sympy.Expr)
178,549 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
def is_type(
obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
...
def is_type(
obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
"""Check type without importing expensive modules.
Parameters
----------
obj : object
The object to type-check.
fqn_type_pattern : str or regex
The fully-qualified type string or a regular expression.
Regexes should start with `^` and end with `$`.
Example
-------
To check whether something is a Matplotlib Figure without importing
matplotlib, use:
>>> is_type(foo, 'matplotlib.figure.Figure')
"""
fqn_type = get_fqn_type(obj)
if isinstance(fqn_type_pattern, str):
return fqn_type_pattern == fqn_type
else:
return fqn_type_pattern.match(fqn_type) is not None
_ALTAIR_RE: Final = re.compile(r"^altair\.vegalite\.v\d+\.api\.\w*Chart$")
The provided code snippet includes necessary dependencies for implementing the `is_altair_chart` function. Write a Python function `def is_altair_chart(obj: object) -> bool` to solve the following problem:
True if input looks like an Altair chart.
Here is the function:
def is_altair_chart(obj: object) -> bool:
    """True if input looks like an Altair chart."""
    looks_like_altair = is_type(obj, _ALTAIR_RE)
    return looks_like_altair
178,550 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
def is_type(
obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
...
def is_type(
obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
"""Check type without importing expensive modules.
Parameters
----------
obj : object
The object to type-check.
fqn_type_pattern : str or regex
The fully-qualified type string or a regular expression.
Regexes should start with `^` and end with `$`.
Example
-------
To check whether something is a Matplotlib Figure without importing
matplotlib, use:
>>> is_type(foo, 'matplotlib.figure.Figure')
"""
fqn_type = get_fqn_type(obj)
if isinstance(fqn_type_pattern, str):
return fqn_type_pattern == fqn_type
else:
return fqn_type_pattern.match(fqn_type) is not None
_PILLOW_RE: Final = re.compile(r"^PIL\..*")
The provided code snippet includes necessary dependencies for implementing the `is_pillow_image` function. Write a Python function `def is_pillow_image(obj: object) -> bool` to solve the following problem:
True if input looks like a pillow image.
Here is the function:
def is_pillow_image(obj: object) -> bool:
    """True if input looks like a pillow image."""
    looks_like_pillow = is_type(obj, _PILLOW_RE)
    return looks_like_pillow
178,551 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
def is_type(
obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
...
def is_type(
obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
"""Check type without importing expensive modules.
Parameters
----------
obj : object
The object to type-check.
fqn_type_pattern : str or regex
The fully-qualified type string or a regular expression.
Regexes should start with `^` and end with `$`.
Example
-------
To check whether something is a Matplotlib Figure without importing
matplotlib, use:
>>> is_type(foo, 'matplotlib.figure.Figure')
"""
fqn_type = get_fqn_type(obj)
if isinstance(fqn_type_pattern, str):
return fqn_type_pattern == fqn_type
else:
return fqn_type_pattern.match(fqn_type) is not None
The provided code snippet includes necessary dependencies for implementing the `is_keras_model` function. Write a Python function `def is_keras_model(obj: object) -> bool` to solve the following problem:
True if input looks like a Keras model.
Here is the function:
def is_keras_model(obj: object) -> bool:
    """True if input looks like a Keras model."""
    # Keras classes exist both in the standalone package and under
    # tensorflow; accept the fully-qualified names from both locations.
    keras_fqns = (
        "keras.engine.sequential.Sequential",
        "keras.engine.training.Model",
        "tensorflow.python.keras.engine.sequential.Sequential",
        "tensorflow.python.keras.engine.training.Model",
    )
    return any(is_type(obj, fqn) for fqn in keras_fqns)
178,552 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
def is_type(
obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
...
def is_type(
obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
"""Check type without importing expensive modules.
Parameters
----------
obj : object
The object to type-check.
fqn_type_pattern : str or regex
The fully-qualified type string or a regular expression.
Regexes should start with `^` and end with `$`.
Example
-------
To check whether something is a Matplotlib Figure without importing
matplotlib, use:
>>> is_type(foo, 'matplotlib.figure.Figure')
"""
fqn_type = get_fqn_type(obj)
if isinstance(fqn_type_pattern, str):
return fqn_type_pattern == fqn_type
else:
return fqn_type_pattern.match(fqn_type) is not None
_OPENAI_CHUNK_RE: Final = re.compile(r"^openai\..+\.ChatCompletionChunk$")
The provided code snippet includes necessary dependencies for implementing the `is_openai_chunk` function. Write a Python function `def is_openai_chunk(obj: object) -> bool` to solve the following problem:
True if input looks like an OpenAI chat completion chunk.
Here is the function:
def is_openai_chunk(obj: object) -> bool:
    """True if input looks like an OpenAI chat completion chunk."""
    looks_like_chunk = is_type(obj, _OPENAI_CHUNK_RE)
    return looks_like_chunk
178,553 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
def is_type(
obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
...
def is_type(
obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
"""Check type without importing expensive modules.
Parameters
----------
obj : object
The object to type-check.
fqn_type_pattern : str or regex
The fully-qualified type string or a regular expression.
Regexes should start with `^` and end with `$`.
Example
-------
To check whether something is a Matplotlib Figure without importing
matplotlib, use:
>>> is_type(foo, 'matplotlib.figure.Figure')
"""
fqn_type = get_fqn_type(obj)
if isinstance(fqn_type_pattern, str):
return fqn_type_pattern == fqn_type
else:
return fqn_type_pattern.match(fqn_type) is not None
def _is_list_of_plotly_objs(obj: object) -> TypeGuard[list[Any]]:
    """True only for a non-empty list whose every element is a plotly object."""
    if not isinstance(obj, list) or not obj:
        return False
    return all(_is_plotly_obj(element) for element in obj)
def _is_probably_plotly_dict(obj: object) -> TypeGuard[dict[str, Any]]:
    """Heuristic: True when *obj* looks like a dict-form plotly figure spec."""
    if not isinstance(obj, dict) or not obj:
        return False

    # Only the four plotly figure sections may appear as keys.
    allowed_keys = ("config", "data", "frames", "layout")
    if any(key not in allowed_keys for key in obj.keys()):
        return False

    # At least one value must be a plotly object or a list of them.
    # (The predicates are pure checks, so combining the two passes into
    # one scan yields the same result.)
    return any(
        _is_plotly_obj(value) or _is_list_of_plotly_objs(value)
        for value in obj.values()
    )
The provided code snippet includes necessary dependencies for implementing the `is_plotly_chart` function. Write a Python function `def is_plotly_chart(obj: object) -> TypeGuard[Figure | list[Any] | dict[str, Any]]` to solve the following problem:
True if input looks like a Plotly chart.
Here is the function:
def is_plotly_chart(obj: object) -> TypeGuard[Figure | list[Any] | dict[str, Any]]:
    """True if input looks like a Plotly chart."""
    # A plotly chart may arrive as a Figure instance, a list of plotly
    # objects, or a dict-form figure spec.
    if is_type(obj, "plotly.graph_objs._figure.Figure"):
        return True
    if _is_list_of_plotly_objs(obj):
        return True
    return _is_probably_plotly_dict(obj)
178,554 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
The provided code snippet includes necessary dependencies for implementing the `is_function` function. Write a Python function `def is_function(x: object) -> TypeGuard[types.FunctionType]` to solve the following problem:
Return True if x is a function.
Here is the function:
def is_function(x: object) -> TypeGuard[types.FunctionType]:
"""Return True if x is a function."""
return isinstance(x, types.FunctionType) | Return True if x is a function. |
178,555 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
def is_namedtuple(x: object) -> TypeGuard[NamedTuple]:
t = type(x)
b = t.__bases__
if len(b) != 1 or b[0] != tuple:
return False
f = getattr(t, "_fields", None)
if not isinstance(f, tuple):
return False
return all(type(n).__name__ == "str" for n in f) | null |
178,556 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
def is_type(
obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]:
...
def is_type(
obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
"""Check type without importing expensive modules.
Parameters
----------
obj : object
The object to type-check.
fqn_type_pattern : str or regex
The fully-qualified type string or a regular expression.
Regexes should start with `^` and end with `$`.
Example
-------
To check whether something is a Matplotlib Figure without importing
matplotlib, use:
>>> is_type(foo, 'matplotlib.figure.Figure')
"""
fqn_type = get_fqn_type(obj)
if isinstance(fqn_type_pattern, str):
return fqn_type_pattern == fqn_type
else:
return fqn_type_pattern.match(fqn_type) is not None
The provided code snippet includes necessary dependencies for implementing the `is_pydeck` function. Write a Python function `def is_pydeck(obj: object) -> TypeGuard[Deck]` to solve the following problem:
True if input looks like a pydeck chart.
Here is the function:
def is_pydeck(obj: object) -> TypeGuard[Deck]:
    """True if input looks like a pydeck chart."""
    looks_like_deck = is_type(obj, "pydeck.bindings.deck.Deck")
    return looks_like_deck
178,557 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
V_co = TypeVar(
"V_co",
covariant=True, # https://peps.python.org/pep-0484/#covariance-and-contravariance
)
OptionSequence: TypeAlias = Union[
Iterable[V_co],
DataFrameGenericAlias[V_co],
]
def ensure_iterable(obj: Iterable[V_co]) -> Iterable[V_co]:
...
def ensure_iterable(obj: OptionSequence[V_co]) -> Iterable[Any]:
...
def ensure_iterable(obj: OptionSequence[V_co] | Iterable[V_co]) -> Iterable[Any]:
    """Try to convert different formats to something iterable. Most inputs
    are assumed to be iterable, but if we have a DataFrame, we can just
    select the first column to iterate over. If the input is not iterable,
    a TypeError is raised.

    Parameters
    ----------
    obj : list, tuple, numpy.ndarray, pandas.Series, pandas.DataFrame, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame or snowflake.snowpark.table.Table

    Returns
    -------
    iterable

    Raises
    ------
    TypeError
        If *obj* is not iterable and cannot be converted to a dataframe.
    """
    # Snowpark / PySpark frames are first converted to a pandas DataFrame
    # so the generic dataframe branch below can handle them.
    if is_snowpark_or_pyspark_data_object(obj):
        obj = convert_anything_to_df(obj)
    if is_dataframe(obj):
        # Return first column as a pd.Series
        # The type of the elements in this column is not known up front, hence
        # the Iterable[Any] return type.
        return cast(Iterable[Any], obj.iloc[:, 0])
    if is_iterable(obj):
        return obj
    raise TypeError(
        f"Object is not an iterable and could not be converted to one. Object: {obj}"
    )
The provided code snippet includes necessary dependencies for implementing the `ensure_indexable` function. Write a Python function `def ensure_indexable(obj: OptionSequence[V_co]) -> Sequence[V_co]` to solve the following problem:
Try to ensure a value is an indexable Sequence. If the collection already is one, it has the index method that we need. Otherwise, convert it to a list.
Here is the function:
def ensure_indexable(obj: OptionSequence[V_co]) -> Sequence[V_co]:
    """Try to ensure a value is an indexable Sequence. If the collection already
    is one, it has the index method that we need. Otherwise, convert it to a list.
    """
    iterable = ensure_iterable(obj)
    # Imperfect duck-typing check: we only verify that an `index` attribute
    # exists and is callable, not that it behaves like Sequence.index.
    if not callable(getattr(iterable, "index", None)):
        return list(iterable)
    # Return a shallow copy: the result is stored in a widget serde instance
    # and reused on later script runs, so mutations of the options object the
    # caller passed in must not affect the widget.
    # (See https://github.com/streamlit/streamlit/issues/7534)
    return copy.copy(cast(Sequence[V_co], iterable))
178,558 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.

    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)

    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """

    def __repr__(self) -> str:
        # Delegate to the shared util helper so exceptions repr consistently
        # with other Streamlit objects.
        return util.repr_(self)
The provided code snippet includes necessary dependencies for implementing the `check_python_comparable` function. Write a Python function `def check_python_comparable(seq: Sequence[Any]) -> None` to solve the following problem:
Check if the sequence elements support "python comparison". That means that the equality operator (==) returns a boolean value. Which is not True for e.g. numpy arrays and pandas series.
Here is the function:
def check_python_comparable(seq: Sequence[Any]) -> None:
    """Validate that the elements of *seq* support plain Python comparison.

    "Support" means that the equality operator (==) between two elements
    evaluates to something coercible to a bool. That is not the case for
    e.g. numpy arrays and pandas series, where == is element-wise.

    Raises
    ------
    StreamlitAPIException
        If coercing the comparison result to bool fails.
    """
    try:
        # Probing the first element is considered representative for the
        # whole sequence.
        bool(seq[0] == seq[0])
    except LookupError:
        # Empty sequence: nothing to compare, so the check passes.
        pass
    except ValueError:
        raise StreamlitAPIException(
            "Invalid option type provided. Options must be comparable, returning a "
            f"boolean when used with *==*. \n\nGot **{type(seq[0]).__name__}**, "
            "which cannot be compared. Refactor your code to use elements of "
            "comparable types as options, e.g. use indices instead."
        )
178,559 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
The provided code snippet includes necessary dependencies for implementing the `is_pandas_version_less_than` function. Write a Python function `def is_pandas_version_less_than(v: str) -> bool` to solve the following problem:
Return True if the current Pandas version is less than the input version. Parameters ---------- v : str Version string, e.g. "0.25.0" Returns ------- bool
Here is the function:
def is_pandas_version_less_than(v: str) -> bool:
    """Return True if the current Pandas version is less than the input version.

    Parameters
    ----------
    v : str
        Version string, e.g. "0.25.0"

    Returns
    -------
    bool
    """
    from packaging import version

    import pandas as pd

    installed = version.parse(pd.__version__)
    threshold = version.parse(v)
    return installed < threshold
178,560 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
class DataFormat(Enum):
    """DataFormat is used to determine the format of the data."""

    # NOTE: values come from auto() and therefore depend on declaration order.
    # Compare members by identity/name; never persist the numeric values.
    UNKNOWN = auto()
    EMPTY = auto()  # None
    PANDAS_DATAFRAME = auto()  # pd.DataFrame
    PANDAS_SERIES = auto()  # pd.Series
    PANDAS_INDEX = auto()  # pd.Index
    NUMPY_LIST = auto()  # np.array[Scalar]
    NUMPY_MATRIX = auto()  # np.array[List[Scalar]]
    PYARROW_TABLE = auto()  # pyarrow.Table
    SNOWPARK_OBJECT = auto()  # Snowpark DataFrame, Table, List[Row]
    PYSPARK_OBJECT = auto()  # pyspark.DataFrame
    PANDAS_STYLER = auto()  # pandas Styler
    LIST_OF_RECORDS = auto()  # List[Dict[str, Scalar]]
    LIST_OF_ROWS = auto()  # List[List[Scalar]]
    LIST_OF_VALUES = auto()  # List[Scalar]
    TUPLE_OF_VALUES = auto()  # Tuple[Scalar]
    SET_OF_VALUES = auto()  # Set[Scalar]
    COLUMN_INDEX_MAPPING = auto()  # {column: {index: value}}
    COLUMN_VALUE_MAPPING = auto()  # {column: List[values]}
    COLUMN_SERIES_MAPPING = auto()  # {column: Series(values)}
    KEY_VALUE_DICT = auto()  # {index: value}
def is_snowpark_data_object(obj: object) -> bool:
    """True if obj is a Snowpark DataFrame or Table, or a non-empty list
    containing Snowpark Row objects; False otherwise."""
    if is_type(obj, _SNOWPARK_TABLE_TYPE_STR) or is_type(obj, _SNOWPARK_DF_TYPE_STR):
        return True

    # A collected Snowpark result is a list of Row objects; probe only the
    # first element, which is assumed representative.
    if not isinstance(obj, list) or len(obj) < 1:
        return False

    first_row = obj[0]
    if not hasattr(first_row, "__class__"):
        return False

    return is_type(first_row, _SNOWPARK_DF_ROW_TYPE_STR)
def is_pyspark_data_object(obj: object) -> bool:
    """True if obj is of type pyspark.sql.dataframe.DataFrame"""
    if not is_type(obj, _PYSPARK_DF_TYPE_STR):
        return False
    # Additionally require a callable toPandas so the frame can be
    # materialized later.
    return callable(getattr(obj, "toPandas", None))
def is_list_of_scalars(data: Iterable[Any]) -> bool:
    """Check if the list only contains scalar values."""
    from pandas.api.types import infer_dtype

    # infer_dtype classifies the entire collection in one pass. Overview of
    # everything interpreted as scalar:
    # https://pandas.pydata.org/docs/reference/api/pandas.api.types.is_scalar.html
    return infer_dtype(data, skipna=True) not in ("mixed", "unknown-array")
def is_pandas_styler(obj: object) -> TypeGuard[Styler]:
    # Detects a pandas Styler via its type string (_PANDAS_STYLER_TYPE_STR),
    # presumably to avoid importing pandas up front — confirm against is_type.
    return is_type(obj, _PANDAS_STYLER_TYPE_STR)
The provided code snippet includes necessary dependencies for implementing the `determine_data_format` function. Write a Python function `def determine_data_format(input_data: Any) -> DataFormat` to solve the following problem:
Determine the data format of the input data. Parameters ---------- input_data : Any The input data to determine the data format of. Returns ------- DataFormat The data format of the input data.
Here is the function:
def determine_data_format(input_data: Any) -> DataFormat:
    """Determine the data format of the input data.

    Parameters
    ----------
    input_data : Any
        The input data to determine the data format of.

    Returns
    -------
    DataFormat
        The data format of the input data.
    """
    # Heavy imports are deferred to call time.
    import numpy as np
    import pandas as pd
    import pyarrow as pa

    # The order of these probes is significant: more specific checks come
    # before more general ones (e.g. DataFrame before Series/Index).
    if input_data is None:
        return DataFormat.EMPTY
    elif isinstance(input_data, pd.DataFrame):
        return DataFormat.PANDAS_DATAFRAME
    elif isinstance(input_data, np.ndarray):
        if len(input_data.shape) == 1:
            # For technical reasons, we need to distinguish one
            # one-dimensional numpy array from multidimensional ones.
            return DataFormat.NUMPY_LIST
        return DataFormat.NUMPY_MATRIX
    elif isinstance(input_data, pa.Table):
        return DataFormat.PYARROW_TABLE
    elif isinstance(input_data, pd.Series):
        return DataFormat.PANDAS_SERIES
    elif isinstance(input_data, pd.Index):
        return DataFormat.PANDAS_INDEX
    elif is_pandas_styler(input_data):
        return DataFormat.PANDAS_STYLER
    elif is_snowpark_data_object(input_data):
        return DataFormat.SNOWPARK_OBJECT
    elif is_pyspark_data_object(input_data):
        return DataFormat.PYSPARK_OBJECT
    elif isinstance(input_data, (list, tuple, set)):
        if is_list_of_scalars(input_data):
            # -> one-dimensional data structure
            if isinstance(input_data, tuple):
                return DataFormat.TUPLE_OF_VALUES
            if isinstance(input_data, set):
                return DataFormat.SET_OF_VALUES
            return DataFormat.LIST_OF_VALUES
        else:
            # -> Multi-dimensional data structure
            # This should always contain at least one element,
            # otherwise the values type from infer_dtype would have been empty
            first_element = next(iter(input_data))
            if isinstance(first_element, dict):
                return DataFormat.LIST_OF_RECORDS
            if isinstance(first_element, (list, tuple, set)):
                return DataFormat.LIST_OF_ROWS
            # Anything else falls through to UNKNOWN below.
    elif isinstance(input_data, dict):
        if not input_data:
            return DataFormat.KEY_VALUE_DICT
        # Always true at this point (the empty dict returned above); kept as
        # an explicit guard around the first-value probe.
        if len(input_data) > 0:
            first_value = next(iter(input_data.values()))
            if isinstance(first_value, dict):
                return DataFormat.COLUMN_INDEX_MAPPING
            if isinstance(first_value, (list, tuple)):
                return DataFormat.COLUMN_VALUE_MAPPING
            if isinstance(first_value, pd.Series):
                return DataFormat.COLUMN_SERIES_MAPPING
        # In the future, we could potentially also support the tight & split formats here
        if is_list_of_scalars(input_data.values()):
            # Only use the key-value dict format if the values are only scalar values
            return DataFormat.KEY_VALUE_DICT
    return DataFormat.UNKNOWN
178,561 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
class DataFormat(Enum):
    """DataFormat is used to determine the format of the data."""

    # NOTE: values come from auto() and therefore depend on declaration order.
    # Compare members by identity/name; never persist the numeric values.
    UNKNOWN = auto()
    EMPTY = auto()  # None
    PANDAS_DATAFRAME = auto()  # pd.DataFrame
    PANDAS_SERIES = auto()  # pd.Series
    PANDAS_INDEX = auto()  # pd.Index
    NUMPY_LIST = auto()  # np.array[Scalar]
    NUMPY_MATRIX = auto()  # np.array[List[Scalar]]
    PYARROW_TABLE = auto()  # pyarrow.Table
    SNOWPARK_OBJECT = auto()  # Snowpark DataFrame, Table, List[Row]
    PYSPARK_OBJECT = auto()  # pyspark.DataFrame
    PANDAS_STYLER = auto()  # pandas Styler
    LIST_OF_RECORDS = auto()  # List[Dict[str, Scalar]]
    LIST_OF_ROWS = auto()  # List[List[Scalar]]
    LIST_OF_VALUES = auto()  # List[Scalar]
    TUPLE_OF_VALUES = auto()  # Tuple[Scalar]
    SET_OF_VALUES = auto()  # Set[Scalar]
    COLUMN_INDEX_MAPPING = auto()  # {column: {index: value}}
    COLUMN_VALUE_MAPPING = auto()  # {column: List[values]}
    COLUMN_SERIES_MAPPING = auto()  # {column: Series(values)}
    KEY_VALUE_DICT = auto()  # {index: value}
def _unify_missing_values(df: DataFrame) -> DataFrame:
"""Unify all missing values in a DataFrame to None.
Pandas uses a variety of values to represent missing values, including np.nan,
NaT, None, and pd.NA. This function replaces all of these values with None,
which is the only missing value type that is supported by all data
"""
import numpy as np
return df.fillna(np.nan).replace([np.nan], [None])
The provided code snippet includes necessary dependencies for implementing the `convert_df_to_data_format` function. Write a Python function `def convert_df_to_data_format( df: DataFrame, data_format: DataFormat ) -> ( DataFrame | Series[Any] | pa.Table | np.ndarray[Any, np.dtype[Any]] | tuple[Any] | list[Any] | set[Any] | dict[str, Any] )` to solve the following problem:
Convert a dataframe to the specified data format. Parameters ---------- df : pd.DataFrame The dataframe to convert. data_format : DataFormat The data format to convert to. Returns ------- pd.DataFrame, pd.Series, pyarrow.Table, np.ndarray, list, set, tuple, or dict. The converted dataframe.
Here is the function:
def convert_df_to_data_format(
    df: DataFrame, data_format: DataFormat
) -> (
    DataFrame
    | Series[Any]
    | pa.Table
    | np.ndarray[Any, np.dtype[Any]]
    | tuple[Any]
    | list[Any]
    | set[Any]
    | dict[str, Any]
):
    """Convert a dataframe to the specified data format.

    Parameters
    ----------
    df : pd.DataFrame
        The dataframe to convert.

    data_format : DataFormat
        The data format to convert to.

    Returns
    -------
    pd.DataFrame, pd.Series, pyarrow.Table, np.ndarray, list, set, tuple, or dict.
        The converted dataframe.

    Raises
    ------
    ValueError
        If a single-column format is requested for a multi-column frame,
        or if data_format is unsupported.
    """
    # Formats whose canonical representation is the dataframe itself:
    if data_format in (
        DataFormat.EMPTY,
        DataFormat.PANDAS_DATAFRAME,
        DataFormat.SNOWPARK_OBJECT,
        DataFormat.PYSPARK_OBJECT,
        DataFormat.PANDAS_INDEX,
        DataFormat.PANDAS_STYLER,
    ):
        return df

    if data_format == DataFormat.NUMPY_LIST:
        import numpy as np

        # One-dimensional: convert only the first column. Calling to_numpy()
        # on the full DataFrame would yield [[1], [2]] instead of [1, 2].
        return np.ndarray(0) if df.empty else df.iloc[:, 0].to_numpy()

    if data_format == DataFormat.NUMPY_MATRIX:
        import numpy as np

        return np.ndarray(0) if df.empty else df.to_numpy()

    if data_format == DataFormat.PYARROW_TABLE:
        import pyarrow as pa

        return pa.Table.from_pandas(df)

    if data_format == DataFormat.PANDAS_SERIES:
        # A series is only well-defined for a single-column frame.
        if len(df.columns) != 1:
            raise ValueError(
                f"DataFrame is expected to have a single column but has {len(df.columns)}."
            )
        return df[df.columns[0]]

    if data_format == DataFormat.LIST_OF_RECORDS:
        return _unify_missing_values(df).to_dict(orient="records")

    if data_format == DataFormat.LIST_OF_ROWS:
        # to_numpy converts the dataframe to a list of rows
        return _unify_missing_values(df).to_numpy().tolist()

    if data_format == DataFormat.COLUMN_INDEX_MAPPING:
        return _unify_missing_values(df).to_dict(orient="dict")

    if data_format == DataFormat.COLUMN_VALUE_MAPPING:
        return _unify_missing_values(df).to_dict(orient="list")

    if data_format == DataFormat.COLUMN_SERIES_MAPPING:
        return df.to_dict(orient="series")

    if data_format in (
        DataFormat.LIST_OF_VALUES,
        DataFormat.TUPLE_OF_VALUES,
        DataFormat.SET_OF_VALUES,
    ):
        df = _unify_missing_values(df)
        if len(df.columns) > 1:
            raise ValueError(
                f"DataFrame is expected to have a single column but has {len(df.columns)}."
            )
        # Zero-column frames deliberately produce an empty collection.
        values = df[df.columns[0]].tolist() if len(df.columns) == 1 else []
        if data_format == DataFormat.TUPLE_OF_VALUES:
            return tuple(values)
        if data_format == DataFormat.SET_OF_VALUES:
            return set(values)
        return values

    if data_format == DataFormat.KEY_VALUE_DICT:
        df = _unify_missing_values(df)
        # The key is expected to be the index -> first column as {index: value}.
        return dict() if df.empty else df.iloc[:, 0].to_dict()

    raise ValueError(f"Unsupported input data format: {data_format}")
178,562 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
def to_key(key: None) -> None:
... | null |
178,563 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
Key: TypeAlias = Union[str, int]
def to_key(key: Key) -> str:
... | null |
178,564 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
Key: TypeAlias = Union[str, int]
def to_key(key: Key | None) -> str | None:
    """Normalize a user-supplied widget key: None stays None, any other
    value is coerced to its string form."""
    return None if key is None else str(key)
178,565 | from __future__ import annotations
import contextlib
import copy
import math
import re
import types
from enum import Enum, EnumMeta, auto
from typing import (
TYPE_CHECKING,
Any,
Final,
Iterable,
Literal,
NamedTuple,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
cast,
get_args,
overload,
)
from typing_extensions import TypeAlias, TypeGuard
import streamlit as st
from streamlit import config, errors
from streamlit import logger as _logger
from streamlit import string_util
from streamlit.errors import StreamlitAPIException
_LOGGER = _logger.get_logger(__name__)
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.

    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)

    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """

    def __repr__(self) -> str:
        # Delegate to the shared util helper so exceptions repr consistently
        # with other Streamlit objects.
        return util.repr_(self)
def maybe_raise_label_warnings(label: str | None, label_visibility: str | None):
    """Warn about empty widget labels and validate the label_visibility value.

    Parameters
    ----------
    label : str | None
        The widget label. Empty/None labels are discouraged for accessibility.
    label_visibility : str | None
        Must be one of "visible", "hidden" or "collapsed".

    Raises
    ------
    StreamlitAPIException
        If label_visibility is not one of the supported options.
    """
    if not label:
        # Hide a real label via label_visibility instead of passing "".
        _LOGGER.warning(
            "`label` got an empty value. This is discouraged for accessibility "
            "reasons and may be disallowed in the future by raising an exception. "
            "Please provide a non-empty label and hide it with label_visibility "
            "if needed."
        )
    # NOTE(review): this check runs unconditionally, so label_visibility=None
    # always raises here — confirm callers always pass an explicit value.
    if label_visibility not in ("visible", "hidden", "collapsed"):
        raise errors.StreamlitAPIException(
            f"Unsupported label_visibility option '{label_visibility}'. "
            f"Valid values are 'visible', 'hidden' or 'collapsed'."
        )
178,566 | import streamlit as st
st.expander("Empty expander")
with st.expander("Expander with number input", expanded=True):
# We deliberately use a list to implement this for the screenshot
st.write("* Example list item")
value = st.number_input("number", value=1.0, key="number")
st.text(st.session_state.number)
if st.button("Print State Value"):
st.text(st.session_state.number)
def update_value():
st.session_state.number = 0 | null |
178,568 | import streamlit as st
from streamlit import runtime
st.write("value 1:", v1)
st.write("value 2:", v2)
st.write("value 3:", v3)
st.write("value 4:", v4)
st.write("value 5:", v5)
st.write("value 6:", v6)
st.write("value 7:", v7)
st.write("value 8:", v8)
st.write("value 10:", v10)
st.write("value 11:", v11)
def on_change():
st.session_state.text_area_changed = True
st.text("text area changed callback") | null |
178,569 | import pandas as pd
import streamlit as st
from streamlit import runtime
st.write("value 1:", v1)
st.write("value 2:", v2)
st.write("value 3:", v3)
st.write("value 4:", v4)
st.write("value 5:", v5)
st.write("value 6:", v6)
st.write("value 7:", v7)
st.write("value 8:", v8)
st.write("value 9:", v9)
st.write("value 10:", v10)
st.write("value 11:", v11)
st.write("value 13:", v13)
def on_change():
st.session_state.radio_changed = True
st.text("Radio widget callback triggered") | null |
178,570 | import random
import numpy as np
import pandas as pd
import streamlit as st
def highlight_first(value):
    """Styler cell callback: yellow background for cells equal to 0, no style otherwise."""
    if value == 0:
        return "background-color: yellow"
    return ""
178,571 | import random
import numpy as np
import pandas as pd
import streamlit as st
def style_negative(v, props=""):
    """Styler cell callback: apply *props* to strictly negative values, None otherwise."""
    # Written as "v < 0 -> props" (not inverted) so NaN comparisons, which are
    # falsy either way, still yield None.
    if v < 0:
        return props
    return None
178,572 | import random
import numpy as np
import pandas as pd
import streamlit as st
np.random.seed(0)
def highlight_max(s, props=""):
    """Styler column callback: apply *props* where s equals its NaN-ignoring maximum."""
    column_max = np.nanmax(s.values)
    return np.where(s == column_max, props, "")
178,573 | import random
import numpy as np
import pandas as pd
import streamlit as st
def rain_condition(v):
    """Map a rainfall amount to a label: Dry (< 1.75), Rain (< 2.75), else Heavy Rain."""
    # Ordered thresholds; the first one that v falls under wins.
    for limit, label in ((1.75, "Dry"), (2.75, "Rain")):
        if v < limit:
            return label
    return "Heavy Rain"
def make_pretty(styler):
    """Styler pipeline: add a caption, rain-condition labels and a gradient.

    Intended for DataFrame.style.pipe(make_pretty). Mutates and returns the
    given Styler.
    """
    styler.set_caption("Weather Conditions")
    # Display each cell via rain_condition (Dry / Rain / Heavy Rain) while the
    # underlying numeric values still drive the gradient below.
    styler.format(rain_condition)
    styler.background_gradient(axis=None, vmin=1, vmax=5, cmap="YlGnBu")
    return styler
178,574 | import time
import numpy as np
import pandas as pd
import streamlit as st
np.random.seed(0)
_LOREM_IPSUM = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut
labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco
laboris nisi ut aliquip ex ea commodo consequat.
"""
def stream_example():
    """Generator for st.write_stream demos: words, then a dataframe, then words.

    Yields one word at a time with a short delay to simulate token streaming.
    """
    for word in _LOREM_IPSUM.split():
        yield word + " "
        time.sleep(0.02)

    # A non-string chunk in the middle of the stream: write_stream renders it
    # the way st.write would.
    yield pd.DataFrame(
        np.random.randn(5, 10),
        columns=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
    )

    for word in "This is the end of the stream.".split():
        yield word + " "
        time.sleep(0.02)
178,575 | import asyncio
import contextlib
def context_mgr():
try:
yield
finally:
pass | null |
178,576 | import asyncio
import contextlib
def func(value):
value | null |
178,577 | import asyncio
import contextlib
async def async_func(value):
value | null |
178,578 | import asyncio
import contextlib
async def async_for():
async def async_iter():
yield
async for _ in async_iter():
"ASYNC FOR" | null |
178,579 | import asyncio
import contextlib
async def async_with():
@contextlib.asynccontextmanager
async def async_context_mgr():
try:
yield
finally:
pass
async with async_context_mgr():
"ASYNC WITH" | null |
178,580 | import asyncio
import contextlib
The provided code snippet includes necessary dependencies for implementing the `docstrings` function. Write a Python function `def docstrings()` to solve the following problem:
Docstring. Should not be printed.
Here is the function:
def docstrings():
    """Docstring. Should not be printed."""

    # Test fixture: every docstring in this function is asserted to be
    # stripped from echoed output, so they must stay byte-for-byte intact.
    def nested():
        """Multiline docstring.
        Should not be printed."""
        pass

    class Foo(object):
        """Class docstring. Should not be printed."""

        pass

    nested()
178,581 | import asyncio
import contextlib
The provided code snippet includes necessary dependencies for implementing the `my_func` function. Write a Python function `def my_func()` to solve the following problem:
my_func: this help block should be printed.
Here is the function:
def my_func():
    """my_func: this help block should be printed."""
    # Test fixture: st.help is expected to surface the docstring above.
    pass
178,582 | import streamlit as st
from streamlit import runtime
st.write("toggle 1 - value:", i1)
st.write("toggle 2 - value:", i2)
st.write("toggle 3 - value:", i3)
st.write("toggle 5 - value:", i5)
st.write("toggle 6 - value:", i6)
st.write("toggle 7 - value:", i7)
st.write("toggle 8 - value:", i8)
def on_change():
st.session_state.toggle_clicked = True | null |
178,583 | import streamlit as st
from streamlit import runtime
st.write("checkbox 1 - value:", i1)
st.write("checkbox 2 - value:", i2)
st.write("checkbox 3 - value:", i3)
st.write("checkbox 5 - value:", i5)
st.write("checkbox 6 - value:", i6)
st.write("checkbox 7 - value:", i7)
st.write("checkbox 8 - value:", i8)
def on_change():
st.session_state.checkbox_clicked = True | null |
178,584 | import streamlit as st
from streamlit import runtime
st.write("number input 1 (default) - value: ", v1)
st.write("number input 2 (value=1) - value: ", v2)
st.write("number input 3 (min & max) - value: ", v3)
st.write("number input 4 (step=2) - value: ", v4)
st.write("number input 5 (max=10) - value: ", v5)
st.write("number input 6 (disabled=True) - value: ", v6)
st.write("number input 7 (label=hidden) - value: ", v7)
st.write("number input 8 (label=collapsed) - value: ", v8)
st.write("number input 11 (value=None) - value: ", v11)
if "number_input_12" not in st.session_state:
st.session_state["number_input_12"] = 10
st.write("number input 12 (value from state & min=1) - value: ", v12)
def on_change():
st.session_state.number_input_changed = True | null |
178,585 | import io
from collections import namedtuple
from dataclasses import dataclass
from datetime import datetime
import altair as alt
import graphviz
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
import pydeck as pdk
from PIL import Image
import streamlit as st
def stream_text():
yield "This is "
yield "streamed text" | null |
178,586 | from datetime import date, datetime
import streamlit as st
from streamlit import runtime
st.write("Value 1:", v1)
st.write("Value 2:", v2)
st.write("Value 3:", v3)
st.write("Value 4:", v4)
st.write("Value 5:", v5)
st.write("Value 6:", v6)
st.write("Value 7:", v7)
st.write("Value 8:", v8)
st.write("Value 9:", v9)
st.write("Value 10:", v10)
st.write("Value 11:", v11)
st.write("Value 13:", v13)
if "date_input_14" not in st.session_state:
st.session_state["date_input_14"] = date(1970, 2, 3)
st.write("Value 14:", v14)
def on_change():
st.session_state.date_input_changed = True
st.text("Date input changed callback") | null |
178,587 | import pandas as pd
import streamlit as st
from streamlit import runtime
st.write("value 1:", v1)
st.write("value 2:", v2)
st.write("value 3:", v3)
st.write("value 4:", v4)
st.write("value 5:", v5)
st.write("value 6:", v6)
st.write("value 7:", v7)
st.write("value 9:", v9)
st.write("value 10:", v10)
st.write("value 11:", v11)
if "selectbox_12" not in st.session_state:
st.session_state["selectbox_12"] = "female"
st.write("value 12:", v12)
def on_change():
st.session_state.selectbox_changed = True
st.text("Selectbox widget callback triggered") | null |
178,588 | import streamlit as st
from streamlit import runtime
st.write("value 1:", v1)
st.write("value 2:", v2)
st.write("value 3:", v3)
st.write("value 4:", v4)
st.write("value 5:", v5)
st.write("value 6:", v6)
st.write("value 7:", v7)
st.write("value 8:", v8)
st.write("value 10:", v10)
st.write("value 11:", v11)
def on_change():
st.session_state.text_input_changed = True
st.text("Text input changed callback") | null |
178,589 | import streamlit as st
from streamlit import runtime
st.write("Chat input 1 (inline) - value:", v1)
st.write("Chat input 2 (in column, disabled) - value:", v2)
st.write("Chat input 4 (bottom, max_chars) - value:", v4)
def on_submit():
st.text("chat input submitted") | null |
178,590 | from typing import Any, List
import streamlit as st
from streamlit import runtime
from tests.streamlit import pyspark_mocks
st.text(f"value 1: {i1}")
st.text(f"value 2: {i2}")
st.text(f"value 3: {i3}")
st.text(f"value 4: {i4}")
st.text(f"value 5: {i5}")
st.text(f"value 6: {i6}")
st.text(f"value 7: {i7}")
st.text(f"value 8: {i8}")
st.text(f"value 9: {i9}")
with st.form("my_max_selections_ms_in_form"):
i10 = st.multiselect(
"multiselect 10", options, max_selections=1, key="multiselect 10"
)
st.text(f"value 10: {i10}")
submitted = st.form_submit_button("Submit")
st.multiselect("PySpark DataFrame", options=pyspark_mocks.DataFrame())
def set_multiselect_9_to_have_bad_state():
if "multiselect 9" in st.session_state:
st.session_state["multiselect 9"] = ["male", "female"] | null |
178,591 | from typing import Any, List
import streamlit as st
from streamlit import runtime
from tests.streamlit import pyspark_mocks
st.text(f"value 1: {i1}")
st.text(f"value 2: {i2}")
st.text(f"value 3: {i3}")
st.text(f"value 4: {i4}")
st.text(f"value 5: {i5}")
st.text(f"value 6: {i6}")
st.text(f"value 7: {i7}")
st.text(f"value 8: {i8}")
st.text(f"value 9: {i9}")
with st.form("my_max_selections_ms_in_form"):
i10 = st.multiselect(
"multiselect 10", options, max_selections=1, key="multiselect 10"
)
st.text(f"value 10: {i10}")
submitted = st.form_submit_button("Submit")
st.multiselect("PySpark DataFrame", options=pyspark_mocks.DataFrame())
def on_change():
st.session_state.multiselect_changed = True | null |
178,592 | import streamlit as st
def on_click_4():
def on_click_5():
on_click_4() | null |
178,593 | import streamlit as st
st.subheader("Control Panel", divider="blue")
if "tabs" not in st.session_state:
st.session_state["tabs"] = ["Tab 1", "Tab 2"]
if "add_tab" not in st.session_state:
st.session_state["add_tab"] = False
if "remove_1" not in st.session_state:
st.session_state["remove_1"] = False
if "remove_2" not in st.session_state:
st.session_state["remove_2"] = False
if "change" not in st.session_state:
st.session_state["change"] = False
st.subheader("Tabs Example", divider="green")
def reset():
st.session_state.clear() | null |
178,594 | import streamlit as st
from streamlit import runtime
st.write("value 2:", i3)
st.write("value 3:", i4)
st.write("value 4:", i5)
st.button("button 5 (container_width)", use_container_width=True)
st.button(
"button 6 (container_width + help)", use_container_width=True, help="help text"
)
st.button("_button 7_ (**styled** :green[label])")
def on_click(x, y):
    """Button callback: count clicks and remember the last (x, y) arguments
    in session state."""
    # Lazily initialize the counter on first click.
    if "click_count" not in st.session_state:
        st.session_state.click_count = 0

    st.session_state.click_count += 1
    st.session_state.x = x
    st.session_state.y = y
178,595 | from datetime import datetime, time
import streamlit as st
from streamlit import runtime
st.write("Value 1:", v1)
st.write("Value 2:", v2)
st.write("Value 3:", v3)
st.write("Value 4:", v4)
st.write("Value 5:", v5)
st.write("Value 7:", v7)
st.write("Value 8:", v8)
if "time_input_9" not in st.session_state:
st.session_state["time_input_9"] = time(8, 50)
st.write("Value 9:", v9)
def on_change():
st.session_state.time_input_changed = True
st.text("Time input callback triggered") | null |
178,597 | import fileinput
import os
import re
import sys
import packaging.version
import semver
The provided code snippet includes necessary dependencies for implementing the `verify_pep440` function. Write a Python function `def verify_pep440(version)` to solve the following problem:
Verify if version is PEP440 compliant. https://github.com/pypa/packaging/blob/16.7/packaging/version.py#L191 We might need pre, post, alpha, rc in the future so might as well use an object that does all that. This verifies it's a valid version.
Here is the function:
def verify_pep440(version):
    """Verify if version is PEP440 compliant.

    https://github.com/pypa/packaging/blob/16.7/packaging/version.py#L191
    We might need pre, post, alpha, rc in the future so might as well
    use an object that does all that. This verifies it's a valid version.
    """
    try:
        parsed = packaging.version.Version(version)
    except packaging.version.InvalidVersion:
        # Re-raise unchanged; the caller is expected to abort on bad input.
        raise
    return parsed
178,598 | import fileinput
import os
import re
import sys
import packaging.version
import semver
The provided code snippet includes necessary dependencies for implementing the `verify_semver` function. Write a Python function `def verify_semver(version)` to solve the following problem:
Verify if version is compliant with semantic versioning. https://semver.org/
Here is the function:
def verify_semver(version):
    """Verify that *version* complies with semantic versioning (https://semver.org/).

    :param version: version string to validate
    :returns: the normalized semver string
    :raises ValueError: if *version* is not valid semver
    """
    # VersionInfo.parse raises ValueError on malformed input; the previous
    # catch-and-re-raise of the same exception was a no-op, so let it propagate.
    return str(semver.VersionInfo.parse(version))
178,599 | import fileinput
import os
import re
import sys
import packaging.version
import semver
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
The provided code snippet includes necessary dependencies for implementing the `update_files` function. Write a Python function `def update_files(data, version)` to solve the following problem:
Update files with new version number.
Here is the function:
def update_files(data, version):
    """Update files in-place with a new version number.

    :param data: maps relative filename -> regex with named groups ``pre``
        and ``post`` surrounding the version string to replace
    :param version: the new version string
    :raises Exception: if a file contains no line matching its regex
    """
    for filename, regex in data.items():
        filename = os.path.join(BASE_DIR, filename)
        matched = False
        pattern = re.compile(regex)
        for line in fileinput.input(filename, inplace=True):
            # Strip the trailing newline up front: print() re-adds exactly one,
            # so unmatched lines no longer gain a spurious blank line after
            # them. (This matches the sibling update_files implementation.)
            line = line.rstrip()
            if pattern.match(line):
                matched = True
                line = re.sub(regex, r"\g<pre>%s\g<post>" % version, line)
            print(line)
        if not matched:
            raise Exception('In file "%s", did not find regex "%s"' % (filename, regex))
178,600 | from datetime import datetime
import packaging.version
import pytz
import streamlit.version
The provided code snippet includes necessary dependencies for implementing the `create_tag` function. Write a Python function `def create_tag()` to solve the following problem:
Create tag with updated version, a suffix and date.
Here is the function:
def create_tag():
    """Build a dev tag: the latest version with micro bumped plus today's date."""
    latest = streamlit.version._get_latest_streamlit_version()

    # Bump the micro (patch) component of the latest released version.
    bumped = (latest.major, latest.minor, latest.micro + 1)

    # Suffix with ".dev" plus today's date in US/Pacific time.
    today = datetime.now(pytz.timezone("US/Pacific")).strftime("%Y%m%d")
    tag = "%s.dev%s" % (".".join(str(part) for part in bumped), today)

    # Raises InvalidVersion if the resulting tag is not PEP 440 compliant.
    packaging.version.Version(tag)
    return tag
178,601 | import subprocess
import sys
import textwrap
from pathlib import Path
from typing import List, Tuple
def display_usage():
    """Print the command-line usage/help text for this wrapper script."""
    script_name = Path(__file__).name
    usage_text = textwrap.dedent(
        f"""\
        usage: {script_name} [-h] SUBDIRECTORY ARGS [ARGS ...]
        Runs the program in a subdirectory and fix paths in arguments.
        example:
        When this program is executed with the following command:
        {script_name} frontend/ yarn eslint frontend/src/index.ts
        Then the command will be executed:
        yarn eslint src/index.ts
        and the current working directory will be set to frontend/
        positional arguments:
        SUBDIRECTORY subdirectory within which the subprocess will be executed
        ARGS sequence of program arguments
        optional arguments:
        -h, --help show this help message and exit\
        """
    )
    print(usage_text)
def parse_args() -> Tuple[str, List[str]]:
    """Parse sys.argv into (subdirectory, program arguments), exiting on bad input."""
    argv = sys.argv
    # A lone -h/--help prints usage and exits successfully.
    if len(argv) == 2 and argv[1] in ("-h", "--help"):
        display_usage()
        sys.exit(0)
    # We need at least a subdirectory plus one program argument.
    if len(argv) < 3:
        print("Missing arguments")
        display_usage()
        sys.exit(1)
    print(argv)
    return argv[1], argv[2:]
178,602 | import subprocess
import sys
import textwrap
from pathlib import Path
from typing import List, Tuple
def is_relative_to(path: Path, *other):
def fix_arg(subdirectory: str, arg: str) -> str:
    """Rewrite *arg* relative to *subdirectory* when it is an existing path inside it."""
    candidate = Path(arg)
    if candidate.exists() and is_relative_to(candidate, subdirectory):
        return str(candidate.relative_to(subdirectory))
    # Non-path arguments (or paths outside the subdirectory) pass through as-is.
    return arg
178,603 | import subprocess
import sys
import textwrap
from pathlib import Path
from typing import List, Tuple
def try_as_shell(fixed_args: List[str], subdirectory: str):
    """Fall back to running the command through the shell.

    Windows can't launch node scripts such as "yarn" via the CreateProcess
    WINAPI (it expects a real executable), so when the direct call fails we
    retry the same command line through a shell. Exits the process with the
    command's return code if the shell invocation fails too.
    """
    import shlex

    print("Direct call failed, trying as shell command:")
    command_line = shlex.join(fixed_args)
    print(command_line)
    try:
        subprocess.run(command_line, cwd=subdirectory, check=True, shell=True)
    except subprocess.CalledProcessError as error:
        sys.exit(error.returncode)
178,604 | import requests
def check_for_release_pr(pull):
    """Return the PR's head ref if its head label names a release branch, else None."""
    head = pull["head"]
    if "release/" in head["label"]:
        return head["ref"]
    return None
The provided code snippet includes necessary dependencies for implementing the `get_release_branch` function. Write a Python function `def get_release_branch()` to solve the following problem:
Retrieve the release branch from the release PR
Here is the function:
def get_release_branch():
    """Retrieve the release branch from the release PR.

    :returns: the head ref of the first open PR whose head label contains
        "release/", or None if no such PR exists
    """
    url = "https://api.github.com/repos/streamlit/streamlit/pulls"
    pulls = requests.get(url).json()
    # Response is in an array, must map over each pull (dict).
    for pull in pulls:
        ref = check_for_release_pr(pull)
        # Was `ref != None`; identity comparison is the Python idiom for None.
        if ref is not None:
            return ref
    return None
178,605 | import fileinput
import os
import re
import sys
from typing import Dict
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
The provided code snippet includes necessary dependencies for implementing the `update_files` function. Write a Python function `def update_files(project_name: str, files: Dict[str, str]) -> None` to solve the following problem:
Update files with new project name.
Here is the function:
def update_files(project_name: str, files: Dict[str, str]) -> None:
    """Update files in-place with a new project name.

    :param project_name: replacement project name
    :param files: maps relative filename -> regex with named groups
        ``pre_match`` and ``post_match`` surrounding the name to replace
    :raises Exception: if a file contains no line matching its regex
    """
    for filename, regex in files.items():
        filename = os.path.join(BASE_DIR, filename)
        matched = False
        pattern = re.compile(regex)
        for line in fileinput.input(filename, inplace=True):
            line = line.rstrip()
            if pattern.match(line):
                line = re.sub(
                    regex, rf"\g<pre_match>{project_name}\g<post_match>", line
                )
                matched = True
            print(line)
        if not matched:
            # Name the offending file in the error (the message previously
            # printed a placeholder instead of the actual filename).
            raise Exception(f'In file "{filename}", did not find regex "{regex}"')
178,606 | import os
import sys
import requests
The provided code snippet includes necessary dependencies for implementing the `send_notification` function. Write a Python function `def send_notification()` to solve the following problem:
Create a slack message
Here is the function:
def send_notification():
    """Post a build-status message to the Slack webhook in $SLACK_WEBHOOK.

    Reads the workflow name and message key from sys.argv[1:3] and raises if
    the webhook is missing or Slack rejects the message.
    """
    webhook = os.getenv("SLACK_WEBHOOK")
    if not webhook:
        raise Exception("Unable to retrieve SLACK_WEBHOOK")

    # Human-readable failure descriptions for each nightly step.
    failure_descriptions = {
        "tag": "to create a tag",
        "python": "on python tests",
        "js": "on javascript tests",
        "py_prod": "on python prod dependencies test",
        "cypress": "on cypress tests",
        "playwright": "on playwright tests",
        "build": "to release",
    }

    run_id = os.getenv("RUN_ID")
    workflow, message_key = sys.argv[1], sys.argv[2]
    run_link = f"<https://github.com/streamlit/streamlit/actions/runs/{run_id}|Link to run>"

    payload = None
    if workflow == "nightly":
        failure = failure_descriptions[message_key]
        payload = {"text": f":blobonfire: Nightly build failed {failure} - {run_link}"}
    if workflow == "candidate":
        if message_key == "success":
            payload = {"text": ":rocket: Release Candidate was successful!"}
        else:
            payload = {"text": f":blobonfire: Release Candidate failed - {run_link}"}
    if workflow == "release":
        if message_key == "success":
            payload = {"text": ":rocket: Release was successful!"}
        else:
            payload = {"text": f":blobonfire: Release failed - {run_link}"}

    if payload:
        response = requests.post(webhook, json=payload)
        if response.status_code != 200:
            raise Exception(
                f"Unable to send slack message, HTTP response: {response.text}"
            )
178,607 | import os
import click
auto_run = False
The provided code snippet includes necessary dependencies for implementing the `run_commands` function. Write a Python function `def run_commands(section_header, commands, skip_last_input=False, comment=None)` to solve the following problem:
Run a list of commands, displaying them within the given section.
Here is the function:
def run_commands(section_header, commands, skip_last_input=False, comment=None):
    """Run each shell command in *commands*, echoing progress under *section_header*.

    After every command (except, optionally, the last when *skip_last_input*
    is set) wait for the user to press enter, or 'a' to switch to the
    non-interactive auto-run mode for the rest of the session.
    """
    global auto_run
    total = len(commands)
    for index, command in enumerate(commands, start=1):
        # Announce which command of the section is about to run.
        status = {
            "section_header": section_header,
            "total": total,
            "command": command,
            "v": index,
        }
        click.secho(
            "\nRunning %(section_header)s %(v)s/%(total)s : %(command)s" % status,
            bold=True,
        )
        click.secho("\n%(v)s/%(total)s : %(command)s" % status, fg="yellow", bold=True)
        if comment:
            click.secho(comment)

        os.system(command)

        is_last = index == total
        if not (auto_run or (is_last and skip_last_input)):
            click.secho(
                "Press [enter] to continue or [a] to continue on auto:\n> ", nl=False
            )
            if click.getchar() == "a":
                print("Turning on auto run.")
                auto_run = True
178,608 | import json
import subprocess
import sys
from pathlib import Path
from typing import NoReturn, Set, Tuple, cast
from typing_extensions import TypeAlias
PackageInfo: TypeAlias = Tuple[str, str, str, str, str, str]
ACCEPTABLE_LICENSES = {
"MIT", # https://opensource.org/licenses/MIT
"Apache-2.0", # https://opensource.org/licenses/Apache-2.0
"Apache-2.0 WITH LLVM-exception", # https://spdx.org/licenses/LLVM-exception.html
"0BSD", # https://opensource.org/licenses/0BSD
"BSD-2-Clause", # https://opensource.org/licenses/BSD-2-Clause
"BSD-3-Clause", # https://opensource.org/licenses/BSD-3-Clause
"ISC", # https://opensource.org/licenses/ISC
"CC0-1.0", # https://creativecommons.org/publicdomain/zero/1.0/
"CC-BY-3.0", # https://creativecommons.org/licenses/by/3.0/
"CC-BY-4.0", # https://creativecommons.org/licenses/by/4.0/
"Python-2.0", # https://www.python.org/download/releases/2.0/license/
"Zlib", # https://opensource.org/licenses/Zlib
"Unlicense", # https://unlicense.org/
"WTFPL", # http://www.wtfpl.net/about/
# Dual-licenses are acceptable if at least one of the two licenses is
# acceptable.
"(MIT OR Apache-2.0)",
"(MPL-2.0 OR Apache-2.0)",
"(MIT OR CC0-1.0)",
"(Apache-2.0 OR MPL-1.1)",
"(BSD-3-Clause OR GPL-2.0)",
"(MIT AND BSD-3-Clause)",
"(MIT AND Zlib)",
"(WTFPL OR MIT)",
"(AFL-2.1 OR BSD-3-Clause)",
}
PACKAGE_EXCEPTIONS: Set[PackageInfo] = {
(
# MIT license: https://github.com/mapbox/jsonlint
"@mapbox/jsonlint-lines-primitives",
"2.0.2",
"UNKNOWN",
"git://github.com/mapbox/jsonlint.git",
"http://zaa.ch",
"Zach Carter",
),
(
# Apache 2.0 license: https://github.com/google/flatbuffers
"flatbuffers",
"23.5.26",
"SEE LICENSE IN LICENSE",
"git+https://github.com/google/flatbuffers.git",
"https://google.github.io/flatbuffers/",
"The FlatBuffers project",
),
(
# Mapbox Web SDK license: https://github.com/mapbox/mapbox-gl-js/blob/main/LICENSE.txt
"@plotly/mapbox-gl",
"1.13.4",
"SEE LICENSE IN LICENSE.txt",
"git://github.com/plotly/mapbox-gl-js.git",
"Unknown",
"Unknown",
),
(
# Mapbox Web SDK license: https://github.com/mapbox/mapbox-gl-js/blob/main/LICENSE.txt
"mapbox-gl",
"1.13.3",
"SEE LICENSE IN LICENSE.txt",
"git://github.com/mapbox/mapbox-gl-js.git",
"Unknown",
"Unknown",
),
(
# CC-BY-3.0 license: https://github.com/cartodb/cartocolor#licensing
"cartocolor",
"4.0.2",
"UNKNOWN",
"https://github.com/cartodb/cartocolor",
"http://carto.com/",
"Unknown",
),
(
# Apache-2.0 license: https://github.com/saikocat/colorbrewer/blob/master/LICENSE.txt
"colorbrewer",
"1.0.0",
"Apache*",
"https://github.com/saikocat/colorbrewer",
"http://colorbrewer2.org/",
"Cynthia Brewer",
),
}
def get_license_type(package: PackageInfo) -> str:
    """Return the license type string for a dependency entry.

    *package* is a row from the `yarn licenses` JSON table; judging by the
    PACKAGE_EXCEPTIONS entries its fields are (name, version, license,
    repository, url, author), so the license is the third field.
    """
    return package[2]
def check_licenses(licenses) -> NoReturn:
    """Validate the licenses reported by `yarn licenses` and exit the process.

    Exits 0 when every package's license is acceptable (or explicitly
    excepted), 1 otherwise. Also warns about entries in PACKAGE_EXCEPTIONS
    that no longer match any installed package.
    """
    # `yarn licenses` emits several lines; only the final one holds the JSON
    # table we care about.
    licenses_json = json.loads(licenses[-1])
    assert licenses_json["type"] == "table"

    # Each row of the table describes one installed package.
    packages = [
        cast(PackageInfo, tuple(package)) for package in licenses_json["data"]["body"]
    ]

    # Warn about exceptions that no longer correspond to any dependency so
    # they can be removed.
    unused_exceptions = PACKAGE_EXCEPTIONS.difference(set(packages))
    if unused_exceptions:
        for exception in sorted(unused_exceptions):
            print(f"Unused package exception, please remove: {exception}")

    # Collect packages whose license is neither acceptable nor excepted.
    bad_packages = [
        package
        for package in packages
        if (get_license_type(package) not in ACCEPTABLE_LICENSES)
        and (package not in PACKAGE_EXCEPTIONS)
        # workspace aggregator is yarn workspaces
        and "workspace-aggregator" not in package[0]
    ]

    if bad_packages:
        for package in bad_packages:
            print(f"Unacceptable license: '{get_license_type(package)}' (in {package})")
        print(f"{len(bad_packages)} unacceptable licenses")
        sys.exit(1)

    print("No unacceptable licenses")
    sys.exit(0)
178,609 | import os
import requests
The provided code snippet includes necessary dependencies for implementing the `create_release` function. Write a Python function `def create_release()` to solve the following problem:
Create a release from the Git Tag
Here is the function:
def create_release():
    """Create a GitHub release for the tag in $GIT_TAG, with generated notes.

    Requires GIT_TAG and GH_TOKEN in the environment; raises on any failed
    GitHub API call.
    """
    tag = os.getenv("GIT_TAG")
    access_token = os.getenv("GH_TOKEN")
    if not tag:
        raise Exception("Unable to retrieve GIT_TAG environment variable")

    url = "https://api.github.com/repos/streamlit/streamlit/releases"
    header = {"Authorization": f"token {access_token}"}

    # Look up the most recent release so the notes can diff against it.
    response = requests.get(f"{url}/latest", headers=header)
    if response.status_code != 200:
        raise Exception(f"Unable get the latest release: {response.text}")
    previous_tag_name = response.json()["tag_name"]

    # Ask GitHub to generate release notes between the two tags.
    payload = {"tag_name": tag, "previous_tag_name": previous_tag_name}
    response = requests.post(f"{url}/generate-notes", json=payload, headers=header)
    if response.status_code != 200:
        raise Exception(f"Unable generate the latest release notes: {response.text}")
    body = response.json()["body"]

    # Publish the release itself with the generated notes.
    payload = {"tag_name": tag, "name": tag, "body": body}
    response = requests.post(url, json=payload, headers=header)
    if response.status_code == 201:
        print(f"Successfully created Release {tag}")
    else:
        raise Exception(f"Unable to create release, HTTP response: {response.text}")
178,610 | import collections
import copy
import datetime
import functools
import re
import bson
import sortedcontainers
from .cursor import Cursor, _validate_sort
from .common import support_alert, ASCENDING, DESCENDING, MetaStorageObject
from .errors import (MongitaError, MongitaNotImplementedError, DuplicateKeyError,
InvalidName, OperationFailure)
from .read_concern import ReadConcern
from .results import InsertOneResult, InsertManyResult, DeleteResult, UpdateResult
from .write_concern import WriteConcern
_SUPPORTED_FILTER_OPERATORS = ('$in', '$eq', '$gt', '$gte', '$lt', '$lte', '$ne', '$nin')
class MongitaError(Exception):
pass
The provided code snippet includes necessary dependencies for implementing the `_validate_filter` function. Write a Python function `def _validate_filter(filter)` to solve the following problem:
Validate the 'filter' parameter. This is near the top of most public methods. :param filter dict: :rtype: None
Here is the function:
def _validate_filter(filter):
"""
Validate the 'filter' parameter.
This is near the top of most public methods.
:param filter dict:
:rtype: None
"""
if not isinstance(filter, dict):
raise MongitaError("The filter parameter must be a dict, not %r" % type(filter))
for k in filter.keys():
if not isinstance(k, str):
raise MongitaError("Filter keys must be strings, not %r" % type(filter))
_id = filter.get('_id')
if _id:
if not isinstance(_id, (bson.ObjectId, str, dict)):
raise MongitaError("If present, the '_id' filter must be a bson ObjectId, string, or a dict")
for query_ops in filter.values():
if isinstance(query_ops, dict):
for op in query_ops.keys():
if op.startswith('$') and op not in _SUPPORTED_FILTER_OPERATORS:
raise MongitaError(
"Mongita does not support %r. These filter operators are "
"supported: %r" % (op, _SUPPORTED_FILTER_OPERATORS)) | Validate the 'filter' parameter. This is near the top of most public methods. :param filter dict: :rtype: None |
178,611 | import collections
import copy
import datetime
import functools
import re
import bson
import sortedcontainers
from .cursor import Cursor, _validate_sort
from .common import support_alert, ASCENDING, DESCENDING, MetaStorageObject
from .errors import (MongitaError, MongitaNotImplementedError, DuplicateKeyError,
InvalidName, OperationFailure)
from .read_concern import ReadConcern
from .results import InsertOneResult, InsertManyResult, DeleteResult, UpdateResult
from .write_concern import WriteConcern
_SUPPORTED_UPDATE_OPERATORS = ('$set', '$inc', '$push')
class MongitaError(Exception):
pass
class MongitaNotImplementedError(MongitaError, NotImplementedError):
def create(cls, attr):
msg = "%s.%s is not yet implemented. You can help." % (cls, attr)
return MongitaNotImplementedError(msg)
def create_client(cls, attr):
msg = "%s.%s is not yet implemented. Most MongoClient attributes/methods will never be implemented because this is the key place where Mongita differs. See the Mongita docs." % (cls, attr)
return MongitaNotImplementedError(msg)
def create_depr(cls, attr):
msg = "%s.%s is deprecated and will not be implemented in Mongita." % (cls, attr)
return MongitaNotImplementedError(msg)
The provided code snippet includes necessary dependencies for implementing the `_validate_update` function. Write a Python function `def _validate_update(update)` to solve the following problem:
Validate the 'update' parameter. This is near the top of the public update methods. :param update dict: :rtype: None
Here is the function:
def _validate_update(update):
"""
Validate the 'update' parameter.
This is near the top of the public update methods.
:param update dict:
:rtype: None
"""
if not isinstance(update, dict):
raise MongitaError("The update parameter must be a dict, not %r" % type(update))
for k in update.keys():
if k in _SUPPORTED_UPDATE_OPERATORS:
continue
if k.startswith('$'):
raise MongitaNotImplementedError(
"Mongita does not support %r. These update operators are " \
"supported: %r." % (k, _SUPPORTED_UPDATE_OPERATORS))
raise MongitaError(
"In update operations, you must use one of the supported " \
"update operators %r." % (_SUPPORTED_UPDATE_OPERATORS,))
for update_dict in update.values():
if not isinstance(update_dict, dict):
raise MongitaError("If present, the update operator must be a dict, "
"not %r" % type(update_dict))
_id = update_dict.get('_id')
if _id:
if not isinstance(_id, (str, bson.ObjectId)):
raise MongitaError("The update _id must be a bson ObjectId or a string") | Validate the 'update' parameter. This is near the top of the public update methods. :param update dict: :rtype: None |
178,612 | import collections
import copy
import datetime
import functools
import re
import bson
import sortedcontainers
from .cursor import Cursor, _validate_sort
from .common import support_alert, ASCENDING, DESCENDING, MetaStorageObject
from .errors import (MongitaError, MongitaNotImplementedError, DuplicateKeyError,
InvalidName, OperationFailure)
from .read_concern import ReadConcern
from .results import InsertOneResult, InsertManyResult, DeleteResult, UpdateResult
from .write_concern import WriteConcern
class MongitaError(Exception):
pass
class InvalidName(MongitaError):
pass
The provided code snippet includes necessary dependencies for implementing the `_validate_doc` function. Write a Python function `def _validate_doc(doc)` to solve the following problem:
Validate the 'doc' parameter. This is near the top of the public insert / replace methods. :param doc dict: :rtype: None
Here is the function:
def _validate_doc(doc):
"""
Validate the 'doc' parameter.
This is near the top of the public insert / replace methods.
:param doc dict:
:rtype: None
"""
if not isinstance(doc, dict):
raise MongitaError("The document must be a dict, not %r" % type(doc))
_id = doc.get('_id')
if _id:
if not isinstance(_id, (bson.ObjectId, str)):
raise MongitaError("The document _id must be a bson ObjectId, a string, or not present")
for k in doc.keys():
if not k or k.startswith('$'):
raise InvalidName("All document keys must be truthy and cannot start with '$'.") | Validate the 'doc' parameter. This is near the top of the public insert / replace methods. :param doc dict: :rtype: None |
178,613 | import collections
import copy
import datetime
import functools
import re
import bson
import sortedcontainers
from .cursor import Cursor, _validate_sort
from .common import support_alert, ASCENDING, DESCENDING, MetaStorageObject
from .errors import (MongitaError, MongitaNotImplementedError, DuplicateKeyError,
InvalidName, OperationFailure)
from .read_concern import ReadConcern
from .results import InsertOneResult, InsertManyResult, DeleteResult, UpdateResult
from .write_concern import WriteConcern
def _doc_matches_agg(doc_v, query_ops):
    """
    Return whether an individual document value matches a dict of
    query operations. Usually there will be one query_op but sometimes there
    are many.
    e.g. collection.find({'path.to.doc_v': {'$query_op': query_val}})
    The loop returns False whenever we know for sure that the document is
    not part of the query. At the end return True
    :param doc_v: The value in the doc to compare against
    :param query_ops {$query_op: query_val}:
    :returns: Whether the document value matches all query operators
    :rtype: bool
    """
    # Only treat query_ops as an operator dict when at least one key is a
    # '$'-operator; otherwise it is a literal sub-document to compare (below).
    if any(k.startswith('$') for k in query_ops.keys()):
        for query_op, query_val in query_ops.items():
            if query_op == '$eq':
                if doc_v != query_val:
                    return False
            elif query_op == '$ne':
                if doc_v == query_val:
                    return False
            elif query_op == '$in':
                if not isinstance(query_val, (list, tuple, set)):
                    raise MongitaError("'$in' requires an iterable")
                # For array values, any element overlap counts as a match
                # (presumably _overlap tests set intersection — confirm).
                if not ((isinstance(doc_v, list) and _overlap(doc_v, query_val))
                        or (doc_v in query_val)):
                    return False
            elif query_op == '$nin':
                if not isinstance(query_val, (list, tuple, set)):
                    raise MongitaError("'$nin' requires an iterable")
                if (isinstance(doc_v, list) and _overlap(doc_v, query_val)) \
                        or (doc_v in query_val):
                    return False
            # For the range operators, values of incomparable types
            # (TypeError on <, >, etc.) are treated as non-matching.
            elif query_op == '$lt':
                try:
                    if doc_v >= query_val:
                        return False
                except TypeError:
                    return False
            elif query_op == '$lte':
                try:
                    if doc_v > query_val:
                        return False
                except TypeError:
                    return False
            elif query_op == '$gt':
                try:
                    if doc_v <= query_val:
                        return False
                except TypeError:
                    return False
            elif query_op == '$gte':
                try:
                    if doc_v < query_val:
                        return False
                except TypeError:
                    return False
        # agg_k check is in _validate_filter
        return True
    else:
        # No '$'-operator keys: plain equality against the whole dict.
        return doc_v == query_ops
def _get_item_from_doc(doc, key):
"""
Get an item from the document given a key which might use dot notation.
e.g.
doc = {'deep': {'nested': {'list': ['a', 'b', 'c']}}}
key = 'deep.nested.list.1'
-> 'b'
:param doc dict:
:param key str:
:rtype: value
"""
if '.' in key:
item = doc
for level in key.split('.'):
if isinstance(item, list):
try:
level_int = int(level)
except ValueError:
return None
try:
item = item[level_int]
except IndexError:
return None
elif isinstance(item, dict):
item = item.get(level, {})
else:
return None
return item or None
return doc.get(key)
The provided code snippet includes necessary dependencies for implementing the `_doc_matches_slow_filters` function. Write a Python function `def _doc_matches_slow_filters(doc, slow_filters)` to solve the following problem:
Given an entire doc, return whether that doc matches every filter item in the slow_filters dict. A slow_filter is just the set of filters that we didn't have an index for. :param doc dict: :param slow_filters dict: :rtype: bool
Here is the function:
def _doc_matches_slow_filters(doc, slow_filters):
"""
Given an entire doc, return whether that doc matches every filter item in the
slow_filters dict. A slow_filter is just the set of filters that we didn't
have an index for.
:param doc dict:
:param slow_filters dict:
:rtype: bool
"""
for doc_key, query_ops in slow_filters.items():
if isinstance(query_ops, dict):
doc_v = _get_item_from_doc(doc, doc_key)
if _doc_matches_agg(doc_v, query_ops):
continue
return False
item_from_doc = _get_item_from_doc(doc, doc_key)
if isinstance(item_from_doc, list) and query_ops in item_from_doc:
continue
if item_from_doc == query_ops:
continue
return False
return True | Given an entire doc, return whether that doc matches every filter item in the slow_filters dict. A slow_filter is just the set of filters that we didn't have an index for. :param doc dict: :param slow_filters dict: :rtype: bool |
178,614 | import collections
import copy
import datetime
import functools
import re
import bson
import sortedcontainers
from .cursor import Cursor, _validate_sort
from .common import support_alert, ASCENDING, DESCENDING, MetaStorageObject
from .errors import (MongitaError, MongitaNotImplementedError, DuplicateKeyError,
InvalidName, OperationFailure)
from .read_concern import ReadConcern
from .results import InsertOneResult, InsertManyResult, DeleteResult, UpdateResult
from .write_concern import WriteConcern
def _failed_update_error(update_op, update_op_dict, doc, msg):
    """Build (not raise) the MongitaError describing a failed update operation."""
    operation = {update_op: update_op_dict}
    return MongitaError("Cannot apply operation %r to %r (%s)" % (operation, doc, msg))
def _rightpad(item, desired_length):
"""
Given a list, pad to the desired_length with Nones
This might be slow but it modifies the list in place
:param item list:
:param desired_length int:
:rtype: None
"""
pad_len = desired_length - len(item) + 1
for _ in range(pad_len):
item.append(None)
def _get_datastructure_from_doc(doc, key):
    """
    Get a pass-by-reference data structure from the document so that we can
    update it in-place. This dives deep into the document with the key
    parameter which uses dot notation.
    e.g.
    doc = {'deep': {'nested': {'list': [1, 2, 3]}}}
    key = 'deep.nested.list.5'
    -> a reference to [1, 2, 3, None, None] and 5
    :param doc dict:
    :param key str:
    :returns: the datastructure and the final accessor
    :rtype: list|dict|None, value
    """
    # Plain key: the doc itself is the container.
    if '.' not in key:
        return doc, key
    item = doc
    levels = key.split('.')
    # Walk all but the last segment; the last one is returned as the accessor.
    levels, last_level = levels[:-1], levels[-1]
    for level in levels:
        if isinstance(item, list):
            # List segments must be non-negative integer indices.
            try:
                level_int = int(level)
            except ValueError:
                return None, None
            if level_int < 0:
                return None, None
            try:
                item = item[level_int]
            except IndexError:
                # Index past the end: pad the list so the index exists.
                _rightpad(item, level_int)
                # NOTE(review): the fresh {} substituted for the padded None
                # is NOT written back into the parent list, so writes into it
                # may be lost — confirm this is intended.
                item = item[level_int] or {}
        elif isinstance(item, dict):
            # Auto-vivify missing/scalar intermediate levels as empty dicts.
            if level not in item or not isinstance(item[level], (list, dict)):
                item[level] = {}
            item = item[level]
        else:
            # Can't descend into scalars.
            return None, None
    # If the final container is a list, the accessor must be an int index.
    if isinstance(item, list):
        try:
            last_level = int(last_level)
        except ValueError:
            return None, None
    return item, last_level
The provided code snippet includes necessary dependencies for implementing the `_update_item_in_doc` function. Write a Python function `def _update_item_in_doc(update_op, update_op_dict, doc)` to solve the following problem:
Given an $update_op, a {doc_key: value} update_op_dict, and a doc, Update the doc in-place at doc_key with the update operation. e.g. doc = {'hi': 'ma'} update_op = '$set' update_op_dict {'ma': 'pa'} -> {'hi': 'pa'} :param update_op str: :param update_op_dict {str: value}: :param doc dict: :rtype: None
Here is the function:
def _update_item_in_doc(update_op, update_op_dict, doc):
    """
    Given an $update_op, a {doc_key: value} update_op_dict, and a doc,
    Update the doc in-place at doc_key with the update operation.
    e.g.
    doc = {'hi': 'ma'}
    update_op = '$set'
    update_op_dict {'ma': 'pa'}
    -> {'hi': 'pa'}
    :param update_op str:
    :param update_op_dict {str: value}:
    :param doc dict:
    :rtype: None
    """
    for doc_key, value in update_op_dict.items():
        # ds is the pass-by-reference container; last_key is the index/key
        # within it (or (None, None) if the path can't be resolved).
        ds, last_key = _get_datastructure_from_doc(doc, doc_key)
        # For list containers, make sure the target index exists before writing.
        if isinstance(ds, list):
            _rightpad(ds, last_key)
        if ds is None:
            raise _failed_update_error(update_op, update_op_dict, doc,
                                       "Could not find item")
        if update_op == '$set':
            ds[last_key] = value
        elif update_op == '$inc':
            if not isinstance(value, (int, float)):
                raise _failed_update_error(update_op, update_op_dict, doc,
                                           "Increment was not numeric")
            # NOTE(review): ds.get assumes a dict container here; a list
            # target would raise AttributeError instead — confirm intended.
            elif not isinstance(ds.get(last_key), (int, float)):
                raise _failed_update_error(update_op, update_op_dict, doc,
                                           "Document value was not numeric")
            ds[last_key] += value
        elif update_op == '$push':
            # Append to an existing list, or create a one-element list.
            if isinstance(ds.get(last_key), list):
                ds[last_key].append(value)
            elif last_key not in ds:
                ds[last_key] = [value]
            else:
                raise _failed_update_error(update_op, update_op_dict, doc,
                                           "Document value was not a list")
        # Should never get an update key we don't recognize b/c _validate_update
178,615 | import collections
import copy
import datetime
import functools
import re
import bson
import sortedcontainers
from .cursor import Cursor, _validate_sort
from .common import support_alert, ASCENDING, DESCENDING, MetaStorageObject
from .errors import (MongitaError, MongitaNotImplementedError, DuplicateKeyError,
InvalidName, OperationFailure)
from .read_concern import ReadConcern
from .results import InsertOneResult, InsertManyResult, DeleteResult, UpdateResult
from .write_concern import WriteConcern
def _get_item_from_doc(doc, key):
    """
    Get an item from the document given a key which might use dot notation.
    e.g.
    doc = {'deep': {'nested': {'list': ['a', 'b', 'c']}}}
    key = 'deep.nested.list.1'
    -> 'b'
    :param doc dict:
    :param key str:
    :rtype: value
    """
    if '.' in key:
        item = doc
        for level in key.split('.'):
            if isinstance(item, list):
                # List segments must be valid integer indices.
                try:
                    level_int = int(level)
                except ValueError:
                    return None
                try:
                    item = item[level_int]
                except IndexError:
                    return None
            elif isinstance(item, dict):
                item = item.get(level, {})
            else:
                # Can't descend into scalars.
                return None
        # NOTE: falsy leaf values (0, '', [], False) are normalized to None
        # on the dotted-key path.
        return item or None
    # Plain (non-dotted) key: direct dict lookup, falsy values kept as-is.
    return doc.get(key)
def _make_idx_key(idx_key):
    """
    MongoDB is very liberal when it comes to what keys it can compare on.
    When we get something weird, it makes sense to just store it as a
    hashable key
    :param idx_key value:
    :rtype: hashable value
    """
    # Hashable values can be used directly (wrapped in a sort tuple).
    if isinstance(idx_key, collections.abc.Hashable):
        return _sort_tup(idx_key)
    # Unhashable values are serialized via bson; bson.encode only accepts
    # documents, so non-dict values are wrapped in a single-key document.
    try:
        return _sort_tup(str(bson.encode(idx_key)))
    except TypeError:
        return _sort_tup(str(bson.encode({'idx_key': idx_key})))
def _remove_docs_from_idx_doc(doc_ids, idx_doc):
"""
Update an idx_doc given documents which were just removed
:param doc_ids set[str]:
:param idx_doc {key_str: str, direction: int idx: SortedDict, ...}:
:rtype: None
"""
idx_doc_idx = idx_doc['idx']
for k in idx_doc_idx.keys():
idx_doc_idx[k] -= doc_ids
DESCENDING = -1
The provided code snippet includes necessary dependencies for implementing the `_update_idx_doc_with_new_documents` function. Write a Python function `def _update_idx_doc_with_new_documents(documents, idx_doc)` to solve the following problem:
Update an idx_doc given documents which were just inserted / modified / etc :param documents list[dict]: :param idx_doc {key_str: str, direction: int idx: SortedDict, ...}: :rtype: None
Here is the function:
def _update_idx_doc_with_new_documents(documents, idx_doc):
    """
    Update an idx_doc given documents which were just inserted / modified / etc
    :param documents list[dict]:
    :param idx_doc {key_str: str, direction: int, idx: SortedDict, ...}:
    :rtype: None
    """
    docs = list(documents)
    # drop any stale index entries for these ids before re-adding them
    _remove_docs_from_idx_doc({doc['_id'] for doc in docs}, idx_doc)

    indexed_key = idx_doc['key_str']
    rebuilt = sortedcontainers.SortedDict(idx_doc['idx'])
    for doc in docs:
        value = _get_item_from_doc(doc, indexed_key)
        # a list value is indexed under each element AND under the list
        # as a whole
        if isinstance(value, list):
            for element in value:
                rebuilt.setdefault(_make_idx_key(element), set()).add(doc['_id'])
        rebuilt.setdefault(_make_idx_key(value), set()).add(doc['_id'])

    descending = idx_doc['direction'] == DESCENDING
    idx_doc['idx'] = sortedcontainers.SortedDict(
        sorted(rebuilt.items(), reverse=descending))
178,616 | import collections
import copy
import datetime
import functools
import re
import bson
import sortedcontainers
from .cursor import Cursor, _validate_sort
from .common import support_alert, ASCENDING, DESCENDING, MetaStorageObject
from .errors import (MongitaError, MongitaNotImplementedError, DuplicateKeyError,
InvalidName, OperationFailure)
from .read_concern import ReadConcern
from .results import InsertOneResult, InsertManyResult, DeleteResult, UpdateResult
from .write_concern import WriteConcern
def _sort_func(doc, sort_key):
    """
    Sorter to sort different types according to MongoDB rules
    :param doc dict:
    :param sort_key str:
    :rtype: tuple
    """
    # delegate: extract the (possibly dotted) key, then normalize it into
    # a type-aware comparable tuple
    return _sort_tup(_get_item_from_doc(doc, sort_key))
ASCENDING = 1
DESCENDING = -1
The provided code snippet includes necessary dependencies for implementing the `_sort_docs` function. Write a Python function `def _sort_docs(docs, sort_list)` to solve the following problem:
Given the sort list provided in the .sort() method, sort the documents in place. from https://docs.python.org/3/howto/sorting.html :param docs list[dict]: :param sort_list list[(key, direction)] :rtype: None
Here is the function:
def _sort_docs(docs, sort_list):
    """
    Given the sort list provided in the .sort() method,
    sort the documents in place.
    from https://docs.python.org/3/howto/sorting.html
    :param docs list[dict]:
    :param sort_list list[(key, direction)]
    :rtype: None
    """
    # Python's sort is stable, so applying the keys from least- to
    # most-significant is equivalent to a single multi-key sort.
    for key, direction in reversed(sort_list):
        if direction in (ASCENDING, DESCENDING):
            docs.sort(key=functools.partial(_sort_func, sort_key=key),
                      reverse=(direction == DESCENDING))
        # validation on direction happens in cursor
178,617 | import collections
import copy
import datetime
import functools
import re
import bson
import sortedcontainers
from .cursor import Cursor, _validate_sort
from .common import support_alert, ASCENDING, DESCENDING, MetaStorageObject
from .errors import (MongitaError, MongitaNotImplementedError, DuplicateKeyError,
InvalidName, OperationFailure)
from .read_concern import ReadConcern
from .results import InsertOneResult, InsertManyResult, DeleteResult, UpdateResult
from .write_concern import WriteConcern
The provided code snippet includes necessary dependencies for implementing the `_split_filter` function. Write a Python function `def _split_filter(filter, metadata)` to solve the following problem:
Split the filter into indx_ops and slow_filters which are later used differently :param filter {doc_key: query_ops}: :param metadata dict: :rtype: {doc_key: query_ops}, [(SortedDict idx, dict query_ops), ...]
Here is the function:
def _split_filter(filter, metadata):
"""
Split the filter into indx_ops and slow_filters which are later used
differently
:param filter {doc_key: query_ops}:
:param metadata dict:
:rtype: {doc_key: query_ops}, [(SortedDict idx, dict query_ops), ...]
"""
slow_filters = {}
indx_ops = []
indexes = metadata.get('indexes', {})
for doc_key, query_ops in filter.items():
if doc_key + '_1' in indexes:
indx_ops.append((indexes[doc_key + '_1']['idx'], query_ops))
elif doc_key + '_-1' in indexes:
indx_ops.append((indexes[doc_key + '_-1']['idx'], query_ops))
else:
slow_filters[doc_key] = query_ops
return slow_filters, indx_ops | Split the filter into indx_ops and slow_filters which are later used differently :param filter {doc_key: query_ops}: :param metadata dict: :rtype: {doc_key: query_ops}, [(SortedDict idx, dict query_ops), ...] |
178,618 | import collections
import copy
import datetime
import functools
import re
import bson
import sortedcontainers
from .cursor import Cursor, _validate_sort
from .common import support_alert, ASCENDING, DESCENDING, MetaStorageObject
from .errors import (MongitaError, MongitaNotImplementedError, DuplicateKeyError,
InvalidName, OperationFailure)
from .read_concern import ReadConcern
from .results import InsertOneResult, InsertManyResult, DeleteResult, UpdateResult
from .write_concern import WriteConcern
def _get_ids_from_idx(idx, query_ops):
    """
    Returns the ids that match a set of query_ops in an index.
    :param idx SortedDict: maps index keys -> set of doc ids
    :param query_ops str|dict: a literal value or {$op: value, ...}
    :rtype: set
    """
    # Literal (non-dict) query value: straight equality lookup.
    if not isinstance(query_ops, dict):
        return set(idx.get(_make_idx_key(query_ops), set()))
    # A dict containing no recognized $-operators is itself the literal
    # value being matched.
    if not set(query_ops.keys()).intersection(_SUPPORTED_FILTER_OPERATORS):
        if _make_idx_key(query_ops) in idx.keys():
            return idx[_make_idx_key(query_ops)]
        return set()
    # keys_remain: index keys still satisfying every operator so far.
    # keys_cursed: keys explicitly excluded by negative operators
    # ($ne/$nin). Their doc ids are subtracted at the very end, so a doc
    # indexed under several keys is excluded even if another of its keys
    # survives the positive filters.
    keys_remain = set(idx.keys())
    keys_not_cursed = keys_remain.copy()
    keys_cursed = set()
    # NOTE(review): _idx_filter_sort (defined elsewhere) fixes the order
    # in which operators are applied -- presumably so positive filters run
    # before negative ones; confirm against its definition.
    for query_op, query_val in sorted(query_ops.items(),
                                      key=_idx_filter_sort, reverse=True):
        clean_idx_key = _make_idx_key(query_val)
        if query_op == '$eq':
            keys_remain = {clean_idx_key} if clean_idx_key in keys_remain else set()
        elif query_op == '$ne':
            _keys_cursed = set(k for k in keys_not_cursed if k == clean_idx_key)
            keys_remain -= _keys_cursed
            keys_not_cursed -= _keys_cursed
            keys_cursed.update(_keys_cursed)
        elif query_op == '$lt':
            # range operators delegate to _ids_given_irange_filters
            # (defined elsewhere); inclusive=(min_inclusive, max_inclusive)
            keys_remain = _ids_given_irange_filters(keys_remain, idx,
                                                    maximum=clean_idx_key,
                                                    inclusive=(False, False))
        elif query_op == '$lte':
            keys_remain = _ids_given_irange_filters(keys_remain, idx,
                                                    maximum=clean_idx_key,
                                                    inclusive=(False, True))
        elif query_op == '$gt':
            keys_remain = _ids_given_irange_filters(keys_remain, idx,
                                                    minimum=clean_idx_key,
                                                    inclusive=(False, False))
        elif query_op == '$gte':
            keys_remain = _ids_given_irange_filters(keys_remain, idx,
                                                    minimum=clean_idx_key,
                                                    inclusive=(True, False))
        elif query_op == '$in':
            if not isinstance(query_val, (list, tuple, set)):
                raise MongitaError("'$in' requires an iterable")
            clean_q_val = [_make_idx_key(e) for e in query_val]
            keys_remain = set(k for k in keys_remain
                              if k in clean_q_val)
        elif query_op == '$nin':
            if not isinstance(query_val, (list, tuple, set)):
                raise MongitaError("'$nin' requires an iterable")
            clean_q_val = [_make_idx_key(e) for e in query_val]
            _keys_cursed = set(k for k in keys_not_cursed
                               if k in clean_q_val)
            keys_remain -= _keys_cursed
            keys_not_cursed -= _keys_cursed
            keys_cursed.update(_keys_cursed)
        # validation of options is done earlier
    # Union the doc ids behind all surviving keys, then subtract every id
    # behind a cursed (negatively excluded) key.
    ids_cursed = set()
    for k in keys_cursed:
        ids_cursed.update(idx[k])
    ret = set()
    for k in keys_remain:
        ret.update(idx[k])
    ret -= ids_cursed
    return ret
The provided code snippet includes necessary dependencies for implementing the `_apply_indx_ops` function. Write a Python function `def _apply_indx_ops(indx_ops)` to solve the following problem:
Return all doc_ids that can be found through the index filters :param indx_ops [(SortedDict idx, dict query_ops), ...]: :rtype: set
Here is the function:
def _apply_indx_ops(indx_ops):
    """
    Return all doc_ids that can be found through the index filters.
    The result is the intersection of the ids matched by each
    (index, query_ops) pair; any empty match short-circuits to set().
    :param indx_ops [(SortedDict idx, dict query_ops), ...]:
    :rtype: set
    """
    matched = None
    for idx, query_ops in indx_ops:
        ids = _get_ids_from_idx(idx, query_ops)
        matched = ids if matched is None else matched & ids
        if not matched:
            # one filter matched nothing -> the intersection is empty
            return set()
    return matched if matched is not None else set()
178,619 | from .errors import MongitaNotImplementedError, MongitaError, InvalidOperation
from .common import ASCENDING, DESCENDING, support_alert
class MongitaError(Exception):
pass
ASCENDING = 1
DESCENDING = -1
The provided code snippet includes necessary dependencies for implementing the `_validate_sort` function. Write a Python function `def _validate_sort(key_or_list, direction=None)` to solve the following problem:
Validate kwargs and return a proper sort list :param key_or_list str|[(str key, int direction), ...] :param direction int: :rtype: [(str key, int direction), ...]
Here is the function:
def _validate_sort(key_or_list, direction=None):
    """
    Validate kwargs and return a proper sort list
    :param key_or_list str|[(str key, int direction), ...]
    :param direction int:
    :rtype: [(str key, int direction), ...]
    :raises MongitaError: on any unsupported shape, key or direction
    """
    # Normalize the three accepted call shapes into [(key, direction), ...]
    if direction is None and isinstance(key_or_list, (list, tuple)) \
            and all(isinstance(tup, (list, tuple)) and len(tup) == 2 for tup in key_or_list):
        _sort = key_or_list
    elif direction is None and isinstance(key_or_list, str):
        _sort = [(key_or_list, ASCENDING)]
    elif isinstance(key_or_list, str) and isinstance(direction, int):
        _sort = [(key_or_list, direction)]
    else:
        raise MongitaError("Unsupported sort parameter format. See the docs.")
    for sort_key, sort_direction in _sort:
        if not isinstance(sort_key, str):
            raise MongitaError("Sort key(s) must be strings %r" % str(key_or_list))
        if sort_direction not in (ASCENDING, DESCENDING):
            # BUGFIX: report the offending per-key direction; previously this
            # formatted the top-level 'direction' kwarg, which is None when
            # the list form is used.
            raise MongitaError("Sort direction(s) must be either ASCENDING (1) or DESCENDING (-1). Not %r" % sort_direction)
    return _sort
178,620 | import pymongo
from . import mongita_client
def _resolve_client(connection_type, uri):
    """
    :param str connection_type: 'mongita' or 'mongodb'
    :param str uri:
    :rtype: mongita.MongitaClientDisk|pymongo.MongoClient
    """
    assert connection_type in ('mongita', 'mongodb')
    if connection_type == 'mongita':
        # allow file:// URIs for the on-disk client
        return mongita_client.MongitaClientDisk(
            uri.replace('file://', '/') if uri else uri)
    return pymongo.MongoClient(host=uri or 'localhost')
def _resolve_collections(collections):
"""
Split a list of raw collections into a list of database/collection tuples
:param list[str] collections:
:rtype: list[(str, str|None)]
"""
ret = []
for raw_collection in collections:
attr_chain = raw_collection.split('.', 1)
database = attr_chain[0]
if len(attr_chain) == 2:
ret.append((database, attr_chain[1]))
else:
ret.append((database, None))
return ret
def _confirm_loop(msg, logger):
    """
    Confirm in a loop that the user wants to do the thing.
    Returns a tuple of (yes/no, yesall)
    :param str msg:
    :param Logger logger:
    :rtype: (bool, bool)
    """
    # keep prompting until we get a recognizable answer
    while True:
        logger.log("%s (yes/yesall/no)", msg)
        answer = input().lower()
        if answer == 'yesall':
            return True, True
        if answer in ('yes', 'y'):
            return True, False
        if answer in ('no', 'n'):
            return False, False
def _replace_collection(source, dest, database, collection, force, logger):
    """
    Replace a single collection at destination with the source's collection.
    Returns whether we want to 'force' going forward
    :param MongoClient|MongitaClientDisk source:
    :param MongoClient|MongitaClientDisk dest:
    :param str database:
    :param str collection:
    :param bool force: skip the interactive confirmation prompt
    :param Logger logger:
    :rtype: bool
    """
    source_coll = source[database][collection]
    dest_coll = dest[database][collection]
    if force:
        logger.log("Replacing %s.%s (%d documents -> %d documents)...",
                   database, collection, source_coll.count_documents({}),
                   dest_coll.count_documents({}))
    else:
        # interactive confirmation; answering 'yesall' flips force on for
        # all subsequent replacements
        confirm, force = _confirm_loop("Replace %s.%s? (%d documents -> %d documents)" %
                                       (database, collection,
                                        source_coll.count_documents({}),
                                        dest_coll.count_documents({})),
                                       logger)
        if not confirm:
            return False
    # drop the destination collection, then stream the source over.
    # _batch_docs is defined elsewhere -- presumably yields the cursor's
    # documents in fixed-size batches; confirm against its definition.
    dest[database].drop_collection(collection)
    for doc_batch in _batch_docs(source_coll.find({})):
        dest_coll.insert_many(doc_batch)
    return force
class _Logger():
def __init__(self, quiet):
self.quiet = quiet
def log(self, msg, *args):
if not self.quiet:
msg = msg % args
print("MONGITASYNC: %s" % (msg))
The provided code snippet includes necessary dependencies for implementing the `mongitasync` function. Write a Python function `def mongitasync(source_type, destination_type, collections, force=False, source_uri=None, destination_uri=None, quiet=False)` to solve the following problem:
Sync a list of collections from the source to the destination. Source/destination can be either 'mongita' or 'mongodb' Collections can be formatted as either 'db.coll' or plain 'db' :param str source_type: mongita|mongodb :param str destination_type: mongita|mongodb :param list[str]|str collections: :param bool force: :param str source_uri: :param str destination_uri: :param bool quiet:
Here is the function:
def mongitasync(source_type, destination_type, collections, force=False,
                source_uri=None, destination_uri=None, quiet=False):
    """
    Sync a list of collections from the source to the destination.
    Source/destination can be either 'mongita' or 'mongodb'
    Collections can be formatted as either 'db.coll' or plain 'db'
    :param str source_type: mongita|mongodb
    :param str destination_type: mongita|mongodb
    :param list[str]|str collections:
    :param bool force: skip all interactive confirmation prompts
    :param str source_uri:
    :param str destination_uri:
    :param bool quiet: suppress log output
    """
    source = _resolve_client(source_type, source_uri)
    destination = _resolve_client(destination_type, destination_uri)
    if not collections:
        raise AssertionError("No collections provided")
    # accept a single collection string as a convenience
    if not isinstance(collections, list):
        collections = [collections]
    logger = _Logger(quiet)
    logger.log("Syncing %d databases/collections from %r (%s) to %r (%s):",
               len(collections), source_type, source_uri,
               destination_type, destination_uri)
    for collection in collections:
        logger.log(' ' + collection)
    for database, collection in _resolve_collections(collections):
        if collection:
            # explicit db.coll target: replace just that one collection.
            # _replace_collection returns the (possibly upgraded) force flag.
            force = _replace_collection(source, destination, database, collection,
                                        force, logger)
            continue
        # bare database target: drop the whole destination db, then copy
        # every source collection over
        db_collections = list(source[database].list_collection_names())
        if force:
            destination.drop_database(database)
        else:
            confirm, force = _confirm_loop("Drop database %r on %r?" %
                                           (database, destination), logger)
            if confirm:
                destination.drop_database(database)
        # NOTE(review): even when the drop is declined, each collection is
        # still copied individually below (prompting per collection)
        for collection in db_collections:
            force = _replace_collection(source, destination, database, collection,
                                        force, logger)
178,621 | import functools
import os
import re
import unicodedata
import bson
import sortedcontainers
from .errors import MongitaError
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
'LPT2', 'LPT3', 'PRN', 'NUL')
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
The provided code snippet includes necessary dependencies for implementing the `secure_filename` function. Write a Python function `def secure_filename(filename: str) -> str` to solve the following problem:
The idea of this is to ensure that the document_id cannot perform unsafe path manipulation on the filesystem. This will probably be deleted soon.
Here is the function:
def secure_filename(filename: str) -> str:
    """
    Sanitize *filename* so it is safe to use as a single path component:
    the result contains only ASCII letters, digits, '_', '.' and '-',
    with no path separators. Adapted from the werkzeug source.

    The idea of this is to ensure that the document_id doesn't do sketchy shit on
    the filesystem. This will probably be deleted soon.
    """
    # Decompose unicode and drop anything not representable in ASCII
    filename = unicodedata.normalize("NFKD", filename)
    filename = filename.encode("ascii", "ignore").decode("ascii")
    # Neutralize path separators by turning them into spaces
    for sep in os.path.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, " ")
    # Collapse whitespace runs to '_', strip disallowed chars, and trim
    # leading/trailing dots and underscores
    filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
        "._"
    )
    # on nt a couple of special files are present in each folder. We
    # have to ensure that the target file is not such a filename. In
    # this case we prepend an underline
    if filename and filename.split(".")[0].upper() in _windows_device_files:
        # BUGFIX: prepend an underscore to the sanitized name; previously
        # this branch returned the literal string "_(unknown)".
        filename = f"_{filename}"
    return filename
178,622 | import functools
import os
import re
import unicodedata
import bson
import sortedcontainers
from .errors import MongitaError
_invalid_names = re.compile(r'[/\. "$*<>:|?]')
The provided code snippet includes necessary dependencies for implementing the `ok_name` function. Write a Python function `def ok_name(name)` to solve the following problem:
In-line with MongoDB restrictions. https://docs.mongodb.com/manual/reference/limits/#std-label-restrictions-on-db-names https://docs.mongodb.com/manual/reference/limits/#Restriction-on-Collection-Names The prohibition on "system." names will be covered by the prohibition on '.'
Here is the function:
def ok_name(name):
    """
    In-line with MongoDB restrictions.
    https://docs.mongodb.com/manual/reference/limits/#std-label-restrictions-on-db-names
    https://docs.mongodb.com/manual/reference/limits/#Restriction-on-Collection-Names
    The prohibition on "system." names will be covered by the prohibition on '.'
    """
    # non-empty, at most 64 chars, and free of forbidden characters
    return bool(name) and len(name) <= 64 and not _invalid_names.search(name)
178,623 | import functools
import os
import re
import unicodedata
import bson
import sortedcontainers
from .errors import MongitaError
class MongitaError(Exception):
    """Generic Mongita error -- presumably the base class for the
    package's more specific exceptions (see the errors module)."""
    pass
The provided code snippet includes necessary dependencies for implementing the `support_alert` function. Write a Python function `def support_alert(func)` to solve the following problem:
Provide smart tips if the user tries to use un-implemented / deprecated known kwargs.
Here is the function:
def support_alert(func):
    """
    Provide smart tips if the user tries to use un-implemented / deprecated
    known kwargs.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # co_varnames includes the wrapped function's parameter names;
        # anything not in it is flagged as unsupported
        unknown = [k for k in kwargs if k not in func.__code__.co_varnames]
        if unknown:
            raise MongitaError("The argument %r is not supported by %r in Mongita. "
                               "This may or may not be supported in PyMongo. "
                               "If it is, you can help implement it." %
                               (unknown[0], func))
        return func(*args, **kwargs)
    return wrapper
178,624 | import argparse
import os
import shutil
import sys
import time
from functools import partial
import deepspeed
import numpy as np
import torch
import tqdm
import transformers
from peft import LoraConfig, get_peft_model
from torch.utils.tensorboard import SummaryWriter
from model.LISA import LISAForCausalLM
from model.llava import conversation as conversation_lib
from utils.dataset import HybridDataset, ValDataset, collate_fn
from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
AverageMeter, ProgressMeter, Summary, dict_to_cuda,
intersectionAndUnionGPU)
def parse_args(args):
    """Build and run the argparse parser for LISA model training.

    :param args: list of raw CLI tokens (e.g. sys.argv[1:])
    :rtype: argparse.Namespace
    """
    parser = argparse.ArgumentParser(description="LISA Model Training")
    # --- model / precision ---
    parser.add_argument("--local_rank", default=0, type=int, help="node rank")
    parser.add_argument(
        "--version", default="liuhaotian/llava-llama-2-13b-chat-lightning-preview"
    )
    parser.add_argument("--vis_save_path", default="./vis_output", type=str)
    parser.add_argument(
        "--precision",
        default="bf16",
        type=str,
        choices=["fp32", "bf16", "fp16"],
        help="precision for inference",
    )
    parser.add_argument("--image_size", default=1024, type=int, help="image size")
    parser.add_argument("--model_max_length", default=512, type=int)
    parser.add_argument("--lora_r", default=8, type=int)
    parser.add_argument(
        "--vision-tower", default="openai/clip-vit-large-patch14", type=str
    )
    parser.add_argument("--load_in_8bit", action="store_true", default=False)
    parser.add_argument("--load_in_4bit", action="store_true", default=False)
    # --- datasets; multi-valued options use "||" as a separator ---
    parser.add_argument(
        "--dataset", default="sem_seg||refer_seg||vqa||reason_seg", type=str
    )
    parser.add_argument("--sample_rates", default="9,3,3,1", type=str)
    parser.add_argument(
        "--sem_seg_data",
        default="ade20k||cocostuff||pascal_part||paco_lvis||mapillary",
        type=str,
    )
    parser.add_argument(
        "--refer_seg_data", default="refclef||refcoco||refcoco+||refcocog", type=str
    )
    parser.add_argument("--vqa_data", default="llava_instruct_150k", type=str)
    parser.add_argument("--reason_seg_data", default="ReasonSeg|train", type=str)
    parser.add_argument("--val_dataset", default="ReasonSeg|val", type=str)
    parser.add_argument("--dataset_dir", default="./dataset", type=str)
    parser.add_argument("--log_base_dir", default="./runs", type=str)
    parser.add_argument("--exp_name", default="lisa", type=str)
    # --- optimization / schedule ---
    parser.add_argument("--epochs", default=10, type=int)
    parser.add_argument("--steps_per_epoch", default=500, type=int)
    parser.add_argument(
        "--batch_size", default=2, type=int, help="batch size per device per step"
    )
    parser.add_argument(
        "--grad_accumulation_steps",
        default=10,
        type=int,
    )
    parser.add_argument("--val_batch_size", default=1, type=int)
    parser.add_argument("--workers", default=4, type=int)
    parser.add_argument("--lr", default=0.0003, type=float)
    parser.add_argument("--ce_loss_weight", default=1.0, type=float)
    parser.add_argument("--dice_loss_weight", default=0.5, type=float)
    parser.add_argument("--bce_loss_weight", default=2.0, type=float)
    parser.add_argument("--lora_alpha", default=16, type=int)
    parser.add_argument("--lora_dropout", default=0.05, type=float)
    parser.add_argument("--lora_target_modules", default="q_proj,v_proj", type=str)
    parser.add_argument("--explanatory", default=0.1, type=float)
    parser.add_argument("--beta1", default=0.9, type=float)
    parser.add_argument("--beta2", default=0.95, type=float)
    parser.add_argument("--num_classes_per_sample", default=3, type=int)
    # --- eval / checkpointing toggles ---
    parser.add_argument("--exclude_val", action="store_true", default=False)
    parser.add_argument("--no_eval", action="store_true", default=False)
    parser.add_argument("--eval_only", action="store_true", default=False)
    parser.add_argument("--vision_pretrained", default="PATH_TO_SAM_ViT-H", type=str)
    parser.add_argument("--out_dim", default=256, type=int)
    parser.add_argument("--resume", default="", type=str)
    parser.add_argument("--print_freq", default=1, type=int)
    parser.add_argument("--start_epoch", default=0, type=int)
    # NOTE(review): with default=True these store_true flags are always on
    # and cannot be disabled from the CLI
    parser.add_argument("--gradient_checkpointing", action="store_true", default=True)
    parser.add_argument("--train_mask_decoder", action="store_true", default=True)
    parser.add_argument("--use_mm_start_end", action="store_true", default=True)
    parser.add_argument("--auto_resume", action="store_true", default=True)
    parser.add_argument(
        "--conv_type",
        default="llava_v1",
        type=str,
        choices=["llava_v1", "llava_llama_2"],
    )
    return parser.parse_args(args)
178,625 | import argparse
import os
import shutil
import sys
import time
from functools import partial
import deepspeed
import numpy as np
import torch
import tqdm
import transformers
from peft import LoraConfig, get_peft_model
from torch.utils.tensorboard import SummaryWriter
from model.LISA import LISAForCausalLM
from model.llava import conversation as conversation_lib
from utils.dataset import HybridDataset, ValDataset, collate_fn
from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
AverageMeter, ProgressMeter, Summary, dict_to_cuda,
intersectionAndUnionGPU)
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=":f", summary_type=Summary.AVERAGE):
        # name/fmt drive the __str__ / summary() format strings
        self.name = name
        self.fmt = fmt
        self.summary_type = summary_type
        self.reset()

    def reset(self):
        # val: most recent value; sum/count/avg: running statistics
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # n is the number of samples this value represents (e.g. batch size)
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def all_reduce(self):
        # Aggregate sum/count across distributed workers.
        # NOTE(review): relies on a module-level `dist` (presumably
        # torch.distributed) imported elsewhere -- confirm.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if isinstance(self.sum, np.ndarray):
            # array-valued meters: pack the array plus the count into one
            # tensor so a single all_reduce call suffices
            total = torch.tensor(
                self.sum.tolist()
                + [
                    self.count,
                ],
                dtype=torch.float32,
                device=device,
            )
        else:
            total = torch.tensor(
                [self.sum, self.count], dtype=torch.float32, device=device
            )
        dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False)
        # unpack: length > 2 means the array-valued layout above was used
        if total.shape[0] > 2:
            self.sum, self.count = total[:-1].cpu().numpy(), total[-1].cpu().item()
        else:
            self.sum, self.count = total.tolist()
        # epsilon guards against division by zero when count is 0
        self.avg = self.sum / (self.count + 1e-5)

    def __str__(self):
        fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
        return fmtstr.format(**self.__dict__)

    def summary(self):
        # render according to summary_type (see the Summary enum)
        fmtstr = ""
        if self.summary_type is Summary.NONE:
            fmtstr = ""
        elif self.summary_type is Summary.AVERAGE:
            fmtstr = "{name} {avg:.3f}"
        elif self.summary_type is Summary.SUM:
            fmtstr = "{name} {sum:.3f}"
        elif self.summary_type is Summary.COUNT:
            fmtstr = "{name} {count:.3f}"
        else:
            raise ValueError("invalid summary type %r" % self.summary_type)
        return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
    """Formats and prints a one-line progress display for a set of meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        # "[  7/100]" style counter followed by each meter, tab-separated
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print("\t".join(parts))

    def display_summary(self):
        parts = [" *"] + [meter.summary() for meter in self.meters]
        print(" ".join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # pad the running count to the width of the total
        width = len(str(num_batches // 1))
        template = "{:" + str(width) + "d}"
        return "[" + template + "/" + template.format(num_batches) + "]"
def dict_to_cuda(input_dict):
    """Move every tensor (or non-empty list of tensors) in *input_dict*
    onto the GPU in place, and return the same dict."""
    for key, value in input_dict.items():
        if isinstance(value, torch.Tensor):
            input_dict[key] = value.cuda(non_blocking=True)
        elif (isinstance(value, list) and value
                and isinstance(value[0], torch.Tensor)):
            input_dict[key] = [tensor.cuda(non_blocking=True) for tensor in value]
    return input_dict
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( train_loader, model, epoch, scheduler, writer, train_iter, args, )` to solve the following problem:
Main training loop.
Here is the function:
def train(
    train_loader,
    model,
    epoch,
    scheduler,
    writer,
    train_iter,
    args,
):
    """Main training loop.

    Runs args.steps_per_epoch optimizer steps, each accumulating
    args.grad_accumulation_steps batches, logging meters to TensorBoard
    on rank 0 every args.print_freq steps.

    :param train_loader: DataLoader used to re-create train_iter on exhaustion
    :param model: deepspeed engine (provides .backward() / .step())
    :param epoch int:
    :param scheduler: LR scheduler (read for logging only)
    :param writer: TensorBoard SummaryWriter (rank 0 only)
    :param train_iter: iterator over train_loader, threaded between epochs
    :param args: parsed CLI namespace
    :returns: the (possibly re-created) train_iter
    """
    batch_time = AverageMeter("Time", ":6.3f")
    data_time = AverageMeter("Data", ":6.3f")
    losses = AverageMeter("Loss", ":.4f")
    ce_losses = AverageMeter("CeLoss", ":.4f")
    mask_bce_losses = AverageMeter("MaskBCELoss", ":.4f")
    mask_dice_losses = AverageMeter("MaskDICELoss", ":.4f")
    mask_losses = AverageMeter("MaskLoss", ":.4f")

    progress = ProgressMeter(
        args.steps_per_epoch,
        [
            batch_time,
            losses,
            ce_losses,
            mask_losses,
            mask_bce_losses,
            mask_dice_losses,
        ],
        prefix="Epoch: [{}]".format(epoch),
    )

    # switch to train mode
    model.train()
    end = time.time()
    for global_step in range(args.steps_per_epoch):
        for i in range(args.grad_accumulation_steps):
            # NOTE(review): bare except -- intended to restart the iterator
            # on StopIteration, but it will also swallow any other error
            # raised while fetching a batch
            try:
                input_dict = next(train_iter)
            except:
                train_iter = iter(train_loader)
                input_dict = next(train_iter)

            data_time.update(time.time() - end)
            input_dict = dict_to_cuda(input_dict)

            # cast image tensors to the training precision
            if args.precision == "fp16":
                input_dict["images"] = input_dict["images"].half()
                input_dict["images_clip"] = input_dict["images_clip"].half()
            elif args.precision == "bf16":
                input_dict["images"] = input_dict["images"].bfloat16()
                input_dict["images_clip"] = input_dict["images_clip"].bfloat16()
            else:
                input_dict["images"] = input_dict["images"].float()
                input_dict["images_clip"] = input_dict["images_clip"].float()

            output_dict = model(**input_dict)

            loss = output_dict["loss"]
            ce_loss = output_dict["ce_loss"]
            mask_bce_loss = output_dict["mask_bce_loss"]
            mask_dice_loss = output_dict["mask_dice_loss"]
            mask_loss = output_dict["mask_loss"]

            # track per-batch losses weighted by batch size
            losses.update(loss.item(), input_dict["images"].size(0))
            ce_losses.update(ce_loss.item(), input_dict["images"].size(0))
            mask_bce_losses.update(mask_bce_loss.item(), input_dict["images"].size(0))
            mask_dice_losses.update(mask_dice_loss.item(), input_dict["images"].size(0))
            mask_losses.update(mask_loss.item(), input_dict["images"].size(0))
            # deepspeed engine handles gradient accumulation internally
            model.backward(loss)
            model.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if global_step % args.print_freq == 0:
            # fold per-worker meters together before logging
            if args.distributed:
                batch_time.all_reduce()
                data_time.all_reduce()
                losses.all_reduce()
                ce_losses.all_reduce()
                mask_bce_losses.all_reduce()
                mask_dice_losses.all_reduce()
                mask_losses.all_reduce()

            if args.local_rank == 0:
                progress.display(global_step + 1)
                writer.add_scalar("train/loss", losses.avg, global_step)
                writer.add_scalar("train/ce_loss", ce_losses.avg, global_step)
                writer.add_scalar(
                    "train/mask_bce_loss", mask_bce_losses.avg, global_step
                )
                writer.add_scalar(
                    "train/mask_dice_loss", mask_dice_losses.avg, global_step
                )
                writer.add_scalar("train/mask_loss", mask_losses.avg, global_step)
                writer.add_scalar(
                    "metrics/total_secs_per_batch", batch_time.avg, global_step
                )
                writer.add_scalar(
                    "metrics/data_secs_per_batch", data_time.avg, global_step
                )

            # restart the meters so each logged window is independent
            batch_time.reset()
            data_time.reset()
            losses.reset()
            ce_losses.reset()
            mask_bce_losses.reset()
            mask_dice_losses.reset()
            mask_losses.reset()

        if global_step != 0:
            curr_lr = scheduler.get_last_lr()
            if args.local_rank == 0:
                writer.add_scalar("train/lr", curr_lr[0], global_step)

    return train_iter
178,626 | import argparse
import os
import shutil
import sys
import time
from functools import partial
import deepspeed
import numpy as np
import torch
import tqdm
import transformers
from peft import LoraConfig, get_peft_model
from torch.utils.tensorboard import SummaryWriter
from model.LISA import LISAForCausalLM
from model.llava import conversation as conversation_lib
from utils.dataset import HybridDataset, ValDataset, collate_fn
from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
AverageMeter, ProgressMeter, Summary, dict_to_cuda,
intersectionAndUnionGPU)
class Summary(Enum):
    """Selects how an AverageMeter renders itself in summary() output."""
    NONE = 0
    AVERAGE = 1
    SUM = 2
    COUNT = 3
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=":f", summary_type=Summary.AVERAGE):
        # name/fmt drive the __str__ / summary() format strings
        self.name = name
        self.fmt = fmt
        self.summary_type = summary_type
        self.reset()

    def reset(self):
        # val: most recent value; sum/count/avg: running statistics
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # n is the number of samples this value represents (e.g. batch size)
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def all_reduce(self):
        # Aggregate sum/count across distributed workers.
        # NOTE(review): relies on a module-level `dist` (presumably
        # torch.distributed) imported elsewhere -- confirm.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if isinstance(self.sum, np.ndarray):
            # array-valued meters: pack the array plus the count into one
            # tensor so a single all_reduce call suffices
            total = torch.tensor(
                self.sum.tolist()
                + [
                    self.count,
                ],
                dtype=torch.float32,
                device=device,
            )
        else:
            total = torch.tensor(
                [self.sum, self.count], dtype=torch.float32, device=device
            )
        dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False)
        # unpack: length > 2 means the array-valued layout above was used
        if total.shape[0] > 2:
            self.sum, self.count = total[:-1].cpu().numpy(), total[-1].cpu().item()
        else:
            self.sum, self.count = total.tolist()
        # epsilon guards against division by zero when count is 0
        self.avg = self.sum / (self.count + 1e-5)

    def __str__(self):
        fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
        return fmtstr.format(**self.__dict__)

    def summary(self):
        # render according to summary_type (see the Summary enum)
        fmtstr = ""
        if self.summary_type is Summary.NONE:
            fmtstr = ""
        elif self.summary_type is Summary.AVERAGE:
            fmtstr = "{name} {avg:.3f}"
        elif self.summary_type is Summary.SUM:
            fmtstr = "{name} {sum:.3f}"
        elif self.summary_type is Summary.COUNT:
            fmtstr = "{name} {count:.3f}"
        else:
            raise ValueError("invalid summary type %r" % self.summary_type)
        return fmtstr.format(**self.__dict__)
def intersectionAndUnionGPU(output, target, K, ignore_index=255):
    """Per-class intersection, union and target areas for segmentation eval.

    'K' classes; output and target sizes are N, N*L or N*H*W, each value in
    range 0 to K - 1. Returns (area_intersection, area_union, area_target),
    each a length-K tensor.

    NOTE: `output` is modified in place (ignored positions are overwritten
    through the flattened view), so pass a clone if the caller still needs
    the original predictions. Assumes ignore_index >= K so masked entries
    fall outside histc's [0, K-1] range.
    """
    assert output.dim() in [1, 2, 3]
    assert output.shape == target.shape

    output = output.view(-1)
    target = target.view(-1)
    # Overwrite ignored positions so they match target there and then drop
    # out of every histogram below.
    output[target == ignore_index] = ignore_index

    matches = output[output == target]
    area_intersection = torch.histc(matches, bins=K, min=0, max=K - 1)
    area_output = torch.histc(output, bins=K, min=0, max=K - 1)
    area_target = torch.histc(target, bins=K, min=0, max=K - 1)
    area_union = area_output + area_target - area_intersection
    return area_intersection, area_union, area_target
def dict_to_cuda(input_dict):
    """Move tensor values (and lists of tensors) in input_dict to the GPU.

    Mutates and returns the same dict; non-tensor values are left untouched.
    """
    for key, value in input_dict.items():
        if isinstance(value, torch.Tensor):
            input_dict[key] = value.cuda(non_blocking=True)
        elif (
            isinstance(value, list)
            and len(value) > 0
            and isinstance(value[0], torch.Tensor)
        ):
            # Only the first element is inspected; assumes homogeneous lists.
            input_dict[key] = [item.cuda(non_blocking=True) for item in value]
    return input_dict
def validate(val_loader, model_engine, epoch, writer, args):
    """Run one evaluation pass over val_loader and return (giou, ciou).

    giou is the mean per-mask IoU of the foreground class; ciou is the
    cumulative (dataset-level) foreground IoU. Both are logged to
    TensorBoard on rank 0.
    """
    # SUM meters: all_reduce() below aggregates raw sums across ranks.
    intersection_meter = AverageMeter("Intersec", ":6.3f", Summary.SUM)
    union_meter = AverageMeter("Union", ":6.3f", Summary.SUM)
    acc_iou_meter = AverageMeter("gIoU", ":6.3f", Summary.SUM)

    model_engine.eval()

    for input_dict in tqdm.tqdm(val_loader):
        torch.cuda.empty_cache()
        input_dict = dict_to_cuda(input_dict)
        # Cast image tensors to the model's precision.
        if args.precision == "fp16":
            input_dict["images"] = input_dict["images"].half()
            input_dict["images_clip"] = input_dict["images_clip"].half()
        elif args.precision == "bf16":
            input_dict["images"] = input_dict["images"].bfloat16()
            input_dict["images_clip"] = input_dict["images_clip"].bfloat16()
        else:
            input_dict["images"] = input_dict["images"].float()
            input_dict["images_clip"] = input_dict["images_clip"].float()

        with torch.no_grad():
            output_dict = model_engine(**input_dict)

        pred_masks = output_dict["pred_masks"]
        masks_list = output_dict["gt_masks"][0].int()
        output_list = (pred_masks[0] > 0).int()  # binarize mask logits at 0
        assert len(pred_masks) == 1  # expects exactly one image's masks per step

        intersection, union, acc_iou = 0.0, 0.0, 0.0
        for mask_i, output_i in zip(masks_list, output_list):
            # Binary (K=2) stats per mask; clone the prediction because
            # intersectionAndUnionGPU mutates its first argument in place.
            intersection_i, union_i, _ = intersectionAndUnionGPU(
                output_i.contiguous().clone(), mask_i.contiguous(), 2, ignore_index=255
            )
            intersection += intersection_i
            union += union_i
            acc_iou += intersection_i / (union_i + 1e-5)
            acc_iou[union_i == 0] += 1.0  # no-object target
        intersection, union = intersection.cpu().numpy(), union.cpu().numpy()
        acc_iou = acc_iou.cpu().numpy() / masks_list.shape[0]
        intersection_meter.update(intersection), union_meter.update(
            union
        ), acc_iou_meter.update(acc_iou, n=masks_list.shape[0])

    # Aggregate sums/counts across distributed ranks.
    intersection_meter.all_reduce()
    union_meter.all_reduce()
    acc_iou_meter.all_reduce()

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    ciou = iou_class[1]  # index 1 = foreground class
    giou = acc_iou_meter.avg[1]

    if args.local_rank == 0:
        writer.add_scalar("val/giou", giou, epoch)
        writer.add_scalar("val/ciou", ciou, epoch)
        print("giou: {:.4f}, ciou: {:.4f}".format(giou, ciou))

    return giou, ciou
178,627 | import argparse
import os
import sys
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
from model.LISA import LISAForCausalLM
from model.llava import conversation as conversation_lib
from model.llava.mm_utils import tokenizer_image_token
from model.segment_anything.utils.transforms import ResizeLongestSide
from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
def parse_args(args):
    """Parse command-line options for the LISA chat script."""
    p = argparse.ArgumentParser(description="LISA chat")
    p.add_argument("--version", default="xinlai/LISA-13B-llama2-v1")
    p.add_argument("--vis_save_path", default="./vis_output", type=str)
    p.add_argument(
        "--precision",
        default="bf16",
        type=str,
        choices=["fp32", "bf16", "fp16"],
        help="precision for inference",
    )
    p.add_argument("--image_size", default=1024, type=int, help="image size")
    p.add_argument("--model_max_length", default=512, type=int)
    p.add_argument("--lora_r", default=8, type=int)
    p.add_argument("--vision-tower", default="openai/clip-vit-large-patch14", type=str)
    p.add_argument("--local-rank", default=0, type=int, help="node rank")
    p.add_argument("--load_in_8bit", action="store_true", default=False)
    p.add_argument("--load_in_4bit", action="store_true", default=False)
    p.add_argument("--use_mm_start_end", action="store_true", default=True)
    p.add_argument(
        "--conv_type",
        default="llava_v1",
        type=str,
        choices=["llava_v1", "llava_llama_2"],
    )
    return p.parse_args(args)
178,628 | import argparse
import os
import sys
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
from model.LISA import LISAForCausalLM
from model.llava import conversation as conversation_lib
from model.llava.mm_utils import tokenizer_image_token
from model.segment_anything.utils.transforms import ResizeLongestSide
from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
The provided code snippet includes necessary dependencies for implementing the `preprocess` function. Write a Python function `def preprocess( x, pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1), pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1), img_size=1024, ) -> torch.Tensor` to solve the following problem:
Normalize pixel values and pad to a square input.
Here is the function:
def preprocess(
    x,
    pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
    pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
    img_size=1024,
) -> torch.Tensor:
    """Normalize pixel values and pad to a square input."""
    # Per-channel normalization (defaults are ImageNet statistics broadcast
    # over the spatial dims).
    normalized = (x - pixel_mean) / pixel_std

    # Zero-pad bottom/right so spatial dims become img_size x img_size.
    height, width = normalized.shape[-2:]
    return F.pad(normalized, (0, img_size - width, 0, img_size - height))
178,629 | import argparse
import glob
import os
import sys
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import transformers
from peft import LoraConfig, get_peft_model
from transformers import AutoTokenizer
from model.LISA import LISAForCausalLM
from utils.utils import DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN
def parse_args(args):
    """Parse CLI options for merging LoRA weights into an HF-format model."""
    ap = argparse.ArgumentParser(
        description="merge lora weights and save model with hf format"
    )
    ap.add_argument(
        "--version", default="liuhaotian/llava-llama-2-13b-chat-lightning-preview"
    )
    ap.add_argument("--vis_save_path", default="./vis_output", type=str)
    ap.add_argument(
        "--precision",
        default="bf16",
        type=str,
        choices=["fp32", "bf16", "fp16"],
        help="precision for inference",
    )
    ap.add_argument("--vision_pretrained", default="PATH_TO_SAM_ViT-H", type=str)
    ap.add_argument("--out_dim", default=256, type=int)
    ap.add_argument("--image_size", default=1024, type=int, help="image size")
    ap.add_argument("--model_max_length", default=512, type=int)
    ap.add_argument("--vision-tower", default="openai/clip-vit-large-patch14", type=str)
    # LoRA hyper-parameters.
    ap.add_argument("--lora_r", default=8, type=int)
    ap.add_argument("--lora_alpha", default=16, type=int)
    ap.add_argument("--lora_dropout", default=0.05, type=float)
    ap.add_argument("--lora_target_modules", default="q_proj,v_proj", type=str)
    ap.add_argument("--local-rank", default=0, type=int, help="node rank")
    ap.add_argument("--train_mask_decoder", action="store_true", default=True)
    ap.add_argument("--use_mm_start_end", action="store_true", default=True)
    ap.add_argument(
        "--conv_type",
        default="llava_v1",
        type=str,
        choices=["llava_v1", "llava_llama_2"],
    )
    # Both required: the LoRA checkpoint to merge and where to save the result.
    ap.add_argument("--weight", default="", type=str, required=True)
    ap.add_argument("--save_path", default="./lisa_model", type=str, required=True)
    return ap.parse_args(args)
178,630 | import argparse
import os
import re
import sys
import bleach
import cv2
import gradio as gr
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
from model.LISA import LISAForCausalLM
from model.llava import conversation as conversation_lib
from model.llava.mm_utils import tokenizer_image_token
from model.segment_anything.utils.transforms import ResizeLongestSide
from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
description = """
<font size=4>
This is the online demo of LISA. \n
If multiple users are using it at the same time, they will enter a queue, which may delay some time. \n
**Note**: **Different prompts can lead to significantly varied results**. \n
**Note**: Please try to **standardize** your input text prompts to **avoid ambiguity**, and also pay attention to whether the **punctuations** of the input are correct. \n
**Note**: Current model is **LISA-13B-llama2-v0-explanatory**, and 4-bit quantization may impair text-generation quality. \n
**Usage**: <br>
 (1) To let LISA **segment something**, input prompt like: "Can you segment xxx in this image?", "What is xxx in this image? Please output segmentation mask."; <br>
 (2) To let LISA **output an explanation**, input prompt like: "What is xxx in this image? Please output segmentation mask and explain why."; <br>
 (3) To obtain **solely language output**, you can input like what you should do in current multi-modal LLM (e.g., LLaVA). <br>
Hope you can enjoy our work!
</font>
"""
def parse_args(args):
    """Parse command-line options for the LISA Gradio demo."""
    parser = argparse.ArgumentParser(description="LISA chat")
    parser.add_argument("--version", default="xinlai/LISA-13B-llama2-v1")
    parser.add_argument("--vis_save_path", default="./vis_output", type=str)
    # Demo defaults to fp16 (unlike the CLI chat script, which uses bf16).
    parser.add_argument(
        "--precision",
        default="fp16",
        type=str,
        choices=["fp32", "bf16", "fp16"],
        help="precision for inference",
    )
    parser.add_argument("--image_size", default=1024, type=int, help="image size")
    parser.add_argument("--model_max_length", default=512, type=int)
    parser.add_argument("--lora_r", default=8, type=int)
    parser.add_argument(
        "--vision-tower", default="openai/clip-vit-large-patch14", type=str
    )
    parser.add_argument("--local-rank", default=0, type=int, help="node rank")
    # Quantization switches (mutually exclusive in practice; 4-bit wins below).
    parser.add_argument("--load_in_8bit", action="store_true", default=False)
    parser.add_argument("--load_in_4bit", action="store_true", default=False)
    parser.add_argument("--use_mm_start_end", action="store_true", default=True)
    parser.add_argument(
        "--conv_type",
        default="llava_v1",
        type=str,
        choices=["llava_v1", "llava_llama_2"],
    )
    return parser.parse_args(args)
178,631 | import argparse
import os
import re
import sys
import bleach
import cv2
import gradio as gr
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
from model.LISA import LISAForCausalLM
from model.llava import conversation as conversation_lib
from model.llava.mm_utils import tokenizer_image_token
from model.segment_anything.utils.transforms import ResizeLongestSide
from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
def preprocess(
    x,
    pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
    pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
    img_size=1024,
) -> torch.Tensor:
    """NOTE(review): the body of this function is missing from this extract.
    An identically-signed `preprocess` earlier in this file normalizes by
    mean/std and zero-pads to a square img_size x img_size input — restore
    that body here."""


# --- module-level demo setup ---
# NOTE(review): this extraction appears to have dropped lines — `kwargs` is
# used below without ever being initialized, and `torch_dtype` has no fp32
# fallback. Confirm against the original script (likely
# `kwargs = {"torch_dtype": torch_dtype}` preceded the quantization branches).

args = parse_args(sys.argv[1:])

# Tokenizer for the LLaVA/LLaMA backbone, right-padded, slow tokenizer.
tokenizer = AutoTokenizer.from_pretrained(
    args.version,
    cache_dir=None,
    model_max_length=args.model_max_length,
    padding_side="right",
    use_fast=False,
)
tokenizer.pad_token = tokenizer.unk_token
# Token id of the literal "[SEG]" marker (presumably consumed by the model's
# mask decoding — confirm in LISAForCausalLM).
args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]

if args.precision == "bf16":
    torch_dtype = torch.bfloat16
elif args.precision == "fp16":
    torch_dtype = torch.half

if args.load_in_4bit:
    kwargs.update(
        {
            "torch_dtype": torch.half,
            "load_in_4bit": True,
            "quantization_config": BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.float16,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                # Keep the SAM visual model out of quantization.
                llm_int8_skip_modules=["visual_model"],
            ),
        }
    )
elif args.load_in_8bit:
    kwargs.update(
        {
            "torch_dtype": torch.half,
            "quantization_config": BitsAndBytesConfig(
                llm_int8_skip_modules=["visual_model"],
                load_in_8bit=True,
            ),
        }
    )

model = LISAForCausalLM.from_pretrained(
    args.version, low_cpu_mem_usage=True, vision_tower=args.vision_tower, seg_token_idx=args.seg_token_idx, **kwargs
)

# Keep model special-token ids in sync with the tokenizer.
model.config.eos_token_id = tokenizer.eos_token_id
model.config.bos_token_id = tokenizer.bos_token_id
model.config.pad_token_id = tokenizer.pad_token_id

model.get_model().initialize_vision_modules(model.get_model().config)

if args.precision == "bf16":
    model = model.bfloat16().cuda()
elif (
    args.precision == "fp16" and (not args.load_in_4bit) and (not args.load_in_8bit)
):
    # DeepSpeed kernel injection for fp16: detach the vision tower first so
    # it is not wrapped, then re-attach it in half precision on the GPU.
    vision_tower = model.get_model().get_vision_tower()
    model.model.vision_tower = None
    import deepspeed

    model_engine = deepspeed.init_inference(
        model=model,
        dtype=torch.half,
        replace_with_kernel_inject=True,
        replace_method="auto",
    )
    model = model_engine.module
    model.model.vision_tower = vision_tower.half().cuda()
elif args.precision == "fp32":
    model = model.float().cuda()

clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
transform = ResizeLongestSide(args.image_size)

model.eval()
def tokenizer_image_token(
    prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None
):
    """NOTE(review): the body of this function is missing from this extract.
    This local definition shadows the `tokenizer_image_token` imported from
    model.llava.mm_utils above — see that module for the implementation."""


# NOTE(review): these constants are re-declared here even though they are also
# imported from utils.utils above; the local definitions take precedence.
IMAGE_TOKEN_INDEX = -200  # sentinel id marking where image embeddings go
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
def inference(input_str, input_image):
    """Run LISA on one (prompt, image-path) pair for the Gradio demo.

    Args:
        input_str: user text prompt; sanitized and validated before use.
        input_image: filesystem path of the image to segment.

    Returns:
        (output_image, output_str): an RGB numpy image — the segmentation
        overlay, or a canned placeholder when the input is invalid or no
        mask was produced — and the assistant's text reply.
    """
    ## filter out special chars
    input_str = bleach.clean(input_str)

    print("input_str: ", input_str, "input_image: ", input_image)

    ## input valid check: ASCII letters plus basic punctuation only
    if not re.match(r"^[A-Za-z ,.!?\'\"]+$", input_str) or len(input_str) < 1:
        # BUG FIX: the original used a comma ("...", input_str), building a
        # tuple instead of a string, so the UI displayed a tuple repr.
        output_str = "[Error] Invalid input: " + input_str
        ## error happened
        output_image = cv2.imread("./resources/error_happened.png")[:, :, ::-1]
        return output_image, output_str

    # Build the conversation prompt with the image placeholder token(s).
    conv = conversation_lib.conv_templates[args.conv_type].copy()
    conv.messages = []

    prompt = input_str
    prompt = DEFAULT_IMAGE_TOKEN + "\n" + prompt
    if args.use_mm_start_end:
        replace_token = (
            DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
        )
        prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)

    conv.append_message(conv.roles[0], prompt)
    conv.append_message(conv.roles[1], "")
    prompt = conv.get_prompt()

    image_np = cv2.imread(input_image)
    image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
    original_size_list = [image_np.shape[:2]]

    # CLIP-preprocessed image for the multimodal LLM branch.
    image_clip = (
        clip_image_processor.preprocess(image_np, return_tensors="pt")[
            "pixel_values"
        ][0]
        .unsqueeze(0)
        .cuda()
    )
    if args.precision == "bf16":
        image_clip = image_clip.bfloat16()
    elif args.precision == "fp16":
        image_clip = image_clip.half()
    else:
        image_clip = image_clip.float()

    # SAM-preprocessed image (resized + normalized + padded) for the mask branch.
    image = transform.apply_image(image_np)
    resize_list = [image.shape[:2]]

    image = (
        preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
        .unsqueeze(0)
        .cuda()
    )
    if args.precision == "bf16":
        image = image.bfloat16()
    elif args.precision == "fp16":
        image = image.half()
    else:
        image = image.float()

    input_ids = tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
    input_ids = input_ids.unsqueeze(0).cuda()

    output_ids, pred_masks = model.evaluate(
        image_clip,
        image,
        input_ids,
        resize_list,
        original_size_list,
        max_new_tokens=512,
        tokenizer=tokenizer,
    )
    # Strip the image sentinel ids before decoding.
    output_ids = output_ids[0][output_ids[0] != IMAGE_TOKEN_INDEX]

    text_output = tokenizer.decode(output_ids, skip_special_tokens=False)
    # BUG FIX: collapse double spaces; as extracted this called
    # .replace(" ", " "), a no-op — confirm against upstream.
    text_output = text_output.replace("\n", "").replace("  ", " ")
    text_output = text_output.split("ASSISTANT: ")[-1]

    print("text_output: ", text_output)
    save_img = None
    for i, pred_mask in enumerate(pred_masks):
        if pred_mask.shape[0] == 0:
            continue

        pred_mask = pred_mask.detach().cpu().numpy()[0]
        pred_mask = pred_mask > 0

        # Blend a 50% red highlight over the predicted region.
        save_img = image_np.copy()
        save_img[pred_mask] = (
            image_np * 0.5
            + pred_mask[:, :, None].astype(np.uint8) * np.array([255, 0, 0]) * 0.5
        )[pred_mask]

    # NOTE(review): "ASSITANT" is misspelled ("ASSISTANT"); kept as-is since
    # it is a user-visible string that callers/tests may match on.
    output_str = "ASSITANT: " + text_output  # input_str

    if save_img is not None:
        output_image = save_img  # input_image
    else:
        ## no seg output
        output_image = cv2.imread("./resources/no_seg_out.png")[:, :, ::-1]

    return output_image, output_str
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.