language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | openai__openai-python | src/openai/types/responses/response_input_item_param.py | {
"start": 9919,
"end": 10615
} | class ____(TypedDict, total=False):
call_id: Required[str]
"""The unique ID of the apply patch tool call generated by the model."""
operation: Required[ApplyPatchCallOperation]
"""
The specific create, delete, or update instruction for the apply_patch tool
call.
"""
status: Required[Literal["in_progress", "completed"]]
"""The status of the apply patch tool call. One of `in_progress` or `completed`."""
type: Required[Literal["apply_patch_call"]]
"""The type of the item. Always `apply_patch_call`."""
id: Optional[str]
"""The unique ID of the apply patch tool call.
Populated when this item is returned via API.
"""
| ApplyPatchCall |
python | sanic-org__sanic | guide/webapp/display/plugins/tabs.py | {
"start": 288,
"end": 1562
} | class ____(DirectivePlugin):
def parse(
self, block: BlockParser, m: Match, state: BlockState
) -> dict[str, Any]:
info = m.groupdict()
new_state = block.state_cls()
new_state.process(dedent(info["text"]))
block.parse(new_state)
return {
"type": "tab",
"text": info["text"],
"children": new_state.tokens,
"attrs": {
"title": info["title"],
},
}
def __call__( # type: ignore
self,
directive: RSTDirective,
md: Markdown,
) -> None:
directive.register("tab", self.parse)
if md.renderer.NAME == "html":
md.renderer.register("tab", self._render_tab)
def _render_tab(self, renderer: HTMLRenderer, text: str, **attrs):
start = (
'<div class="tab-container mt-6"><div class="tabs"><ul>\n'
if attrs.get("first")
else ""
)
end = (
'</ul></div><div class="tab-display"></div></div>\n'
if attrs.get("last")
else ""
)
content = f'<div class="tab-content">{text}</div>\n'
tab = f"<li><a>{attrs['title']}</a>{content}</li>\n"
return start + tab + end
| Tabs |
python | pydata__xarray | xarray/core/parallel.py | {
"start": 654,
"end": 25062
} | class ____(TypedDict):
shapes: dict[Hashable, int]
coords: set[Hashable]
data_vars: set[Hashable]
def unzip(iterable):
return zip(*iterable, strict=True)
def assert_chunks_compatible(a: Dataset, b: Dataset):
a = a.unify_chunks()
b = b.unify_chunks()
for dim in set(a.chunks).intersection(set(b.chunks)):
if a.chunks[dim] != b.chunks[dim]:
raise ValueError(f"Chunk sizes along dimension {dim!r} are not equal.")
def check_result_variables(
result: DataArray | Dataset,
expected: ExpectedDict,
kind: Literal["coords", "data_vars"],
):
if kind == "coords":
nice_str = "coordinate"
elif kind == "data_vars":
nice_str = "data"
# check that coords and data variables are as expected
missing = expected[kind] - set(getattr(result, kind))
if missing:
raise ValueError(
"Result from applying user function does not contain "
f"{nice_str} variables {missing}."
)
extra = set(getattr(result, kind)) - expected[kind]
if extra:
raise ValueError(
"Result from applying user function has unexpected "
f"{nice_str} variables {extra}."
)
def dataset_to_dataarray(obj: Dataset) -> DataArray:
if not isinstance(obj, Dataset):
raise TypeError(f"Expected Dataset, got {type(obj)}")
if len(obj.data_vars) > 1:
raise TypeError(
"Trying to convert Dataset with more than one data variable to DataArray"
)
return next(iter(obj.data_vars.values()))
def dataarray_to_dataset(obj: DataArray) -> Dataset:
# only using _to_temp_dataset would break
# func = lambda x: x.to_dataset()
# since that relies on preserving name.
if obj.name is None:
dataset = obj._to_temp_dataset()
else:
dataset = obj.to_dataset()
return dataset
def make_meta(obj):
"""If obj is a DataArray or Dataset, return a new object of the same type and with
the same variables and dtypes, but where all variables have size 0 and numpy
backend.
If obj is neither a DataArray nor Dataset, return it unaltered.
"""
if isinstance(obj, DataArray):
obj_array = obj
obj = dataarray_to_dataset(obj)
elif isinstance(obj, Dataset):
obj_array = None
else:
return obj
from dask.array.utils import meta_from_array
meta = Dataset()
for name, variable in obj.variables.items():
meta_obj = meta_from_array(variable.data, ndim=variable.ndim)
meta[name] = (variable.dims, meta_obj, variable.attrs)
meta.attrs = obj.attrs
meta = meta.set_coords(obj.coords)
if obj_array is not None:
return dataset_to_dataarray(meta)
return meta
def infer_template(
func: Callable[..., T_Xarray], obj: DataArray | Dataset, *args, **kwargs
) -> T_Xarray:
"""Infer return object by running the function on meta objects."""
meta_args = [make_meta(arg) for arg in (obj,) + args]
try:
template = func(*meta_args, **kwargs)
except Exception as e:
raise Exception(
"Cannot infer object returned from running user provided function. "
"Please supply the 'template' kwarg to map_blocks."
) from e
if not isinstance(template, Dataset | DataArray):
raise TypeError(
"Function must return an xarray DataArray or Dataset. Instead it returned "
f"{type(template)}"
)
return template
def make_dict(x: DataArray | Dataset) -> dict[Hashable, Any]:
"""Map variable name to numpy(-like) data
(Dataset.to_dict() is too complicated).
"""
if isinstance(x, DataArray):
x = x._to_temp_dataset()
return {k: v.data for k, v in x.variables.items()}
def _get_chunk_slicer(dim: Hashable, chunk_index: Mapping, chunk_bounds: Mapping):
if dim in chunk_index:
which_chunk = chunk_index[dim]
return slice(chunk_bounds[dim][which_chunk], chunk_bounds[dim][which_chunk + 1])
return slice(None)
def subset_dataset_to_block(
graph: dict, gname: str, dataset: Dataset, input_chunk_bounds, chunk_index
):
"""
Creates a task that subsets an xarray dataset to a block determined by chunk_index.
Block extents are determined by input_chunk_bounds.
Also subtasks that subset the constituent variables of a dataset.
"""
import dask
# this will become [[name1, variable1],
# [name2, variable2],
# ...]
# which is passed to dict and then to Dataset
data_vars = []
coords = []
chunk_tuple = tuple(chunk_index.values())
chunk_dims_set = set(chunk_index)
variable: Variable
for name, variable in dataset.variables.items():
# make a task that creates tuple of (dims, chunk)
if dask.is_dask_collection(variable.data):
# get task name for chunk
chunk = (
variable.data.name,
*tuple(chunk_index[dim] for dim in variable.dims),
)
chunk_variable_task = (f"{name}-{gname}-{chunk[0]!r}",) + chunk_tuple
graph[chunk_variable_task] = (
tuple,
[variable.dims, chunk, variable.attrs],
)
else:
assert name in dataset.dims or variable.ndim == 0
# non-dask array possibly with dimensions chunked on other variables
# index into variable appropriately
subsetter = {
dim: _get_chunk_slicer(dim, chunk_index, input_chunk_bounds)
for dim in variable.dims
}
if set(variable.dims) < chunk_dims_set:
this_var_chunk_tuple = tuple(chunk_index[dim] for dim in variable.dims)
else:
this_var_chunk_tuple = chunk_tuple
chunk_variable_task = (
f"{name}-{gname}-{dask.base.tokenize(subsetter)}",
) + this_var_chunk_tuple
# We are including a dimension coordinate,
# minimize duplication by not copying it in the graph for every chunk.
if variable.ndim == 0 or chunk_variable_task not in graph:
subset = variable.isel(subsetter)
graph[chunk_variable_task] = (
tuple,
[subset.dims, subset._data, subset.attrs],
)
# this task creates dict mapping variable name to above tuple
if name in dataset._coord_names:
coords.append([name, chunk_variable_task])
else:
data_vars.append([name, chunk_variable_task])
return (Dataset, (dict, data_vars), (dict, coords), dataset.attrs)
def map_blocks(
func: Callable[..., T_Xarray],
obj: DataArray | Dataset,
args: Sequence[Any] = (),
kwargs: Mapping[str, Any] | None = None,
template: DataArray | Dataset | None = None,
) -> T_Xarray:
"""Apply a function to each block of a DataArray or Dataset.
.. warning::
This function is experimental and its signature may change.
Parameters
----------
func : callable
User-provided function that accepts a DataArray or Dataset as its first
parameter ``obj``. The function will receive a subset or 'block' of ``obj`` (see below),
corresponding to one chunk along each chunked dimension. ``func`` will be
executed as ``func(subset_obj, *subset_args, **kwargs)``.
This function must return either a single DataArray or a single Dataset.
This function cannot add a new chunked dimension.
obj : DataArray, Dataset
Passed to the function as its first argument, one block at a time.
args : sequence
Passed to func after unpacking and subsetting any xarray objects by blocks.
xarray objects in args must be aligned with obj, otherwise an error is raised.
kwargs : mapping
Passed verbatim to func after unpacking. xarray objects, if any, will not be
subset to blocks. Passing dask collections in kwargs is not allowed.
template : DataArray or Dataset, optional
xarray object representing the final result after compute is called. If not provided,
the function will be first run on mocked-up data, that looks like ``obj`` but
has sizes 0, to determine properties of the returned object such as dtype,
variable names, attributes, new dimensions and new indexes (if any).
``template`` must be provided if the function changes the size of existing dimensions.
When provided, ``attrs`` on variables in `template` are copied over to the result. Any
``attrs`` set by ``func`` will be ignored.
Returns
-------
obj : same as obj
A single DataArray or Dataset with dask backend, reassembled from the outputs of the
function.
Notes
-----
This function is designed for when ``func`` needs to manipulate a whole xarray object
subset to each block. Each block is loaded into memory. In the more common case where
``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.
If none of the variables in ``obj`` is backed by dask arrays, calling this function is
equivalent to calling ``func(obj, *args, **kwargs)``.
See Also
--------
dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks
xarray.DataArray.map_blocks
Examples
--------
Calculate an anomaly from climatology using ``.groupby()``. Using
``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,
its indices, and its methods like ``.groupby()``.
>>> def calculate_anomaly(da, groupby_type="time.month"):
... gb = da.groupby(groupby_type)
... clim = gb.mean(dim="time")
... return gb - clim
...
>>> time = xr.date_range("1990-01", "1992-01", freq="ME", use_cftime=True)
>>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"])
>>> np.random.seed(123)
>>> array = xr.DataArray(
... np.random.rand(len(time)),
... dims=["time"],
... coords={"time": time, "month": month},
... ).chunk()
>>> array.map_blocks(calculate_anomaly, template=array).compute()
<xarray.DataArray (time: 24)> Size: 192B
array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862,
0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714,
-0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 ,
0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108,
0.07673453, 0.22865714, 0.19063865, -0.0590131 ])
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12
Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments
to the function being applied in ``xr.map_blocks()``:
>>> array.map_blocks(
... calculate_anomaly,
... kwargs={"groupby_type": "time.year"},
... template=array,
... ) # doctest: +ELLIPSIS
<xarray.DataArray (time: 24)> Size: 192B
dask.array<<this-array>-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray>
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B dask.array<chunksize=(24,), meta=np.ndarray>
"""
def _wrapper(
func: Callable,
args: list,
kwargs: dict,
arg_is_array: Iterable[bool],
expected: ExpectedDict,
expected_indexes: dict[Hashable, Index],
):
"""
Wrapper function that receives datasets in args; converts to dataarrays when necessary;
passes these to the user function `func` and checks returned objects for expected shapes/sizes/etc.
"""
converted_args = [
dataset_to_dataarray(arg) if is_array else arg
for is_array, arg in zip(arg_is_array, args, strict=True)
]
result = func(*converted_args, **kwargs)
merged_coordinates = merge(
[arg.coords for arg in args if isinstance(arg, Dataset | DataArray)],
join="exact",
compat="override",
).coords
# check all dims are present
missing_dimensions = set(expected["shapes"]) - set(result.sizes)
if missing_dimensions:
raise ValueError(
f"Dimensions {missing_dimensions} missing on returned object."
)
# check that index lengths and values are as expected
for name, index in result._indexes.items():
if (
name in expected["shapes"]
and result.sizes[name] != expected["shapes"][name]
):
raise ValueError(
f"Received dimension {name!r} of length {result.sizes[name]}. "
f"Expected length {expected['shapes'][name]}."
)
# ChainMap wants MutableMapping, but xindexes is Mapping
merged_indexes = collections.ChainMap(
expected_indexes,
merged_coordinates.xindexes, # type: ignore[arg-type]
)
expected_index = merged_indexes.get(name, None)
if expected_index is not None and not index.equals(expected_index):
raise ValueError(
f"Expected index {name!r} to be {expected_index!r}. Received {index!r} instead."
)
# check that all expected variables were returned
check_result_variables(result, expected, "coords")
if isinstance(result, Dataset):
check_result_variables(result, expected, "data_vars")
return make_dict(result)
if template is not None and not isinstance(template, DataArray | Dataset):
raise TypeError(
f"template must be a DataArray or Dataset. Received {type(template).__name__} instead."
)
if not isinstance(args, Sequence):
raise TypeError("args must be a sequence (for example, a list or tuple).")
if kwargs is None:
kwargs = {}
elif not isinstance(kwargs, Mapping):
raise TypeError("kwargs must be a mapping (for example, a dict)")
for value in kwargs.values():
if is_dask_collection(value):
raise TypeError(
"Cannot pass dask collections in kwargs yet. Please compute or "
"load values before passing to map_blocks."
)
if not is_dask_collection(obj):
return func(obj, *args, **kwargs)
try:
import dask
import dask.array
from dask.base import tokenize
from dask.highlevelgraph import HighLevelGraph
except ImportError:
pass
all_args = [obj] + list(args)
is_xarray = [isinstance(arg, Dataset | DataArray) for arg in all_args]
is_array = [isinstance(arg, DataArray) for arg in all_args]
# there should be a better way to group this. partition?
xarray_indices, xarray_objs = unzip(
(index, arg) for index, arg in enumerate(all_args) if is_xarray[index]
)
others = [
(index, arg) for index, arg in enumerate(all_args) if not is_xarray[index]
]
# all xarray objects must be aligned. This is consistent with apply_ufunc.
aligned = align(*xarray_objs, join="exact")
xarray_objs = tuple(
dataarray_to_dataset(arg) if isinstance(arg, DataArray) else arg
for arg in aligned
)
# rechunk any numpy variables appropriately
xarray_objs = tuple(arg.chunk(arg.chunksizes) for arg in xarray_objs)
merged_coordinates = merge(
[arg.coords for arg in aligned],
join="exact",
compat="override",
).coords
_, npargs = unzip(
sorted(
list(zip(xarray_indices, xarray_objs, strict=True)) + others,
key=lambda x: x[0],
)
)
# check that chunk sizes are compatible
input_chunks = dict(npargs[0].chunks)
for arg in xarray_objs[1:]:
assert_chunks_compatible(npargs[0], arg)
input_chunks.update(arg.chunks)
coordinates: Coordinates
if template is None:
# infer template by providing zero-shaped arrays
template = infer_template(func, aligned[0], *args, **kwargs)
template_coords = set(template.coords)
preserved_coord_vars = template_coords & set(merged_coordinates)
new_coord_vars = template_coords - set(merged_coordinates)
preserved_coords = merged_coordinates.to_dataset()[preserved_coord_vars]
# preserved_coords contains all coordinates variables that share a dimension
# with any index variable in preserved_indexes
# Drop any unneeded vars in a second pass, this is required for e.g.
# if the mapped function were to drop a non-dimension coordinate variable.
preserved_coords = preserved_coords.drop_vars(
tuple(k for k in preserved_coords.variables if k not in template_coords)
)
coordinates = merge(
(preserved_coords, template.coords.to_dataset()[new_coord_vars]),
# FIXME: this should be join="exact", but breaks a test
join="outer",
compat="override",
).coords
output_chunks: Mapping[Hashable, tuple[int, ...]] = {
dim: input_chunks[dim] for dim in template.dims if dim in input_chunks
}
else:
# template xarray object has been provided with proper sizes and chunk shapes
coordinates = template.coords
output_chunks = template.chunksizes
if not output_chunks:
raise ValueError(
"Provided template has no dask arrays. "
" Please construct a template with appropriately chunked dask arrays."
)
new_indexes = set(template.xindexes) - set(merged_coordinates)
modified_indexes = set(
name
for name, xindex in coordinates.xindexes.items()
if not xindex.equals(merged_coordinates.xindexes.get(name, None))
)
for dim in output_chunks:
if dim in input_chunks and len(input_chunks[dim]) != len(output_chunks[dim]):
raise ValueError(
"map_blocks requires that one block of the input maps to one block of output. "
f"Expected number of output chunks along dimension {dim!r} to be {len(input_chunks[dim])}. "
f"Received {len(output_chunks[dim])} instead. Please provide template if not provided, or "
"fix the provided template."
)
if isinstance(template, DataArray):
result_is_array = True
template_name = template.name
template = template._to_temp_dataset()
elif isinstance(template, Dataset):
result_is_array = False
else:
raise TypeError(
f"func output must be DataArray or Dataset; got {type(template)}"
)
# We're building a new HighLevelGraph hlg. We'll have one new layer
# for each variable in the dataset, which is the result of the
# func applied to the values.
graph: dict[Any, Any] = {}
new_layers: collections.defaultdict[str, dict[Any, Any]] = collections.defaultdict(
dict
)
gname = f"{dask.utils.funcname(func)}-{dask.base.tokenize(npargs[0], args, kwargs)}"
# map dims to list of chunk indexes
ichunk = {dim: range(len(chunks_v)) for dim, chunks_v in input_chunks.items()}
# mapping from chunk index to slice bounds
input_chunk_bounds = {
dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in input_chunks.items()
}
output_chunk_bounds = {
dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in output_chunks.items()
}
computed_variables = set(template.variables) - set(coordinates.indexes)
# iterate over all possible chunk combinations
for chunk_tuple in itertools.product(*ichunk.values()):
# mapping from dimension name to chunk index
chunk_index = dict(zip(ichunk.keys(), chunk_tuple, strict=True))
blocked_args = [
(
subset_dataset_to_block(
graph, gname, arg, input_chunk_bounds, chunk_index
)
if isxr
else arg
)
for isxr, arg in zip(is_xarray, npargs, strict=True)
]
# only include new or modified indexes to minimize duplication of data
indexes = {
dim: coordinates.xindexes[dim][
_get_chunk_slicer(dim, chunk_index, output_chunk_bounds)
]
for dim in (new_indexes | modified_indexes)
}
tokenized_indexes: dict[Hashable, str] = {}
for k, v in indexes.items():
tokenized_v = tokenize(v)
graph[f"{k}-coordinate-{tokenized_v}"] = v
tokenized_indexes[k] = f"{k}-coordinate-{tokenized_v}"
# raise nice error messages in _wrapper
expected: ExpectedDict = {
# input chunk 0 along a dimension maps to output chunk 0 along the same dimension
# even if length of dimension is changed by the applied function
"shapes": {
k: output_chunks[k][v]
for k, v in chunk_index.items()
if k in output_chunks
},
"data_vars": set(template.data_vars.keys()),
"coords": set(template.coords.keys()),
}
from_wrapper = (gname,) + chunk_tuple
graph[from_wrapper] = (
_wrapper,
func,
blocked_args,
kwargs,
is_array,
expected,
(dict, [[k, v] for k, v in tokenized_indexes.items()]),
)
# mapping from variable name to dask graph key
var_key_map: dict[Hashable, str] = {}
for name in computed_variables:
variable = template.variables[name]
gname_l = f"{name}-{gname}"
var_key_map[name] = gname_l
# unchunked dimensions in the input have one chunk in the result
# output can have new dimensions with exactly one chunk
key: tuple[Any, ...] = (gname_l,) + tuple(
chunk_index.get(dim, 0) for dim in variable.dims
)
# We're adding multiple new layers to the graph:
# The first new layer is the result of the computation on
# the array.
# Then we add one layer per variable, which extracts the
# result for that variable, and depends on just the first new
# layer.
new_layers[gname_l][key] = (operator.getitem, from_wrapper, name)
hlg = HighLevelGraph.from_collections(
gname,
graph,
dependencies=[arg for arg in npargs if dask.is_dask_collection(arg)],
)
# This adds in the getitems for each variable in the dataset.
hlg = HighLevelGraph(
{**hlg.layers, **new_layers},
dependencies={
**hlg.dependencies,
**{name: {gname} for name in new_layers.keys()},
},
)
result = Dataset(coords=coordinates, attrs=template.attrs)
for index in result._indexes:
result[index].attrs = template[index].attrs
result[index].encoding = template[index].encoding
for name, gname_l in var_key_map.items():
dims = template[name].dims
var_chunks = []
for dim in dims:
if dim in output_chunks:
var_chunks.append(output_chunks[dim])
elif dim in result._indexes:
var_chunks.append((result.sizes[dim],))
elif dim in template.dims:
# new unindexed dimension
var_chunks.append((template.sizes[dim],))
data = dask.array.Array(
hlg, name=gname_l, chunks=var_chunks, dtype=template[name].dtype
)
result[name] = (dims, data, template[name].attrs)
result[name].encoding = template[name].encoding
result = result.set_coords(template._coord_names)
if result_is_array:
da = dataset_to_dataarray(result)
da.name = template_name
return da # type: ignore[return-value]
return result # type: ignore[return-value]
| ExpectedDict |
python | django__django | tests/template_tests/test_custom.py | {
"start": 1032,
"end": 1437
} | class ____(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(app_dirs=True, libraries=LIBRARIES)
super().setUpClass()
def verify_tag(self, tag, name):
self.assertEqual(tag.__name__, name)
self.assertEqual(tag.__doc__, "Expected %s __doc__" % name)
self.assertEqual(tag.__dict__["anything"], "Expected %s __dict__" % name)
| TagTestCase |
python | openai__openai-python | src/openai/_base_client.py | {
"start": 8889,
"end": 10410
} | class ____(BasePage[_T], Generic[_T]):
_client: AsyncAPIClient = pydantic.PrivateAttr()
def _set_private_attributes(
self,
model: Type[_T],
client: AsyncAPIClient,
options: FinalRequestOptions,
) -> None:
if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None:
self.__pydantic_private__ = {}
self._model = model
self._client = client
self._options = options
async def __aiter__(self) -> AsyncIterator[_T]:
async for page in self.iter_pages():
for item in page._get_page_items():
yield item
async def iter_pages(self: AsyncPageT) -> AsyncIterator[AsyncPageT]:
page = self
while True:
yield page
if page.has_next_page():
page = await page.get_next_page()
else:
return
async def get_next_page(self: AsyncPageT) -> AsyncPageT:
info = self.next_page_info()
if not info:
raise RuntimeError(
"No next page expected; please check `.has_next_page()` before calling `.get_next_page()`."
)
options = self._info_to_options(info)
return await self._client._request_api_list(self._model, page=self.__class__, options=options)
_HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient])
_DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]])
| BaseAsyncPage |
python | milvus-io__pymilvus | pymilvus/orm/index.py | {
"start": 829,
"end": 5183
} | class ____:
def __init__(
self,
collection: Collection,
field_name: str,
index_params: Dict,
**kwargs,
) -> Index:
"""Creates index on a specified field according to the index parameters.
Args:
collection(Collection): The collection in which the index is created
field_name(str): The name of the field to create an index for.
index_params(dict): Indexing parameters.
kwargs:
* *index_name* (``str``) --
The name of index which will be created. If no index name is specified,
default index name will be used.
Raises:
MilvusException: If anything goes wrong.
Examples:
>>> from pymilvus import *
>>> from pymilvus.schema import *
>>> from pymilvus.types import DataType
>>> connections.connect()
<pymilvus.client.stub.Milvus object at 0x7fac15e53470>
>>> field1 = FieldSchema("int64", DataType.INT64, is_primary=True)
>>> field2 = FieldSchema("fvec", DataType.FLOAT_VECTOR, is_primary=False, dim=128)
>>> schema = CollectionSchema(fields=[field1, field2])
>>> collection = Collection(name='test_collection', schema=schema)
>>> # insert some data
>>> index_params = {
... "index_type": "IVF_FLAT",
... "metric_type": "L2",
... "params": {"nlist": 128}}
>>> index = Index(collection, "fvec", index_params)
>>> index.params
{'index_type': 'IVF_FLAT', 'metric_type': 'L2', 'params': {'nlist': 128}}
>>> index.collection_name
test_collection
>>> index.field_name
fvec
>>> index.drop()
"""
# ruff: noqa: PLC0415
from .collection import Collection
if not isinstance(collection, Collection):
raise CollectionNotExistException(message=ExceptionsMessage.CollectionType)
self._collection = collection
self._field_name = field_name
self._index_params = index_params
self._index_name = kwargs.get("index_name", Config.IndexName)
if kwargs.get("construct_only", False):
return
conn = self._get_connection()
conn.create_index(self._collection.name, self._field_name, self._index_params, **kwargs)
indexes = conn.list_indexes(self._collection.name)
for index in indexes:
if index.field_name == self._field_name:
self._index_name = index.index_name
break
def _get_connection(self):
return self._collection._get_connection()
@property
def params(self) -> dict:
"""dict: The index parameters"""
return copy.deepcopy(self._index_params)
@property
def collection_name(self) -> str:
"""str: The corresponding collection name"""
return self._collection.name
@property
def field_name(self) -> str:
"""str: The corresponding field name."""
return self._field_name
@property
def index_name(self) -> str:
"""str: The corresponding index name."""
return self._index_name
def __eq__(self, other: Index) -> bool:
"""The order of the fields of index must be consistent."""
return self.to_dict() == other.to_dict()
def to_dict(self):
"""Put collection name, field name and index params into dict."""
return {
"collection": self._collection._name,
"field": self._field_name,
"index_name": self._index_name,
"index_param": self.params,
}
def drop(self, timeout: Optional[float] = None, **kwargs):
"""Drop an index and its corresponding index files.
Args:
timeout(float, optional): An optional duration of time in seconds to allow
for the RPC. When timeout is set to None, client waits until server response
or error occur
"""
conn = self._get_connection()
conn.drop_index(
collection_name=self._collection.name,
field_name=self.field_name,
index_name=self.index_name,
timeout=timeout,
)
| Index |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 186570,
"end": 188386
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
host: str,
port: int,
database: str,
username: str,
password: str,
schemas: Optional[list[str]] = None,
jdbc_url_params: Optional[str] = None,
):
"""Airbyte Source for Redshift.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/redshift
Args:
name (str): The name of the destination.
host (str): Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com).
port (int): Port of the database.
database (str): Name of the database.
schemas (Optional[List[str]]): The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive.
username (str): Username to use to access the database.
password (str): Password associated with the username.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
"""
self.host = check.str_param(host, "host")
self.port = check.int_param(port, "port")
self.database = check.str_param(database, "database")
self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)
self.username = check.str_param(username, "username")
self.password = check.str_param(password, "password")
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
super().__init__("Redshift", name)
| RedshiftSource |
python | django__django | tests/utils_tests/test_module/__init__.py | {
"start": 0,
"end": 55
} | class ____:
_registry = {}
site = SiteMock()
| SiteMock |
python | PyCQA__pylint | tests/functional/a/access/access_attr_before_def_false_positive.py | {
"start": 1918,
"end": 2169
} | class ____:
"""blabla"""
_the_instance = None
def __new__(cls):
if cls._the_instance is None:
cls._the_instance = object.__new__(cls)
return cls._the_instance
def __init__(self):
pass
| QoSALConnection |
python | huggingface__transformers | tests/models/myt5/test_tokenization_myt5.py | {
"start": 2672,
"end": 6525
} | class ____(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MyT5Tokenizer
from_pretrained_id = "Tomlim/myt5-base"
test_rust_tokenizer = False
def get_tokenizer(cls, **kwargs) -> MyT5Tokenizer:
return cls.tokenizer_class.from_pretrained("Tomlim/myt5-base", **kwargs)
@unittest.skip(reason="inputs cannot be pretokenized as ids depend on whole input string")
def test_pretokenized_inputs(self):
pass
def test_convert_tokens_to_string_format(self):
tokenizer = self.get_tokenizer()
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokens = ["52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90", "</s>"]
string = tokenizer.convert_tokens_to_string(tokens)
self.assertIsInstance(string, str)
def test_simple_tokenize(self):
tokenizer = self.get_tokenizer()
in_str = "Hello World"
out_tokens = ["52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90"]
self.assertEqual(tokenizer.tokenize(in_str), out_tokens)
in_pl_str = "Witaj świecie"
out_tokens = ["77", "41", "69", "74", "61", "6a", "20", "4b", "a5", "97", "63", "69", "65"]
self.assertEqual(tokenizer.tokenize(in_pl_str), out_tokens)
in_jp_str = "こんにちは世界"
out_tokens = ["58", "80", "91", "a1", "e4", "b8", "96", "e7", "95", "8c"]
self.assertEqual(tokenizer.tokenize(in_jp_str), out_tokens)
def test_batch_tokenize(self):
tokenizer = self.get_tokenizer()
in_batch = ["Hello World", "Witaj świecie", "こんにちは世界"]
out_tokens = [
["52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90", "</s>"],
["77", "41", "69", "74", "61", "6a", "20", "4b", "a5", "97", "63", "69", "65", "</s>"],
["58", "80", "91", "a1", "e4", "b8", "96", "e7", "95", "8c", "</s>"],
]
self.assertListEqual(
[tokenizer.convert_ids_to_tokens(ids) for ids in tokenizer(in_batch)["input_ids"]], out_tokens
)
def test_special_bytes(self):
tokenizer = self.get_tokenizer()
in_str_special = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09"
out_tokens = ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09"]
self.assertEqual(tokenizer.tokenize(in_str_special), out_tokens)
in_str_mixed = "\x00Hello\x01 World\x02"
out_tokens = ["00", "52", "85", "91", "9f", "6f", "01", "20", "52", "85", "9f", "90", "02"]
self.assertEqual(tokenizer.tokenize(in_str_mixed), out_tokens)
def test_special_tokens(self):
tokenizer = self.get_tokenizer()
in_str_special = "<unk></s><pad>"
out_tokens = ["<unk>", "</s>", "<pad>"]
self.assertEqual(tokenizer.tokenize(in_str_special), out_tokens)
in_str_not_special = "<s>"
out_tokens = ["3c", "73", "3e"]
self.assertEqual(tokenizer.tokenize(in_str_not_special), out_tokens)
in_str_mixed = "<s>Hello World</s>"
out_tokens = ["3c", "73", "3e", "52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90", "</s>"]
self.assertEqual(tokenizer.tokenize(in_str_mixed), out_tokens)
def test_token_ids_conversion(self):
tokenizer = self.get_tokenizer()
tokens_range = [f"{x:02x}" for x in range(256)]
indices_range = list(range(3, 256 + 3))
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens_range), indices_range)
self.assertListEqual(tokenizer.convert_ids_to_tokens(indices_range), tokens_range)
special_tokens = ["<pad>", "</s>", "<unk>"]
special_indices = [0, 1, 2]
self.assertListEqual(tokenizer.convert_tokens_to_ids(special_tokens), special_indices)
self.assertListEqual(tokenizer.convert_ids_to_tokens(special_indices), special_tokens)
| MyT5TokenizationTest |
python | getsentry__sentry | src/sentry/api/endpoints/chunk.py | {
"start": 2068,
"end": 3202
} | class ____(OrganizationReleasePermission):
"""
Allow OrganizationReleasePermission OR Launchpad service authentication
"""
def _is_launchpad_authenticated(self, request: Request) -> bool:
"""Check if the request is authenticated via Launchpad service."""
return isinstance(
getattr(request, "successful_authenticator", None), LaunchpadRpcSignatureAuthentication
)
def has_permission(self, request: Request, view) -> bool:
# Allow access for Launchpad service authentication
if self._is_launchpad_authenticated(request):
return True
# Fall back to standard organization permission check
return super().has_permission(request, view)
def has_object_permission(self, request: Request, view, organization) -> bool:
# Allow access for Launchpad service authentication
if self._is_launchpad_authenticated(request):
return True
# Fall back to standard organization permission check
return super().has_object_permission(request, view, organization)
@region_silo_endpoint
| ChunkUploadPermission |
python | tornadoweb__tornado | tornado/queues.py | {
"start": 2153,
"end": 2328
} | class ____(Generic[_T]):
def __init__(self, q: "Queue[_T]") -> None:
self.q = q
def __anext__(self) -> Awaitable[_T]:
return self.q.get()
| _QueueIterator |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 16666,
"end": 17356
} | class ____(Protocol):
async def executemany(
self, operation: Any, seq_of_parameters: Sequence[Tuple[Any, ...]]
) -> Any: ...
async def reload_schema_state(self) -> None: ...
async def prepare(
self, operation: Any, *, name: Optional[str] = None
) -> Any: ...
def is_closed(self) -> bool: ...
def transaction(
self,
*,
isolation: Optional[str] = None,
readonly: bool = False,
deferrable: bool = False,
) -> _AsyncpgTransaction: ...
def fetchrow(self, operation: str) -> Any: ...
async def close(self, timeout: int = ...) -> None: ...
def terminate(self) -> None: ...
| _AsyncpgConnection |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/config.py | {
"start": 908,
"end": 1320
} | class ____(BaseModel):
"""configuration serializer."""
page_size: int
auto_refresh_interval: int
hide_paused_dags_by_default: bool
instance_name: str
enable_swagger_ui: bool
require_confirmation_dag_change: bool
default_wrap: bool
test_connection: str
dashboard_alert: list[UIAlert]
show_external_log_redirect: bool
external_log_name: str | None = None
| ConfigResponse |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 40780,
"end": 54469
} | class ____(Blip2PreTrainedModel):
config: Blip2Config
main_input_name = "pixel_values"
_keep_in_fp32_modules = ["query_tokens", "qformer"]
_supports_flash_attn = False # because self.qformer does not support FA2
def __init__(self, config: Blip2Config):
super().__init__(config)
self.vision_model = Blip2VisionModel._from_config(config.vision_config)
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
self.qformer = Blip2QFormerModel._from_config(config.qformer_config)
self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
if config.use_decoder_only_language_model:
language_model = AutoModelForCausalLM.from_config(config.text_config)
else:
language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
self.language_model = language_model
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def set_output_embeddings(self, new_embeddings):
self.language_model.set_output_embeddings(new_embeddings)
def get_output_embeddings(self) -> nn.Module:
return self.language_model.get_output_embeddings()
def get_encoder(self, modality=None):
if modality is None:
return self.language_model.get_encoder()
else:
return super().get_encoder(modality=modality)
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(
self,
input_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[torch.FloatTensor, CausalLMOutputWithPast]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Returns:
text_outputs (``torch.FloatTensor`):
The language model's last hidden states.
Examples:
```python
>>> import torch
>>> from transformers import AutoTokenizer, Blip2Model
>>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```"""
if self.config.use_decoder_only_language_model:
text_outputs: CausalLMOutputWithPast = self.language_model(
input_ids=input_ids,
attention_mask=attention_mask,
return_dict=True,
)
else:
inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
text_outputs: Seq2SeqLMOutput = self.language_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=labels,
return_dict=True,
)
return text_outputs.logits
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(
self,
pixel_values: torch.FloatTensor,
interpolate_pos_encoding: bool = False,
) -> Union[torch.FloatTensor, CausalLMOutputWithPast]:
r"""
Returns:
vision_outputs (`torch.FloatTensor`):
The vision model's last layer pooled logits.
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, Blip2Model
>>> from transformers.image_utils import load_image
>>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_outputs = model.get_image_features(**inputs)
```"""
vision_outputs = self.vision_model(
pixel_values=pixel_values,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=True,
)
return vision_outputs.pooler_output
@filter_out_non_signature_kwargs()
@auto_docstring
def get_qformer_features(
self,
pixel_values: torch.FloatTensor,
interpolate_pos_encoding: bool = False,
) -> Union[torch.FloatTensor, BaseModelOutputWithPooling]:
r"""
Returns:
qformer_outputs (`torch.FloatTensor`):
The Q-Former model's last layer hidden states.
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, Blip2Model
>>> from transformers.image_utils import load_image
>>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... qformer_outputs = model.get_qformer_features(**inputs)
```"""
vision_outputs: BaseModelOutputWithPooling = self.vision_model(
pixel_values=pixel_values,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=True,
)
image_embeds = vision_outputs.last_hidden_state
# step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.qformer(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_attention_mask,
return_dict=True,
)
return query_outputs.last_hidden_state
def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
input_ids: torch.FloatTensor,
attention_mask: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
interpolate_pos_encoding: bool = False,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Blip2ForConditionalGenerationModelOutput]:
r"""
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Only relevant in case an encoder-decoder language model (like T5) is used.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import Blip2Processor, Blip2Model
>>> import torch
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", dtype=torch.float16)
>>> model.to(device) # doctest: +IGNORE_RESULT
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> prompt = "Question: how many cats are there? Answer:"
>>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16)
>>> outputs = model(**inputs)
```"""
# step 1: forward the images through the vision encoder,
# to get image embeddings of shape (batch_size, seq_len, hidden_size)
vision_outputs = self.vision_model(
pixel_values=pixel_values,
interpolate_pos_encoding=interpolate_pos_encoding,
**kwargs,
)
image_embeds = vision_outputs[0]
# step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_outputs = self.qformer(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_attention_mask,
**kwargs,
)
query_output = query_outputs[0]
# Qformer is kept in fp32, we downcast the output back if needed
if query_output.dtype != image_embeds.dtype:
query_output = query_output.to(image_embeds.dtype)
# step 3: use the language model, conditioned on the query outputs and the prompt
language_model_inputs = self.language_projection(query_output)
inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
inputs_embeds = inputs_embeds.to(language_model_inputs.device).masked_scatter(
special_image_mask, language_model_inputs
)
if self.config.use_decoder_only_language_model:
outputs = self.language_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
**kwargs,
)
logits = outputs[0]
loss = None
# we compute the loss here since we need to take into account the sequence length of the query embeds
if labels is not None:
labels = labels.to(logits.device)
logits = logits[:, -labels.size(1) :, :]
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous().to(logits.device)
# Flatten the tokens
loss_fct = CrossEntropyLoss(reduction="mean")
loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
else:
outputs = self.language_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=labels,
return_dict=True,
**kwargs,
)
loss = outputs.loss
logits = outputs.logits
return Blip2ForConditionalGenerationModelOutput(
loss=loss,
logits=logits,
vision_outputs=vision_outputs,
qformer_outputs=query_outputs,
language_model_outputs=outputs,
)
@auto_docstring
| Blip2Model |
python | apache__airflow | airflow-core/src/airflow/models/variable.py | {
"start": 2010,
"end": 20452
} | class ____(Base, LoggingMixin):
"""A generic way to store and retrieve arbitrary content or settings as a simple key/value store."""
__tablename__ = "variable"
__NO_DEFAULT_SENTINEL = object()
id: Mapped[int] = mapped_column(Integer, primary_key=True)
key: Mapped[str] = mapped_column(String(ID_LEN), unique=True)
_val: Mapped[str] = mapped_column("val", Text().with_variant(MEDIUMTEXT, "mysql"))
description: Mapped[str | None] = mapped_column(Text, nullable=True)
is_encrypted: Mapped[bool] = mapped_column(Boolean, unique=False, default=False)
team_id: Mapped[str | None] = mapped_column(UUIDType(binary=False), ForeignKey("team.id"), nullable=True)
def __init__(self, key=None, val=None, description=None, team_id=None):
super().__init__()
self.key = key
self.val = val
self.description = description
self.team_id = team_id
@reconstructor
def on_db_load(self):
if self._val:
mask_secret(self.val, self.key)
def __repr__(self):
# Hiding the value
return f"{self.key} : {self._val}"
def get_val(self):
"""Get Airflow Variable from Metadata DB and decode it using the Fernet Key."""
from cryptography.fernet import InvalidToken as InvalidFernetToken
if self._val is not None and self.is_encrypted:
try:
fernet = get_fernet()
return fernet.decrypt(bytes(self._val, "utf-8")).decode()
except InvalidFernetToken:
self.log.error("Can't decrypt _val for key=%s, invalid token or value", self.key)
return None
except Exception:
self.log.error("Can't decrypt _val for key=%s, FERNET_KEY configuration missing", self.key)
return None
else:
return self._val
def set_val(self, value):
"""Encode the specified value with Fernet Key and store it in Variables Table."""
if value is not None:
fernet = get_fernet()
self._val = fernet.encrypt(bytes(value, "utf-8")).decode()
self.is_encrypted = fernet.is_encrypted
@declared_attr
def val(cls):
"""Get Airflow Variable from Metadata DB and decode it using the Fernet Key."""
return synonym("_val", descriptor=property(cls.get_val, cls.set_val))
@classmethod
def setdefault(cls, key, default, description=None, deserialize_json=False):
"""
Return the current value for a key or store the default value and return it.
Works the same as the Python builtin dict object.
:param key: Dict key for this Variable
:param default: Default value to set and return if the variable
isn't already in the DB
:param description: Default value to set Description of the Variable
:param deserialize_json: Store this as a JSON encoded value in the DB
and un-encode it when retrieving a value
:param session: Session
:return: Mixed
"""
obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json)
if obj is None:
if default is not None:
Variable.set(key=key, value=default, description=description, serialize_json=deserialize_json)
return default
raise ValueError("Default Value must be set")
return obj
@classmethod
def get(
cls,
key: str,
default_var: Any = __NO_DEFAULT_SENTINEL,
deserialize_json: bool = False,
) -> Any:
"""
Get a value for an Airflow Variable Key.
:param key: Variable Key
:param default_var: Default value of the Variable if the Variable doesn't exist
:param deserialize_json: Deserialize the value to a Python dict
"""
# TODO: This is not the best way of having compat, but it's "better than erroring" for now. This still
# means SQLA etc is loaded, but we can't avoid that unless/until we add import shims as a big
# back-compat layer
# If this is set it means we are in some kind of execution context (Task, Dag Parse or Triggerer perhaps)
# and should use the Task SDK API server path
if hasattr(sys.modules.get("airflow.sdk.execution_time.task_runner"), "SUPERVISOR_COMMS"):
warnings.warn(
"Using Variable.get from `airflow.models` is deprecated."
"Please use `get` on Variable from sdk(`airflow.sdk.Variable`) instead",
DeprecationWarning,
stacklevel=1,
)
from airflow.sdk import Variable as TaskSDKVariable
default_kwargs = {} if default_var is cls.__NO_DEFAULT_SENTINEL else {"default": default_var}
var_val = TaskSDKVariable.get(key, deserialize_json=deserialize_json, **default_kwargs)
if isinstance(var_val, str):
mask_secret(var_val, key)
return var_val
var_val = Variable.get_variable_from_secrets(key=key)
if var_val is None:
if default_var is not cls.__NO_DEFAULT_SENTINEL:
return default_var
raise KeyError(f"Variable {key} does not exist")
if deserialize_json:
obj = json.loads(var_val)
mask_secret(obj, key)
return obj
mask_secret(var_val, key)
return var_val
@staticmethod
def set(
key: str,
value: Any,
description: str | None = None,
serialize_json: bool = False,
team_id: str | None = None,
session: Session | None = None,
) -> None:
"""
Set a value for an Airflow Variable with a given Key.
This operation overwrites an existing variable using the session's dialect-specific upsert operation.
:param key: Variable Key
:param value: Value to set for the Variable
:param description: Description of the Variable
:param serialize_json: Serialize the value to a JSON string
:param team_id: ID of the team associated to the variable (if any)
:param session: optional session, use if provided or create a new one
"""
# TODO: This is not the best way of having compat, but it's "better than erroring" for now. This still
# means SQLA etc is loaded, but we can't avoid that unless/until we add import shims as a big
# back-compat layer
# If this is set it means we are in some kind of execution context (Task, Dag Parse or Triggerer perhaps)
# and should use the Task SDK API server path
if hasattr(sys.modules.get("airflow.sdk.execution_time.task_runner"), "SUPERVISOR_COMMS"):
warnings.warn(
"Using Variable.set from `airflow.models` is deprecated."
"Please use `set` on Variable from sdk(`airflow.sdk.Variable`) instead",
DeprecationWarning,
stacklevel=1,
)
from airflow.sdk import Variable as TaskSDKVariable
TaskSDKVariable.set(
key=key,
value=value,
description=description,
serialize_json=serialize_json,
)
return
if team_id and not conf.getboolean("core", "multi_team"):
raise ValueError(
"Multi-team mode is not configured in the Airflow environment. To assign a team to a variable, multi-mode must be enabled."
)
# check if the secret exists in the custom secrets' backend.
Variable.check_for_write_conflict(key=key)
if serialize_json:
stored_value = json.dumps(value, indent=2)
else:
stored_value = str(value)
ctx: contextlib.AbstractContextManager
if session is not None:
ctx = contextlib.nullcontext(session)
else:
ctx = create_session()
with ctx as session:
new_variable = Variable(key=key, val=stored_value, description=description, team_id=team_id)
val = new_variable._val
is_encrypted = new_variable.is_encrypted
# Create dialect-specific upsert statement
dialect_name = session.get_bind().dialect.name
stmt: MySQLInsert | PostgreSQLInsert | SQLiteInsert
if dialect_name == "postgresql":
from sqlalchemy.dialects.postgresql import insert as pg_insert
pg_stmt = pg_insert(Variable).values(
key=key,
val=val,
description=description,
is_encrypted=is_encrypted,
team_id=team_id,
)
stmt = pg_stmt.on_conflict_do_update(
index_elements=["key"],
set_=dict(
val=val,
description=description,
is_encrypted=is_encrypted,
team_id=team_id,
),
)
elif dialect_name == "mysql":
from sqlalchemy.dialects.mysql import insert as mysql_insert
mysql_stmt = mysql_insert(Variable).values(
key=key,
val=val,
description=description,
is_encrypted=is_encrypted,
team_id=team_id,
)
stmt = mysql_stmt.on_duplicate_key_update(
val=val,
description=description,
is_encrypted=is_encrypted,
team_id=team_id,
)
else:
from sqlalchemy.dialects.sqlite import insert as sqlite_insert
sqlite_stmt = sqlite_insert(Variable).values(
key=key,
val=val,
description=description,
is_encrypted=is_encrypted,
team_id=team_id,
)
stmt = sqlite_stmt.on_conflict_do_update(
index_elements=["key"],
set_=dict(
val=val,
description=description,
is_encrypted=is_encrypted,
team_id=team_id,
),
)
session.execute(stmt)
# invalidate key in cache for faster propagation
# we cannot save the value set because it's possible that it's shadowed by a custom backend
# (see call to check_for_write_conflict above)
SecretCache.invalidate_variable(key)
@staticmethod
def update(
key: str,
value: Any,
serialize_json: bool = False,
session: Session | None = None,
) -> None:
"""
Update a given Airflow Variable with the Provided value.
:param key: Variable Key
:param value: Value to set for the Variable
:param serialize_json: Serialize the value to a JSON string
:param session: optional session, use if provided or create a new one
"""
# TODO: This is not the best way of having compat, but it's "better than erroring" for now. This still
# means SQLA etc is loaded, but we can't avoid that unless/until we add import shims as a big
# back-compat layer
# If this is set it means are in some kind of execution context (Task, Dag Parse or Triggerer perhaps)
# and should use the Task SDK API server path
if hasattr(sys.modules.get("airflow.sdk.execution_time.task_runner"), "SUPERVISOR_COMMS"):
warnings.warn(
"Using Variable.update from `airflow.models` is deprecated."
"Please use `set` on Variable from sdk(`airflow.sdk.Variable`) instead as it is an upsert.",
DeprecationWarning,
stacklevel=1,
)
from airflow.sdk import Variable as TaskSDKVariable
# set is an upsert command, it can handle updates too
TaskSDKVariable.set(
key=key,
value=value,
serialize_json=serialize_json,
)
return
Variable.check_for_write_conflict(key=key)
if Variable.get_variable_from_secrets(key=key) is None:
raise KeyError(f"Variable {key} does not exist")
ctx: contextlib.AbstractContextManager
if session is not None:
ctx = contextlib.nullcontext(session)
else:
ctx = create_session()
with ctx as session:
obj = session.scalar(select(Variable).where(Variable.key == key))
if obj is None:
raise AttributeError(f"Variable {key} does not exist in the Database and cannot be updated.")
Variable.set(
key=key,
value=value,
description=obj.description,
serialize_json=serialize_json,
session=session,
)
@staticmethod
def delete(key: str, session: Session | None = None) -> int:
"""
Delete an Airflow Variable for a given key.
:param key: Variable Keys
:param session: optional session, use if provided or create a new one
"""
# TODO: This is not the best way of having compat, but it's "better than erroring" for now. This still
# means SQLA etc is loaded, but we can't avoid that unless/until we add import shims as a big
# back-compat layer
# If this is set it means are in some kind of execution context (Task, Dag Parse or Triggerer perhaps)
# and should use the Task SDK API server path
if hasattr(sys.modules.get("airflow.sdk.execution_time.task_runner"), "SUPERVISOR_COMMS"):
warnings.warn(
"Using Variable.delete from `airflow.models` is deprecated."
"Please use `delete` on Variable from sdk(`airflow.sdk.Variable`) instead",
DeprecationWarning,
stacklevel=1,
)
from airflow.sdk import Variable as TaskSDKVariable
TaskSDKVariable.delete(
key=key,
)
return 1
ctx: contextlib.AbstractContextManager
if session is not None:
ctx = contextlib.nullcontext(session)
else:
ctx = create_session()
with ctx as session:
result = session.execute(delete(Variable).where(Variable.key == key))
rows = getattr(result, "rowcount", 0) or 0
SecretCache.invalidate_variable(key)
return rows
def rotate_fernet_key(self):
"""Rotate Fernet Key."""
fernet = get_fernet()
if self._val and self.is_encrypted:
self._val = fernet.rotate(self._val.encode("utf-8")).decode()
@staticmethod
def check_for_write_conflict(key: str) -> None:
"""
Log a warning if a variable exists outside the metastore.
If we try to write a variable to the metastore while the same key
exists in an environment variable or custom secrets backend, then
subsequent reads will not read the set value.
:param key: Variable Key
"""
for secrets_backend in ensure_secrets_loaded():
if not isinstance(secrets_backend, MetastoreBackend):
try:
var_val = secrets_backend.get_variable(key=key)
if var_val is not None:
_backend_name = type(secrets_backend).__name__
log.warning(
"The variable %s is defined in the %s secrets backend, which takes "
"precedence over reading from the database. The value in the database will be "
"updated, but to read it you have to delete the conflicting variable "
"from %s",
key,
_backend_name,
_backend_name,
)
return
except Exception:
log.exception(
"Unable to retrieve variable from secrets backend (%s). "
"Checking subsequent secrets backend.",
type(secrets_backend).__name__,
)
return None
@staticmethod
def get_variable_from_secrets(key: str) -> str | None:
"""
Get Airflow Variable by iterating over all Secret Backends.
:param key: Variable Key
:return: Variable Value
"""
# check cache first
# enabled only if SecretCache.init() has been called first
try:
return SecretCache.get_variable(key)
except SecretCache.NotPresentException:
pass # continue business
var_val = None
# iterate over backends if not in cache (or expired)
for secrets_backend in ensure_secrets_loaded():
try:
var_val = secrets_backend.get_variable(key=key)
if var_val is not None:
break
except Exception:
log.exception(
"Unable to retrieve variable from secrets backend (%s). "
"Checking subsequent secrets backend.",
type(secrets_backend).__name__,
)
SecretCache.save_variable(key, var_val) # we save None as well
return var_val
@staticmethod
@provide_session
def get_team_name(variable_key: str, session=NEW_SESSION) -> str | None:
stmt = (
select(Team.name).join(Variable, Team.id == Variable.team_id).where(Variable.key == variable_key)
)
return session.scalar(stmt)
@staticmethod
@provide_session
def get_key_to_team_name_mapping(variable_keys: list[str], session=NEW_SESSION) -> dict[str, str | None]:
stmt = (
select(Variable.key, Team.name)
.join(Team, Variable.team_id == Team.id)
.where(Variable.key.in_(variable_keys))
)
return {key: team_name for key, team_name in session.execute(stmt)}
| Variable |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1528593,
"end": 1529012
} | class ____(Transform):
"""
SampleTransform schema wrapper.
Parameters
----------
sample : float
The maximum number of data objects to include in the sample.
**Default value:** ``1000``
"""
_schema = {"$ref": "#/definitions/SampleTransform"}
def __init__(self, sample: Optional[float] = Undefined, **kwds):
super().__init__(sample=sample, **kwds)
| SampleTransform |
python | EpistasisLab__tpot | tpot/builtin_modules/nn.py | {
"start": 7626,
"end": 9516
} | class ____(PytorchClassifier):
"""Logistic Regression classifier, implemented in PyTorch, for use with
TPOT.
For examples on standalone use (i.e., non-TPOT) refer to:
https://github.com/trang1618/tpot-nn/blob/master/tpot_nn/estimator_sandbox.py
"""
def __init__(
self,
num_epochs=10,
batch_size=16,
learning_rate=0.02,
weight_decay=1e-4,
verbose=False
):
self.num_epochs = num_epochs
self.batch_size = batch_size
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.verbose = verbose
self.input_size = None
self.num_classes = None
self.network = None
self.loss_function = None
self.optimizer = None
self.data_loader = None
self.train_dset_len = None
self.device = None
def _init_model(self, X, y):
device = _get_cuda_device_if_available()
X, y = self.validate_inputs(X, y)
self.input_size = X.shape[-1]
self.num_classes = len(set(y))
X = torch.tensor(X, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.long)
train_dset = TensorDataset(X, y)
# Set parameters of the network
self.network = _LR(self.input_size, self.num_classes).to(device)
self.loss_function = nn.CrossEntropyLoss()
self.optimizer = Adam(self.network.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
self.data_loader = DataLoader(
train_dset, batch_size=self.batch_size, shuffle=True, num_workers=2
)
self.train_dset_len = len(train_dset)
self.device = device
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.non_deterministic = True
tags.target_tags.single_output = True
return tags
| PytorchLRClassifier |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 114,
"end": 155
} | class ____(object):
__slots__ = ()
| Node |
python | scrapy__scrapy | tests/test_crawler.py | {
"start": 20638,
"end": 21129
} | class ____(TestBaseCrawler):
def test_crawler_process_accepts_dict(self):
runner = CrawlerProcess({"foo": "bar"}, install_root_handler=False)
assert runner.settings["foo"] == "bar"
self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")
def test_crawler_process_accepts_None(self):
runner = CrawlerProcess(install_root_handler=False)
self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")
@pytest.mark.only_asyncio
| TestCrawlerProcess |
python | mahmoud__boltons | boltons/queueutils.py | {
"start": 7205,
"end": 7650
} | class ____(BasePriorityQueue):
"""A priority queue inherited from :class:`BasePriorityQueue`, based
on the :func:`bisect.insort` approach for in-order insertion into
a sorted list.
"""
_backend_type = BList
@staticmethod
def _pop_entry(backend):
return backend.pop(0)
@staticmethod
def _push_entry(backend, entry):
insort(backend, entry)
PriorityQueue = SortedPriorityQueue
| SortedPriorityQueue |
python | mlflow__mlflow | dev/update_changelog.py | {
"start": 685,
"end": 1389
} | class ____(NamedTuple):
title: str
number: int
author: str
labels: list[str]
@property
def url(self):
return f"https://github.com/mlflow/mlflow/pull/{self.number}"
@property
def release_note_labels(self):
return [l for l in self.labels if l.startswith("rn/")]
def __str__(self):
areas = " / ".join(
sorted(
map(
format_label,
filter(lambda l: l.split("/")[0] in ("area", "language"), self.labels),
)
)
)
return f"[{areas}] {self.title} (#{self.number}, @{self.author})"
def __repr__(self):
return str(self)
| PullRequest |
python | pypa__virtualenv | src/virtualenv/run/plugin/discovery.py | {
"start": 69,
"end": 1395
} | class ____(PluginLoader):
"""Discovery plugins."""
def get_discover(parser, args):
discover_types = Discovery.entry_points_for("virtualenv.discovery")
discovery_parser = parser.add_argument_group(
title="discovery",
description="discover and provide a target interpreter",
)
choices = _get_default_discovery(discover_types)
# prefer the builtin if present, otherwise fallback to first defined type
choices = sorted(choices, key=lambda a: 0 if a == "builtin" else 1)
try:
default_discovery = next(iter(choices))
except StopIteration as e:
msg = "No discovery plugin found. Try reinstalling virtualenv to fix this issue."
raise RuntimeError(msg) from e
discovery_parser.add_argument(
"--discovery",
choices=choices,
default=default_discovery,
required=False,
help="interpreter discovery method",
)
options, _ = parser.parse_known_args(args)
discover_class = discover_types[options.discovery]
discover_class.add_parser_arguments(discovery_parser)
options, _ = parser.parse_known_args(args, namespace=options)
return discover_class(options)
def _get_default_discovery(discover_types):
return list(discover_types.keys())
__all__ = [
"Discovery",
"get_discover",
]
| Discovery |
python | pytest-dev__pytest | testing/test_subtests.py | {
"start": 13049,
"end": 20393
} | class ____:
"""Test unittest.TestCase.subTest functionality."""
def test_failures(
self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setenv("COLUMNS", "120")
pytester.makepyfile(
"""
from unittest import TestCase
class T(TestCase):
def test_foo(self):
with self.subTest("foo subtest"):
assert False, "foo subtest failure"
def test_bar(self):
with self.subTest("bar subtest"):
assert False, "bar subtest failure"
assert False, "test_bar also failed"
def test_zaz(self):
with self.subTest("zaz subtest"):
pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"* 3 failed, 2 passed in *",
]
)
def test_passes(
self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setenv("COLUMNS", "120")
pytester.makepyfile(
"""
from unittest import TestCase
class T(TestCase):
def test_foo(self):
with self.subTest("foo subtest"):
pass
def test_bar(self):
with self.subTest("bar subtest"):
pass
def test_zaz(self):
with self.subTest("zaz subtest"):
pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"* 3 passed in *",
]
)
def test_skip(
self,
pytester: pytest.Pytester,
) -> None:
pytester.makepyfile(
"""
from unittest import TestCase, main
class T(TestCase):
def test_foo(self):
for i in range(5):
with self.subTest(msg="custom", i=i):
if i % 2 == 0:
self.skipTest('even number')
"""
)
# This output might change #13756.
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 passed in *"])
def test_non_subtest_skip(
self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setenv("COLUMNS", "120")
pytester.makepyfile(
"""
from unittest import TestCase, main
class T(TestCase):
def test_foo(self):
with self.subTest(msg="subtest"):
assert False, "failed subtest"
self.skipTest('non-subtest skip')
"""
)
# This output might change #13756.
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"SUBFAILED[[]subtest[]] test_non_subtest_skip.py::T::test_foo*",
"* 1 failed, 1 skipped in *",
]
)
def test_xfail(
self,
pytester: pytest.Pytester,
) -> None:
pytester.makepyfile(
"""
import pytest
from unittest import expectedFailure, TestCase
class T(TestCase):
@expectedFailure
def test_foo(self):
for i in range(5):
with self.subTest(msg="custom", i=i):
if i % 2 == 0:
raise pytest.xfail('even number')
if __name__ == '__main__':
main()
"""
)
# This output might change #13756.
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 xfailed in *"])
def test_only_original_skip_is_called(
self,
pytester: pytest.Pytester,
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Regression test for pytest-dev/pytest-subtests#173."""
monkeypatch.setenv("COLUMNS", "120")
pytester.makepyfile(
"""
import unittest
from unittest import TestCase
@unittest.skip("skip this test")
class T(unittest.TestCase):
def test_foo(self):
assert 1 == 2
"""
)
result = pytester.runpytest("-v", "-rsf")
result.stdout.fnmatch_lines(
["SKIPPED [1] test_only_original_skip_is_called.py:6: skip this test"]
)
def test_skip_with_failure(
self,
pytester: pytest.Pytester,
monkeypatch: pytest.MonkeyPatch,
) -> None:
monkeypatch.setenv("COLUMNS", "120")
pytester.makepyfile(
"""
import pytest
from unittest import TestCase
class T(TestCase):
def test_foo(self):
with self.subTest("subtest 1"):
self.skipTest(f"skip subtest 1")
with self.subTest("subtest 2"):
assert False, "fail subtest 2"
"""
)
result = pytester.runpytest("-ra")
result.stdout.fnmatch_lines(
[
"*.py u. * [[]100%[]]",
"*=== short test summary info ===*",
"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2",
"* 1 failed, 1 passed in *",
]
)
result = pytester.runpytest("-v", "-ra")
result.stdout.fnmatch_lines(
[
"*.py::T::test_foo SUBSKIPPED[[]subtest 1[]] (skip subtest 1) * [[]100%[]]",
"*.py::T::test_foo SUBFAILED[[]subtest 2[]] * [[]100%[]]",
"*.py::T::test_foo PASSED * [[]100%[]]",
"SUBSKIPPED[[]subtest 1[]] [[]1[]] *.py:*: skip subtest 1",
"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2",
"* 1 failed, 1 passed, 1 skipped in *",
]
)
pytester.makeini(
"""
[pytest]
verbosity_subtests = 0
"""
)
result = pytester.runpytest("-v", "-ra")
result.stdout.fnmatch_lines(
[
"*.py::T::test_foo SUBFAILED[[]subtest 2[]] * [[]100%[]]",
"*.py::T::test_foo PASSED * [[]100%[]]",
"*=== short test summary info ===*",
r"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2",
r"* 1 failed, 1 passed in *",
]
)
result.stdout.no_fnmatch_line(
"*.py::T::test_foo SUBSKIPPED[[]subtest 1[]] (skip subtest 1) * [[]100%[]]"
)
result.stdout.no_fnmatch_line(
"SUBSKIPPED[[]subtest 1[]] [[]1[]] *.py:*: skip subtest 1"
)
| TestUnittestSubTest |
python | huggingface__transformers | tests/models/markuplm/test_feature_extraction_markuplm.py | {
"start": 859,
"end": 1792
} | class ____:
def __init__(self, parent):
self.parent = parent
def prepare_feat_extract_dict(self):
return {}
def get_html_strings():
html_string_1 = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
html_string_2 = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_1, html_string_2]
@require_bs4
| MarkupLMFeatureExtractionTester |
python | neetcode-gh__leetcode | python/1834-single-threaded-cpu.py | {
"start": 0,
"end": 822
} | class ____:
def getOrder(self, tasks: List[List[int]]) -> List[int]:
tasks = sorted([(t[0], t[1], i) for i, t in enumerate(tasks)])
result, heap = [], []
cur_task_index = 0
cur_time = tasks[0][0]
while len(result) < len(tasks):
while (cur_task_index < len(tasks)) and (tasks[cur_task_index][0] <= cur_time):
heapq.heappush(heap, (tasks[cur_task_index][1], tasks[cur_task_index][2]))
cur_task_index += 1
if heap:
time_difference, original_index = heapq.heappop(heap)
cur_time += time_difference
result.append(original_index)
elif cur_task_index < len(tasks):
cur_time = tasks[cur_task_index][0]
return result
| Solution |
python | getsentry__sentry | tests/sentry/issues/test_issue_occurrence.py | {
"start": 2741,
"end": 3121
} | class ____(OccurrenceTestMixin, TestCase):
def test(self) -> None:
occurrence = self.build_occurrence()
occurrence.save()
fetched_occurrence = IssueOccurrence.fetch(occurrence.id, occurrence.project_id)
assert fetched_occurrence is not None
self.assert_occurrences_identical(occurrence, fetched_occurrence)
| IssueOccurrenceSaveAndFetchTest |
python | pytorch__pytorch | test/dynamo/test_deque_reconstruct.py | {
"start": 118,
"end": 2607
} | class ____(torch._inductor.test_case.TestCase):
UNSET = object()
@contextlib.contextmanager
def set_deque_in_globals(self, value):
prev = globals().pop("deque", self.UNSET)
assert "deque" not in globals()
try:
if value is not self.UNSET:
globals()["deque"] = value
yield
finally:
if prev is self.UNSET:
globals().pop("deque", None)
assert "deque" not in globals()
else:
globals()["deque"] = prev
def test_deque_reconstruct_not_in_globals(self):
with self.set_deque_in_globals(self.UNSET):
@torch.compile(backend="eager", fullgraph=True)
def func(x):
return collections.deque([x, x + 1, x + 2], maxlen=2)
x = torch.randn(3, 4)
out = func(x)
self.assertIsInstance(out, collections.deque)
self.assertEqual(out.maxlen, 2)
self.assertEqual(out, collections.deque([x + 1, x + 2], maxlen=2))
def test_deque_reconstruct_in_globals(self):
with self.set_deque_in_globals(collections.deque):
# This does not emit a NameError
dummy = deque([0, 1, 2], maxlen=2) # noqa: F821
self.assertIsInstance(dummy, collections.deque)
self.assertEqual(list(dummy), [1, 2])
@torch.compile(backend="eager", fullgraph=True)
def func(x):
return collections.deque([x, x + 1, x + 2], maxlen=2)
x = torch.randn(3, 4)
out = func(x)
self.assertIsInstance(out, collections.deque)
self.assertEqual(out.maxlen, 2)
self.assertEqual(out, collections.deque([x + 1, x + 2], maxlen=2))
def test_deque_reconstruct_shallows_globals(self):
with self.set_deque_in_globals(None):
# This does not emit a NameError
self.assertIsNone(deque) # noqa: F821
@torch.compile(backend="eager", fullgraph=True)
def func(x):
return collections.deque([x, x + 1, x + 2], maxlen=2)
x = torch.randn(3, 4)
out = func(x)
self.assertIsInstance(out, collections.deque)
self.assertEqual(out.maxlen, 2)
self.assertEqual(out, collections.deque([x + 1, x + 2], maxlen=2))
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TestDequeReconstruct |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 14850,
"end": 16526
} | class ____(RoleImpl):
__slots__ = ()
def _warn_for_scalar_subquery_coercion(self):
util.warn(
"implicitly coercing SELECT object to scalar subquery; "
"please use the .scalar_subquery() method to produce a scalar "
"subquery.",
)
def _implicit_coercions(self, element, resolved, argname=None, **kw):
original_element = element
if not getattr(resolved, "is_clause_element", False):
self._raise_for_expected(original_element, argname, resolved)
elif resolved._is_select_base:
self._warn_for_scalar_subquery_coercion()
return resolved.scalar_subquery()
elif resolved._is_from_clause and isinstance(
resolved, selectable.Subquery
):
self._warn_for_scalar_subquery_coercion()
return resolved.element.scalar_subquery()
elif self._role_class.allows_lambda and resolved._is_lambda_element:
return resolved
else:
self._raise_for_expected(original_element, argname, resolved)
def _no_text_coercion(
element: Any,
argname: Optional[str] = None,
exc_cls: Type[exc.SQLAlchemyError] = exc.ArgumentError,
extra: Optional[str] = None,
err: Optional[Exception] = None,
) -> NoReturn:
raise exc_cls(
"%(extra)sTextual SQL expression %(expr)r %(argname)sshould be "
"explicitly declared as text(%(expr)r)"
% {
"expr": util.ellipses_string(element),
"argname": "for argument %s" % (argname,) if argname else "",
"extra": "%s " % extra if extra else "",
}
) from err
| _ColumnCoercions |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fetch_org_info_from_entity.py | {
"start": 226,
"end": 320
} | class ____(GQLResult):
entity: Optional[FetchOrgInfoFromEntityEntity]
| FetchOrgInfoFromEntity |
python | scipy__scipy | scipy/sparse/_dok.py | {
"start": 416,
"end": 19799
} | class ____(_spbase, IndexMixin, dict):
_format = 'dok'
_allow_nd = (1, 2)
def __init__(self, arg1, shape=None, dtype=None, copy=False, *, maxprint=None):
_spbase.__init__(self, arg1, maxprint=maxprint)
if isinstance(arg1, tuple) and isshape(arg1, allow_nd=self._allow_nd):
self._shape = check_shape(arg1, allow_nd=self._allow_nd)
self._dict = {}
self.dtype = getdtype(dtype, default=float)
elif issparse(arg1): # Sparse ctor
if arg1.format == self.format:
arg1 = arg1.copy() if copy else arg1
else:
arg1 = arg1.todok()
if dtype is not None:
arg1 = arg1.astype(dtype, copy=False)
self._dict = arg1._dict
self._shape = check_shape(arg1.shape, allow_nd=self._allow_nd)
self.dtype = getdtype(arg1.dtype)
else: # Dense ctor
try:
arg1 = np.asarray(arg1)
except Exception as e:
raise TypeError('Invalid input format.') from e
if arg1.ndim > 2:
raise ValueError(f"DOK arrays don't yet support {arg1.ndim}D input.")
if arg1.ndim == 1:
if dtype is not None:
arg1 = arg1.astype(dtype, copy=False)
self._dict = {i: v for i, v in enumerate(arg1) if v != 0}
self.dtype = getdtype(arg1.dtype)
else:
d = self._coo_container(arg1, shape=shape, dtype=dtype).todok()
self._dict = d._dict
self.dtype = getdtype(d.dtype)
self._shape = check_shape(arg1.shape, allow_nd=self._allow_nd)
def update(self, val):
"""Update values from a dict, sparse dok or iterable of 2-tuples like .items()
Keys of the input must be sequences of nonnegative integers less than the shape
for each axis.
"""
if isinstance(val, dict):
inputs = val.items()
else:
inputs = val
for key, value in inputs:
index = (key,) if isintlike(key) else tuple(key)
if len(index) != self.ndim:
raise IndexError(f'Index {key} length needs to match self.shape')
if not all(
isintlike(idx) and 0 <= idx < max_idx
for idx, max_idx in zip(index, self.shape)
):
# Error handling. Re-search to find which error occured
for idx, max_idx in zip(index, self.shape):
if not isintlike(idx):
raise IndexError(f'integer keys required for update. Got {key}')
if idx < 0:
raise IndexError(f'negative index {key} not allowed in update')
if idx >= max_idx:
raise IndexError(f'index {key} is too large for self.shape')
# do the update
self._dict.update(inputs)
def _getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError(
"_getnnz over an axis is not implemented for DOK format."
)
return len(self._dict)
def count_nonzero(self, axis=None):
if axis is not None:
raise NotImplementedError(
"count_nonzero over an axis is not implemented for DOK format."
)
return sum(x != 0 for x in self.values())
_getnnz.__doc__ = _spbase._getnnz.__doc__
count_nonzero.__doc__ = _spbase.count_nonzero.__doc__
def __len__(self):
return len(self._dict)
def __contains__(self, key):
return key in self._dict
def setdefault(self, key, default=None, /):
return self._dict.setdefault(key, default)
def __delitem__(self, key, /):
del self._dict[key]
def clear(self):
return self._dict.clear()
def pop(self, /, *args):
return self._dict.pop(*args)
def __reversed__(self):
raise TypeError("reversed is not defined for dok_array type")
def __or__(self, other):
type_names = f"{type(self).__name__} and {type(other).__name__}"
raise TypeError(f"unsupported operand type for |: {type_names}")
def __ror__(self, other):
type_names = f"{type(self).__name__} and {type(other).__name__}"
raise TypeError(f"unsupported operand type for |: {type_names}")
def __ior__(self, other):
type_names = f"{type(self).__name__} and {type(other).__name__}"
raise TypeError(f"unsupported operand type for |: {type_names}")
def popitem(self):
return self._dict.popitem()
def items(self):
return self._dict.items()
def keys(self):
return self._dict.keys()
def values(self):
return self._dict.values()
def get(self, key, default=0.0):
"""This provides dict.get method functionality with type checking"""
if key in self._dict:
return self._dict[key]
if isintlike(key) and self.ndim == 1:
key = (key,)
if self.ndim != len(key):
raise IndexError(f'Index {key} length needs to match self.shape')
try:
for i in key:
assert isintlike(i)
except (AssertionError, TypeError, ValueError) as e:
raise IndexError('Index must be or consist of integers.') from e
key = tuple(i + M if i < 0 else i for i, M in zip(key, self.shape))
if any(i < 0 or i >= M for i, M in zip(key, self.shape)):
raise IndexError('Index out of bounds.')
if self.ndim == 1:
key = key[0]
return self._dict.get(key, default)
# 1D get methods
def _get_int(self, idx):
return self._dict.get(idx, self.dtype.type(0))
def _get_slice(self, idx):
i_range = range(*idx.indices(self.shape[0]))
return self._get_array(list(i_range))
def _get_array(self, idx):
idx = np.asarray(idx)
if idx.ndim == 0:
val = self._dict.get(int(idx), self.dtype.type(0))
return np.array(val, stype=self.dtype)
new_dok = self._dok_container(idx.shape, dtype=self.dtype)
dok_vals = [self._dict.get(i, 0) for i in idx.ravel()]
if dok_vals:
if len(idx.shape) == 1:
for i, v in enumerate(dok_vals):
if v:
new_dok._dict[i] = v
else:
new_idx = np.unravel_index(np.arange(len(dok_vals)), idx.shape)
new_idx = new_idx[0] if len(new_idx) == 1 else zip(*new_idx)
for i, v in zip(new_idx, dok_vals, strict=True):
if v:
new_dok._dict[i] = v
return new_dok
# 2D get methods
def _get_intXint(self, row, col):
return self._dict.get((row, col), self.dtype.type(0))
def _get_intXslice(self, row, col):
return self._get_sliceXslice(slice(row, row + 1), col)
def _get_sliceXint(self, row, col):
return self._get_sliceXslice(row, slice(col, col + 1))
def _get_sliceXslice(self, row, col):
row_start, row_stop, row_step = row.indices(self.shape[0])
col_start, col_stop, col_step = col.indices(self.shape[1])
row_range = range(row_start, row_stop, row_step)
col_range = range(col_start, col_stop, col_step)
shape = (len(row_range), len(col_range))
# Switch paths only when advantageous
# (count the iterations in the loops, adjust for complexity)
if len(self) >= 2 * shape[0] * shape[1]:
# O(nr*nc) path: loop over <row x col>
return self._get_columnXarray(row_range, col_range)
# O(nnz) path: loop over entries of self
newdok = self._dok_container(shape, dtype=self.dtype)
for key in self.keys():
i, ri = divmod(int(key[0]) - row_start, row_step)
if ri != 0 or i < 0 or i >= shape[0]:
continue
j, rj = divmod(int(key[1]) - col_start, col_step)
if rj != 0 or j < 0 or j >= shape[1]:
continue
newdok._dict[i, j] = self._dict[key]
return newdok
def _get_intXarray(self, row, col):
return self._get_columnXarray([row], col.ravel())
def _get_arrayXint(self, row, col):
res = self._get_columnXarray(row.ravel(), [col])
if row.ndim > 1:
return res.reshape(row.shape)
return res
def _get_sliceXarray(self, row, col):
row = list(range(*row.indices(self.shape[0])))
return self._get_columnXarray(row, col)
def _get_arrayXslice(self, row, col):
col = list(range(*col.indices(self.shape[1])))
return self._get_columnXarray(row, col)
def _get_columnXarray(self, row, col):
# outer indexing
newdok = self._dok_container((len(row), len(col)), dtype=self.dtype)
for i, r in enumerate(row):
for j, c in enumerate(col):
v = self._dict.get((r, c), 0)
if v:
newdok._dict[i, j] = v
return newdok
def _get_arrayXarray(self, row, col):
# inner indexing
i, j = map(np.atleast_2d, np.broadcast_arrays(row, col))
newdok = self._dok_container(i.shape, dtype=self.dtype)
for key in itertools.product(range(i.shape[0]), range(i.shape[1])):
v = self._dict.get((i[key], j[key]), 0)
if v:
newdok._dict[key] = v
return newdok
# 1D set methods
def _set_int(self, idx, x):
if x:
self._dict[idx] = x
elif idx in self._dict:
del self._dict[idx]
def _set_array(self, idx, x):
idx_set = idx.ravel()
x_set = x.ravel()
if len(idx_set) != len(x_set):
if len(x_set) == 1:
x_set = np.full(len(idx_set), x_set[0], dtype=self.dtype)
else:
raise ValueError("Need len(index)==len(data) or len(data)==1")
for i, v in zip(idx_set, x_set):
if v:
self._dict[i] = v
elif i in self._dict:
del self._dict[i]
# 2D set methods
def _set_intXint(self, row, col, x):
key = (row, col)
if x:
self._dict[key] = x
elif key in self._dict:
del self._dict[key]
def _set_arrayXarray(self, row, col, x):
row = list(map(int, row.ravel()))
col = list(map(int, col.ravel()))
x = x.ravel()
self._dict.update(zip(zip(row, col), x))
for i in np.nonzero(x == 0)[0]:
key = (row[i], col[i])
if self._dict[key] == 0:
# may have been superseded by later update
del self._dict[key]
def __add__(self, other):
if isscalarlike(other):
res_dtype = upcast_scalar(self.dtype, other)
new = self._dok_container(self.shape, dtype=res_dtype)
# Add this scalar to each element.
for key in itertools.product(*[range(d) for d in self.shape]):
aij = self._dict.get(key, 0) + other
if aij:
new[key] = aij
elif issparse(other):
if other.shape != self.shape:
raise ValueError("Matrix dimensions are not equal.")
res_dtype = upcast(self.dtype, other.dtype)
new = self._dok_container(self.shape, dtype=res_dtype)
new._dict = self._dict.copy()
if other.format == "dok":
o_items = other.items()
else:
other = other.tocoo()
if self.ndim == 1:
o_items = zip(other.coords[0], other.data)
else:
o_items = zip(zip(*other.coords), other.data)
with np.errstate(over='ignore'):
new._dict.update((k, new[k] + v) for k, v in o_items)
elif isdense(other):
new = self.todense() + other
else:
return NotImplemented
return new
def __radd__(self, other):
return self + other # addition is commutative
def __neg__(self):
if self.dtype.kind == 'b':
raise NotImplementedError(
'Negating a sparse boolean matrix is not supported.'
)
new = self._dok_container(self.shape, dtype=self.dtype)
new._dict.update((k, -v) for k, v in self.items())
return new
def _mul_scalar(self, other):
res_dtype = upcast_scalar(self.dtype, other)
# Multiply this scalar by every element.
new = self._dok_container(self.shape, dtype=res_dtype)
new._dict.update(((k, v * other) for k, v in self.items()))
return new
def _matmul_vector(self, other):
res_dtype = upcast(self.dtype, other.dtype)
# vector @ vector
if self.ndim == 1:
if issparse(other):
if other.format == "dok":
keys = self.keys() & other.keys()
else:
keys = self.keys() & other.tocoo().coords[0]
return res_dtype(sum(self._dict[k] * other._dict[k] for k in keys))
elif isdense(other):
return res_dtype(sum(other[k] * v for k, v in self.items()))
else:
return NotImplemented
# matrix @ vector
result = np.zeros(self.shape[0], dtype=res_dtype)
for (i, j), v in self.items():
result[i] += v * other[j]
return result
def _matmul_multivector(self, other):
result_dtype = upcast(self.dtype, other.dtype)
# vector @ multivector
if self.ndim == 1:
# works for other 1d or 2d
return sum(v * other[j] for j, v in self._dict.items())
# matrix @ multivector
M = self.shape[0]
new_shape = (M,) if other.ndim == 1 else (M, other.shape[1])
result = np.zeros(new_shape, dtype=result_dtype)
for (i, j), v in self.items():
result[i] += v * other[j]
return result
def __imul__(self, other):
if isscalarlike(other):
self._dict.update((k, v * other) for k, v in self.items())
return self
return NotImplemented
def __truediv__(self, other):
if isscalarlike(other):
res_dtype = upcast_scalar(self.dtype, other)
new = self._dok_container(self.shape, dtype=res_dtype)
new._dict.update(((k, v / other) for k, v in self.items()))
return new
return self.tocsr() / other
def __itruediv__(self, other):
if isscalarlike(other):
self._dict.update((k, v / other) for k, v in self.items())
return self
return NotImplemented
def __reduce__(self):
# this approach is necessary because __setstate__ is called after
# __setitem__ upon unpickling and since __init__ is not called there
# is no shape attribute hence it is not possible to unpickle it.
return dict.__reduce__(self)
def diagonal(self, k=0):
if self.ndim == 2:
return super().diagonal(k)
raise ValueError("diagonal requires two dimensions")
def transpose(self, axes=None, copy=False):
if self.ndim == 1:
return self.copy()
if axes is not None and axes != (1, 0):
raise ValueError(
"Sparse arrays/matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."
)
M, N = self.shape
new = self._dok_container((N, M), dtype=self.dtype, copy=copy)
new._dict.update((((right, left), val) for (left, right), val in self.items()))
return new
transpose.__doc__ = _spbase.transpose.__doc__
def copy(self):
new = self._dok_container(self.shape, dtype=self.dtype)
new._dict.update(self._dict)
return new
copy.__doc__ = _spbase.copy.__doc__
@classmethod
def fromkeys(cls, iterable, value=1, /):
tmp = dict.fromkeys(iterable, value)
if isinstance(next(iter(tmp)), tuple):
shape = tuple(max(idx) + 1 for idx in zip(*tmp))
else:
shape = (max(tmp) + 1,)
result = cls(shape, dtype=type(value))
result._dict = tmp
return result
def tocoo(self, copy=False):
nnz = self.nnz
if nnz == 0:
return self._coo_container(self.shape, dtype=self.dtype)
idx_dtype = self._get_index_dtype(maxval=max(self.shape))
data = np.fromiter(self.values(), dtype=self.dtype, count=nnz)
# handle 1d keys specially b/c not a tuple
inds = zip(*self.keys()) if self.ndim > 1 else (self.keys(),)
coords = tuple(np.fromiter(ix, dtype=idx_dtype, count=nnz) for ix in inds)
A = self._coo_container((data, coords), shape=self.shape, dtype=self.dtype)
A.has_canonical_format = True
return A
tocoo.__doc__ = _spbase.tocoo.__doc__
def todok(self, copy=False):
if copy:
return self.copy()
return self
todok.__doc__ = _spbase.todok.__doc__
def tocsc(self, copy=False):
if self.ndim == 1:
raise NotImplementedError("tocsr() not valid for 1d sparse array")
return self.tocoo(copy=False).tocsc(copy=copy)
tocsc.__doc__ = _spbase.tocsc.__doc__
def resize(self, *shape):
shape = check_shape(shape, allow_nd=self._allow_nd)
if len(shape) != len(self.shape):
# TODO implement resize across dimensions
raise NotImplementedError
if self.ndim == 1:
newN = shape[-1]
for i in list(self._dict):
if i >= newN:
del self._dict[i]
self._shape = shape
return
newM, newN = shape
M, N = self.shape
if newM < M or newN < N:
# Remove all elements outside new dimensions
for i, j in list(self.keys()):
if i >= newM or j >= newN:
del self._dict[i, j]
self._shape = shape
resize.__doc__ = _spbase.resize.__doc__
# Added for 1d to avoid `tocsr` from _base.py
def astype(self, dtype, casting='unsafe', copy=True):
dtype = np.dtype(dtype)
if self.dtype != dtype:
result = self._dok_container(self.shape, dtype=dtype)
data = np.array(list(self._dict.values()), dtype=dtype)
result._dict = dict(zip(self._dict, data))
return result
elif copy:
return self.copy()
return self
def isspmatrix_dok(x):
"""Is `x` of dok_array type?
Parameters
----------
x
object to check for being a dok matrix
Returns
-------
bool
True if `x` is a dok matrix, False otherwise
Examples
--------
>>> from scipy.sparse import dok_array, dok_matrix, coo_matrix, isspmatrix_dok
>>> isspmatrix_dok(dok_matrix([[5]]))
True
>>> isspmatrix_dok(dok_array([[5]]))
False
>>> isspmatrix_dok(coo_matrix([[5]]))
False
"""
return isinstance(x, dok_matrix)
# This namespace class separates array from matrix with isinstance
| _dok_base |
python | tensorflow__tensorflow | tensorflow/python/distribute/cross_device_ops.py | {
"start": 10105,
"end": 23882
} | class ____(object):
"""Base class for cross-device reduction and broadcasting algorithms.
The main purpose of this class is to be passed to
`tf.distribute.MirroredStrategy` in order to choose among different cross
device communication implementations. Prefer using the methods of
`tf.distribute.Strategy` instead of the ones of this class.
Implementations:
* `tf.distribute.ReductionToOneDevice`
* `tf.distribute.NcclAllReduce`
* `tf.distribute.HierarchicalCopyAllReduce`
"""
def __init__(self):
self._canonicalize_devices = True
pass
@property
def _num_between_graph_workers(self):
# Returns 1 by default, the value may be overridden by sub classes.
return 1
def reduce(self, reduce_op, per_replica_value, destinations, options=None):
"""Reduce `per_replica_value` to `destinations`.
See `tf.distribute.StrategyExtended.reduce_to`. This can only be called in
the cross-replica context.
Args:
reduce_op: a `tf.distribute.ReduceOp` specifying how values should be
combined.
per_replica_value: a `tf.distribute.DistributedValues`, or a `tf.Tensor`
like object.
destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a
`tf.Tensor` alike object, or a device string. It specifies the devices
to reduce to. To perform an all-reduce, pass the same to `value` and
`destinations`. Note that if it's a `tf.Variable`, the value is reduced
to the devices of that variable, and this method doesn't update the
variable.
options: a `tf.distribute.experimental.CommunicationOptions`. See
`tf.distribute.experimental.CommunicationOptions` for details.
Returns:
A `tf.Tensor` or `tf.distribute.DistributedValues`.
Raises:
ValueError: if per_replica_value can't be converted to a
`tf.distribute.DistributedValues` or if destinations is not a string,
`tf.Variable` or `tf.distribute.DistributedValues`.
"""
if options is None:
options = collective_util.Options()
per_replica_value = _make_tensor_into_per_replica(per_replica_value)
validate_destinations(destinations)
# Shortcut if `per_replica_value` only contains one value.
if self._num_between_graph_workers == 1 and len(
per_replica_value.values) == 1 and _devices_match(
per_replica_value, destinations, self._canonicalize_devices):
with ops.device(per_replica_value.values[0].device):
v = array_ops.identity(per_replica_value.values[0])
return distribute_utils.regroup((v,), wrap_class=value_lib.Mirrored)
if options is None:
options = collective_util.Options()
return self.reduce_implementation(reduce_op, per_replica_value,
destinations, options)
def _gather(self, per_replica_value, destinations, axis, options=None):
"""Gather `per_replica_value` to `destinations`.
Args:
per_replica_value: a `tf.distribute.DistributedValues`, or a `tf.Tensor`
like object.
destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a
`tf.Tensor` alike object, or a device string. It specifies the devices
to gather to. To perform an all-gather, pass the same to `value` and
`destinations`. Note that if it's a `tf.Variable`, the value is gathered
to the devices of that variable, and this method doesn't update the
variable.
axis: specifies the dimension to gather along within each replica's
tensor.
options: a `tf.distribute.experimental.CommunicationOptions`. See
`tf.distribute.experimental.CommunicationOptions` for details.
Returns:
A `tf.Tensor` or `tf.distribute.DistributedValues`
Raises:
ValueError: if per_replica_value can't be converted to a
`tf.distribute.DistributedValues` or if destinations is not a string,
`tf.Variable` or `tf.distribute.DistributedValues`.
"""
if isinstance(per_replica_value, indexed_slices.IndexedSlices):
raise NotImplementedError("gather/all_gather does not support "
"IndexedSlices")
if options is None:
options = collective_util.Options()
per_replica_value = _make_tensor_into_per_replica(per_replica_value)
validate_destinations(destinations)
# Shortcut if `per_replica_value` only contains one value.
if self._num_between_graph_workers == 1 and len(
per_replica_value.values) == 1 and _devices_match(
per_replica_value, destinations, self._canonicalize_devices):
with ops.device(per_replica_value.values[0].device):
v = array_ops.identity(per_replica_value.values[0])
return distribute_utils.regroup((v,), wrap_class=value_lib.Mirrored)
return self._gather_implementation(per_replica_value, destinations, axis,
options)
def _gather_implementation(self, per_replica_value, destinations, axis,
options):
"""Implementation of `gather` method of `tf.distribute.CrossDeviceOps`.
Overriding this method is useful for subclass implementers.
Args:
per_replica_value: a `tf.distribute.DistributedValues`, or a `tf.Tensor`
like object.
destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a
`tf.Tensor` alike object, or a device string. It specifies the devices
to gather to. To perform an all-gather, pass the same to `value` and
`destinations`. Note that if it's a `tf.Variable`, the value is gathered
to the devices of that variable, this method doesn't update the
variable.
axis: specifies the dimension to gather along within each replica's
tensor.
options: a `tf.distribute.experimental.CommunicationOptions`. See
`tf.distribute.experimental.CommunicationOptions` for details.
Returns:
A `tf.Tensor` or `tf.distribute.DistributedValues`.
Raises:
ValueError: if per_replica_value can't be converted to a
`tf.distribute.DistributedValues` or if destinations is not a string,
`tf.Variable` or `tf.distribute.DistributedValues`.
"""
raise NotImplementedError(
"_gather method must be implemented in descendants.")
def batch_reduce(self, reduce_op, value_destination_pairs, options=None):
"""Reduce values to destinations in batches.
See `tf.distribute.StrategyExtended.batch_reduce_to`. This can only be
called in the cross-replica context.
Args:
reduce_op: a `tf.distribute.ReduceOp` specifying how values should be
combined.
value_destination_pairs: a sequence of (value, destinations) pairs. See
`tf.distribute.CrossDeviceOps.reduce` for descriptions.
options: a `tf.distribute.experimental.CommunicationOptions`. See
`tf.distribute.experimental.CommunicationOptions` for details.
Returns:
A list of `tf.Tensor` or `tf.distribute.DistributedValues`, one per pair
in `value_destination_pairs`.
Raises:
ValueError: if `value_destination_pairs` is not an iterable of
tuples of `tf.distribute.DistributedValues` and destinations.
"""
if options is None:
options = collective_util.Options()
# TODO(yuefengz): if destinations are different, split into several
# `_batch_reduce` invocations.
if not _validate_value_destination_pairs(value_destination_pairs):
# If the first element of each pair is a tensor, we try to turn it into a
# PerReplica object.
value_destination_pairs = _normalize_value_destination_pairs(
value_destination_pairs)
for _, d in value_destination_pairs:
validate_destinations(d)
# Shortcut all PerReplica objects only contain one value.
if self._num_between_graph_workers == 1 and _all_devices_match(
value_destination_pairs, self._canonicalize_devices) and len(
value_destination_pairs[0][0].values) == 1:
return [
distribute_utils.regroup(v.values, wrap_class=value_lib.Mirrored)
for v, _ in value_destination_pairs
]
if options is None:
options = collective_util.Options()
return self.batch_reduce_implementation(reduce_op, value_destination_pairs,
options)
def broadcast(self, tensor, destinations):
"""Broadcast `tensor` to `destinations`.
This can only be called in the cross-replica context.
Args:
tensor: a `tf.Tensor` like object. The value to broadcast.
destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a
`tf.Tensor` alike object, or a device string. It specifies the devices
to broadcast to. Note that if it's a `tf.Variable`, the value is
broadcasted to the devices of that variable, this method doesn't update
the variable.
Returns:
A `tf.Tensor` or `tf.distribute.DistributedValues`.
"""
validate_destinations(destinations)
return self.broadcast_implementation(tensor, destinations)
@doc_controls.for_subclass_implementers
def reduce_implementation(self, reduce_op, per_replica_value, destinations,
options):
"""Implementation of `reduce`.
Overriding this method is useful for subclass implementers.
Args:
reduce_op: a `tf.distribute.ReduceOp` specifying how values should be
combined.
per_replica_value: a `tf.distribute.DistributedValues`, or a `tf.Tensor`
like object.
destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a
`tf.Tensor` alike object, or a device string. It specifies the devices
to reduce to. To perform an all-reduce, pass the same to `value` and
`destinations`. Note that if it's a `tf.Variable`, the value is reduced
to the devices of that variable, this method doesn't update the
variable.
options: a `tf.distribute.experimental.CommunicationOptions`. See
`tf.distribute.experimental.CommunicationOptions` for details.
Returns:
A `tf.Tensor` or `tf.distribute.DistributedValues`.
Raises:
ValueError: if per_replica_value can't be converted to a
`tf.distribute.DistributedValues` or if destinations is not a string,
`tf.Variable` or `tf.distribute.DistributedValues`.
"""
raise NotImplementedError(
"_reduce method must be implemented in descendants.")
@doc_controls.for_subclass_implementers
def batch_reduce_implementation(self, reduce_op, value_destination_pairs,
options):
"""Implementation of `batch_reduce`.
Overriding this method is useful for subclass implementers.
Args:
reduce_op: a `tf.distribute.ReduceOp` specifying how values should be
combined.
value_destination_pairs: a sequence of (value, destinations) pairs. See
`reduce` for descriptions.
options: a `tf.distribute.experimental.CommunicationOptions`. See
`tf.distribute.experimental.CommunicationOptions` for details.
Returns:
A list of `tf.Tensor` or `tf.distribute.DistributedValues`, one per pair
in `value_destination_pairs`.
Raises:
ValueError: if `value_destination_pairs` is not an iterable of
tuples of `tf.distribute.DistributedValues` and destinations.
"""
raise NotImplementedError(
"batch_reduce_implementation method must be implemented in descendants."
)
@doc_controls.for_subclass_implementers
def broadcast_implementation(self, tensor, destinations):
"""Implementation of `broadcast`.
Args:
tensor: a `tf.Tensor` like object. The value to broadcast.
destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a
`tf.Tensor` alike object, or a device string. It specifies the devices
to broadcast to.
`destinations`. Note that if it's a `tf.Variable`, the value is
broadcasted to the devices of that variable, this method doesn't update
the variable.
Returns:
A `tf.Tensor` or `tf.distribute.DistributedValues`.
"""
return simple_broadcast(
tensor,
destinations,
always_mirrored=True,
canonicalize_devices=self._canonicalize_devices)
# ========================== Collective APIs ================================
#
# Different than `reduce`, `batch_reduce` and `broadcast` which must be called
# in cross-replcia context, collective APIs are to be called in replica
# context.
def _all_reduce(self, reduce_op, value, replica_id, options):
"""All-reduce the `value` across all replicas so that all get the result.
`value` can be a nested structure of tensors or `IndexedSlices`. The
implementation should generally batch the all-reduces when possible.
`options` can be set to hint the batching behavior.
This API must be called in a replica context.
Args:
reduce_op: A `tf.distribute.ReduceOp` value specifying how values should
be combined.
value: Value to be reduced. A tensor or a nested structure of tensors or
`IndexedSlices`.
replica_id: An integer indicating the id of the replica where this
all_reduce is called under. This is the local replica id that ranges
from 0 to len(local_devices) - 1.
options: A `tf.distribute.experimental.CommunicationOptions`.
Returns:
A tensor/IndexedSlices or a nested structure of tensors/IndexedSlices with
the reduced values. The structure is the same as `value`.
"""
raise NotImplementedError("_all_reduce must be implemented in descendants.")
@tf_export("distribute.ReductionToOneDevice")
| CrossDeviceOps |
python | giampaolo__psutil | tests/__init__.py | {
"start": 21249,
"end": 27109
} | class ____:
"""A retry decorator."""
def __init__(
self,
exception=Exception,
timeout=None,
retries=None,
interval=0.001,
logfun=None,
):
if timeout and retries:
raise ValueError("timeout and retries args are mutually exclusive")
self.exception = exception
self.timeout = timeout
self.retries = retries
self.interval = interval
self.logfun = logfun
def __iter__(self):
if self.timeout:
stop_at = time.time() + self.timeout
while time.time() < stop_at:
yield
elif self.retries:
for _ in range(self.retries):
yield
else:
while True:
yield
def sleep(self):
if self.interval is not None:
time.sleep(self.interval)
def __call__(self, fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
exc = None
for _ in self:
try:
return fun(*args, **kwargs)
except self.exception as _:
exc = _
if self.logfun is not None:
self.logfun(exc)
self.sleep()
continue
raise exc
# This way the user of the decorated function can change config
# parameters.
wrapper.decorator = self
return wrapper
@retry(
exception=psutil.NoSuchProcess,
logfun=None,
timeout=GLOBAL_TIMEOUT,
interval=0.001,
)
def wait_for_pid(pid):
"""Wait for pid to show up in the process list then return.
Used in the test suite to give time the sub process to initialize.
"""
if pid not in psutil.pids():
raise psutil.NoSuchProcess(pid)
psutil.Process(pid)
@retry(
exception=(FileNotFoundError, AssertionError),
logfun=None,
timeout=GLOBAL_TIMEOUT,
interval=0.001,
)
def wait_for_file(fname, delete=True, empty=False):
"""Wait for a file to be written on disk with some content."""
with open(fname, "rb") as f:
data = f.read()
if not empty:
assert data
if delete:
safe_rmpath(fname)
return data
@retry(
exception=(AssertionError, pytest.fail.Exception),
logfun=None,
timeout=GLOBAL_TIMEOUT,
interval=0.001,
)
def call_until(fun):
"""Keep calling function until it evaluates to True."""
ret = fun()
assert ret
return ret
# ===================================================================
# --- fs
# ===================================================================
def safe_rmpath(path):
"""Convenience function for removing temporary test files or dirs."""
def retry_fun(fun):
# On Windows it could happen that the file or directory has
# open handles or references preventing the delete operation
# to succeed immediately, so we retry for a while. See:
# https://bugs.python.org/issue33240
stop_at = time.time() + GLOBAL_TIMEOUT
while time.time() < stop_at:
try:
return fun()
except FileNotFoundError:
pass
except OSError as _:
err = _
warn(f"ignoring {err}")
time.sleep(0.01)
raise err
try:
st = os.stat(path)
if stat.S_ISDIR(st.st_mode):
fun = functools.partial(shutil.rmtree, path)
else:
fun = functools.partial(os.remove, path)
if POSIX:
fun()
else:
retry_fun(fun)
except FileNotFoundError:
pass
def safe_mkdir(dir):
"""Convenience function for creating a directory."""
try:
os.mkdir(dir)
except FileExistsError:
pass
@contextlib.contextmanager
def chdir(dirname):
"""Context manager which temporarily changes the current directory."""
curdir = os.getcwd()
try:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
def create_py_exe(path):
"""Create a Python executable file in the given location."""
assert not os.path.exists(path), path
atexit.register(safe_rmpath, path)
shutil.copyfile(PYTHON_EXE, path)
if POSIX:
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
return path
def create_c_exe(path, c_code=None):
"""Create a compiled C executable in the given location."""
assert not os.path.exists(path), path
if not shutil.which("gcc"):
return pytest.skip("gcc is not installed")
if c_code is None:
c_code = textwrap.dedent("""
#include <unistd.h>
int main() {
pause();
return 1;
}
""")
else:
assert isinstance(c_code, str), c_code
atexit.register(safe_rmpath, path)
with open(get_testfn(suffix='.c'), "w") as f:
f.write(c_code)
try:
subprocess.check_call(["gcc", f.name, "-o", path])
finally:
safe_rmpath(f.name)
return path
def get_testfn(suffix="", dir=None):
"""Return an absolute pathname of a file or dir that did not
exist at the time this call is made. Also schedule it for safe
deletion at interpreter exit. It's technically racy but probably
not really due to the time variant.
"""
while True:
name = tempfile.mktemp(prefix=TESTFN_PREFIX, suffix=suffix, dir=dir)
if not os.path.exists(name): # also include dirs
path = os.path.realpath(name) # needed for OSX
atexit.register(safe_rmpath, path)
return path
# ===================================================================
# --- testing
# ===================================================================
| retry |
python | doocs__leetcode | solution/2700-2799/2706.Buy Two Chocolates/Solution2.py | {
"start": 0,
"end": 295
} | class ____:
def buyChoco(self, prices: List[int], money: int) -> int:
a = b = inf
for x in prices:
if x < a:
a, b = x, a
elif x < b:
b = x
cost = a + b
return money if money < cost else money - cost
| Solution |
python | huggingface__transformers | src/transformers/models/canine/modeling_canine.py | {
"start": 24336,
"end": 26238
} | class ____(GradientCheckpointingLayer):
def __init__(
self,
config,
local,
always_attend_to_first_position,
first_position_attends_to_all,
attend_from_chunk_width,
attend_from_chunk_stride,
attend_to_chunk_width,
attend_to_chunk_stride,
):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = CanineAttention(
config,
local,
always_attend_to_first_position,
first_position_attends_to_all,
attend_from_chunk_width,
attend_from_chunk_stride,
attend_to_chunk_width,
attend_to_chunk_stride,
)
self.intermediate = CanineIntermediate(config)
self.output = CanineOutput(config)
def forward(
self,
hidden_states: tuple[torch.FloatTensor],
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
| CanineLayer |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/cloud_composer.py | {
"start": 2386,
"end": 21997
} | class ____(GoogleBaseHook, OperationHelper):
"""Hook for Google Cloud Composer APIs."""
client_options = ClientOptions(api_endpoint="composer.googleapis.com:443")
def get_environment_client(self) -> EnvironmentsClient:
"""Retrieve client library object that allow access Environments service."""
return EnvironmentsClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
client_options=self.client_options,
)
def get_image_versions_client(self) -> ImageVersionsClient:
"""Retrieve client library object that allow access Image Versions service."""
return ImageVersionsClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
client_options=self.client_options,
)
def make_composer_airflow_api_request(
self,
method: str,
airflow_uri: str,
path: str,
data: Any | None = None,
timeout: float | None = None,
):
"""
Make a request to Cloud Composer environment's web server.
:param method: The request method to use ('GET', 'OPTIONS', 'HEAD', 'POST', 'PUT', 'PATCH', 'DELETE').
:param airflow_uri: The URI of the Apache Airflow Web UI hosted within this environment.
:param path: The path to send the request.
:param data: Dictionary, list of tuples, bytes, or file-like object to send in the body of the request.
:param timeout: The timeout for this request.
"""
authed_session = AuthorizedSession(self.get_credentials())
resp = authed_session.request(
method=method,
url=urljoin(airflow_uri, path),
data=data,
headers={"Content-Type": "application/json"},
timeout=timeout,
)
return resp
def get_operation(self, operation_name):
return self.get_environment_client().transport.operations_client.get_operation(name=operation_name)
def get_environment_name(self, project_id, region, environment_id):
return f"projects/{project_id}/locations/{region}/environments/{environment_id}"
def get_parent(self, project_id, region):
return f"projects/{project_id}/locations/{region}"
@GoogleBaseHook.fallback_to_default_project_id
def create_environment(
self,
project_id: str,
region: str,
environment: Environment | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create a new environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment: The environment to create. This corresponds to the ``environment`` field on the
``request`` instance; if ``request`` is provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.create_environment(
request={"parent": self.get_parent(project_id, region), "environment": environment},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_environment(
self,
project_id: str,
region: str,
environment_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
name = self.get_environment_name(project_id, region, environment_id)
result = client.delete_environment(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_environment(
self,
project_id: str,
region: str,
environment_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Environment:
"""
Get an existing environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.get_environment(
request={"name": self.get_environment_name(project_id, region, environment_id)},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_environments(
self,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListEnvironmentsPager:
"""
List environments.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param page_size: The maximum number of environments to return.
:param page_token: The next_page_token value returned from a previous List
request, if any.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.list_environments(
request={
"parent": self.get_parent(project_id, region),
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_environment(
self,
project_id: str,
region: str,
environment_id: str,
environment: Environment | dict,
update_mask: dict | FieldMask,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
r"""
Update an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param environment: A patch environment. Fields specified by the ``updateMask`` will be copied from
the patch environment into the environment under update.
This corresponds to the ``environment`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param update_mask: Required. A comma-separated list of paths, relative to ``Environment``, of fields
to update. If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
name = self.get_environment_name(project_id, region, environment_id)
result = client.update_environment(
request={"name": name, "environment": environment, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_image_versions(
self,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
include_past_releases: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListImageVersionsPager:
"""
List ImageVersions for provided location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param page_size: The maximum number of environments to return.
:param page_token: The next_page_token value returned from a previous List
request, if any.
:param include_past_releases: Flag to include past releases
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_image_versions_client()
result = client.list_image_versions(
request={
"parent": self.get_parent(project_id, region),
"page_size": page_size,
"page_token": page_token,
"include_past_releases": include_past_releases,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def execute_airflow_command(
self,
project_id: str,
region: str,
environment_id: str,
command: str,
subcommand: str,
parameters: MutableSequence[str],
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ExecuteAirflowCommandResponse:
"""
Execute Airflow command for provided Composer environment.
:param project_id: The ID of the Google Cloud project that the service belongs to.
:param region: The ID of the Google Cloud region that the service belongs to.
:param environment_id: The ID of the Google Cloud environment that the service belongs to.
:param command: Airflow command.
:param subcommand: Airflow subcommand.
:param parameters: Parameters for the Airflow command/subcommand as an array of arguments. It may
contain positional arguments like ``["my-dag-id"]``, key-value parameters like ``["--foo=bar"]``
or ``["--foo","bar"]``, or other flags like ``["-f"]``.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.execute_airflow_command(
request={
"environment": self.get_environment_name(project_id, region, environment_id),
"command": command,
"subcommand": subcommand,
"parameters": parameters,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def poll_airflow_command(
self,
project_id: str,
region: str,
environment_id: str,
execution_id: str,
pod: str,
pod_namespace: str,
next_line_number: int,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> PollAirflowCommandResponse:
"""
Poll Airflow command execution result for provided Composer environment.
:param project_id: The ID of the Google Cloud project that the service belongs to.
:param region: The ID of the Google Cloud region that the service belongs to.
:param environment_id: The ID of the Google Cloud environment that the service belongs to.
:param execution_id: The unique ID of the command execution.
:param pod: The name of the pod where the command is executed.
:param pod_namespace: The namespace of the pod where the command is executed.
:param next_line_number: Line number from which new logs should be fetched.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.poll_airflow_command(
request={
"environment": self.get_environment_name(project_id, region, environment_id),
"execution_id": execution_id,
"pod": pod,
"pod_namespace": pod_namespace,
"next_line_number": next_line_number,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
def wait_command_execution_result(
self,
project_id: str,
region: str,
environment_id: str,
execution_cmd_info: dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
poll_interval: int = 10,
) -> dict:
while True:
try:
result = self.poll_airflow_command(
project_id=project_id,
region=region,
environment_id=environment_id,
execution_id=execution_cmd_info["execution_id"],
pod=execution_cmd_info["pod"],
pod_namespace=execution_cmd_info["pod_namespace"],
next_line_number=1,
retry=retry,
timeout=timeout,
metadata=metadata,
)
except Exception as ex:
self.log.exception("Exception occurred while polling CMD result")
raise AirflowException(ex)
result_dict = PollAirflowCommandResponse.to_dict(result)
if result_dict["output_end"]:
return result_dict
self.log.info("Waiting for result...")
time.sleep(poll_interval)
def trigger_dag_run(
self,
composer_airflow_uri: str,
composer_dag_id: str,
composer_dag_conf: dict | None = None,
timeout: float | None = None,
) -> dict:
"""
Trigger DAG run for provided Apache Airflow Web UI hosted within Composer environment.
:param composer_airflow_uri: The URI of the Apache Airflow Web UI hosted within Composer environment.
:param composer_dag_id: The ID of DAG which will be triggered.
:param composer_dag_conf: Configuration parameters for the DAG run.
:param timeout: The timeout for this request.
"""
response = self.make_composer_airflow_api_request(
method="POST",
airflow_uri=composer_airflow_uri,
path=f"/api/v1/dags/{composer_dag_id}/dagRuns",
data=json.dumps(
{
"conf": composer_dag_conf or {},
}
),
timeout=timeout,
)
if response.status_code != 200:
self.log.error(response.text)
response.raise_for_status()
return response.json()
def get_dag_runs(
self,
composer_airflow_uri: str,
composer_dag_id: str,
timeout: float | None = None,
) -> dict:
"""
Get the list of dag runs for provided DAG.
:param composer_airflow_uri: The URI of the Apache Airflow Web UI hosted within Composer environment.
:param composer_dag_id: The ID of DAG.
:param timeout: The timeout for this request.
"""
response = self.make_composer_airflow_api_request(
method="GET",
airflow_uri=composer_airflow_uri,
path=f"/api/v1/dags/{composer_dag_id}/dagRuns",
timeout=timeout,
)
if response.status_code != 200:
self.log.error(
"Failed to get DAG runs for dag_id=%s from %s (status=%s): %s",
composer_dag_id,
composer_airflow_uri,
response.status_code,
response.text,
)
response.raise_for_status()
return response.json()
def get_task_instances(
self,
composer_airflow_uri: str,
composer_dag_id: str,
query_parameters: dict | None = None,
timeout: float | None = None,
) -> dict:
"""
Get the list of task instances for provided DAG.
:param composer_airflow_uri: The URI of the Apache Airflow Web UI hosted within Composer environment.
:param composer_dag_id: The ID of DAG.
:query_parameters: Query parameters for this request.
:param timeout: The timeout for this request.
"""
query_string = f"?{urlencode(query_parameters)}" if query_parameters else ""
response = self.make_composer_airflow_api_request(
method="GET",
airflow_uri=composer_airflow_uri,
path=f"/api/v1/dags/{composer_dag_id}/dagRuns/~/taskInstances{query_string}",
timeout=timeout,
)
if response.status_code != 200:
self.log.error(
"Failed to get task instances for dag_id=%s from %s (status=%s): %s",
composer_dag_id,
composer_airflow_uri,
response.status_code,
response.text,
)
response.raise_for_status()
return response.json()
| CloudComposerHook |
python | spack__spack | lib/spack/spack/database.py | {
"start": 73865,
"end": 74307
} | class ____(KeyError):
"""Raised when a spec is not found in the database."""
def __init__(self, spec):
self.spec = spec
super().__init__(spec)
def __str__(self):
# This exception is raised frequently, and almost always
# caught, so ensure we don't pay the cost of Spec.__str__
# unless the exception is actually printed.
return f"No such spec in database: {self.spec}"
| NoSuchSpecError |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_impl.py | {
"start": 49783,
"end": 235119
} | class ____:
"""See `tf.image.resize` for details."""
BILINEAR = 'bilinear'
NEAREST_NEIGHBOR = 'nearest'
BICUBIC = 'bicubic'
AREA = 'area'
LANCZOS3 = 'lanczos3'
LANCZOS5 = 'lanczos5'
GAUSSIAN = 'gaussian'
MITCHELLCUBIC = 'mitchellcubic'
def _resize_images_common(images, resizer_fn, size, preserve_aspect_ratio, name,
skip_resize_if_same):
"""Core functionality for v1 and v2 resize functions."""
with ops.name_scope(name, 'resize', [images, size]):
images = ops.convert_to_tensor(images, name='images')
if images.get_shape().ndims is None:
raise ValueError('\'images\' contains no shape.')
# TODO(shlens): Migrate this functionality to the underlying Op's.
is_batch = True
if images.get_shape().ndims == 3:
is_batch = False
images = array_ops.expand_dims(images, 0)
elif images.get_shape().ndims != 4:
raise ValueError('\'images\' must have either 3 or 4 dimensions.')
_, height, width, _ = images.get_shape().as_list()
try:
size = ops.convert_to_tensor(size, dtypes.int32, name='size')
except (TypeError, ValueError):
raise ValueError('\'size\' must be a 1-D int32 Tensor')
if not size.get_shape().is_compatible_with([2]):
raise ValueError('\'size\' must be a 1-D Tensor of 2 elements: '
'new_height, new_width')
if preserve_aspect_ratio:
# Get the current shapes of the image, even if dynamic.
_, current_height, current_width, _ = _ImageDimensions(images, rank=4)
# do the computation to find the right scale and height/width.
scale_factor_height = (
math_ops.cast(size[0], dtypes.float32) /
math_ops.cast(current_height, dtypes.float32))
scale_factor_width = (
math_ops.cast(size[1], dtypes.float32) /
math_ops.cast(current_width, dtypes.float32))
scale_factor = math_ops.minimum(scale_factor_height, scale_factor_width)
scaled_height_const = math_ops.cast(
math_ops.round(scale_factor *
math_ops.cast(current_height, dtypes.float32)),
dtypes.int32)
scaled_width_const = math_ops.cast(
math_ops.round(scale_factor *
math_ops.cast(current_width, dtypes.float32)),
dtypes.int32)
# NOTE: Reset the size and other constants used later.
size = ops.convert_to_tensor([scaled_height_const, scaled_width_const],
dtypes.int32,
name='size')
size_const_as_shape = tensor_util.constant_value_as_shape(size)
new_height_const = tensor_shape.dimension_at_index(size_const_as_shape,
0).value
new_width_const = tensor_shape.dimension_at_index(size_const_as_shape,
1).value
# If we can determine that the height and width will be unmodified by this
# transformation, we avoid performing the resize.
if skip_resize_if_same and all(
x is not None
for x in [new_width_const, width, new_height_const, height]) and (
width == new_width_const and height == new_height_const):
if not is_batch:
images = array_ops.squeeze(images, axis=[0])
return images
images = resizer_fn(images, size)
# NOTE(mrry): The shape functions for the resize ops cannot unpack
# the packed values in `new_size`, so set the shape here.
images.set_shape([None, new_height_const, new_width_const, None])
if not is_batch:
images = array_ops.squeeze(images, axis=[0])
return images
@tf_export(v1=['image.resize_images', 'image.resize'])
@dispatch.add_dispatch_support
def resize_images(images,
                  size,
                  method=ResizeMethodV1.BILINEAR,
                  align_corners=False,
                  preserve_aspect_ratio=False,
                  name=None):
  """Resize `images` to `size` using the specified `method`.

  If the aspect ratio of `size` differs from that of the input, the resized
  images will be distorted. To avoid distortions see
  `tf.image.resize_with_pad` or `tf.image.resize_with_crop_or_pad`.

  Supported values for `method`:

  * <b>`tf.image.ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.](
    https://en.wikipedia.org/wiki/Bilinear_interpolation)
  * <b>`tf.image.ResizeMethod.NEAREST_NEIGHBOR`</b>: [
    Nearest neighbor interpolation.](
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
  * <b>`tf.image.ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.](
    https://en.wikipedia.org/wiki/Bicubic_interpolation)
  * <b>`tf.image.ResizeMethod.AREA`</b>: Area interpolation.

  The result has the same dtype as `images` when `method` is
  `tf.image.ResizeMethod.NEAREST_NEIGHBOR`. It also keeps the input dtype when
  the size of `images` can be statically determined to already equal `size`,
  because `images` is returned unchanged in that case. Otherwise the result
  dtype is `float32`.

  Args:
    images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new
      size for the images.
    method: ResizeMethod. Defaults to `tf.image.ResizeMethod.BILINEAR`.
    align_corners: bool. If True, the centers of the 4 corner pixels of the
      input and output tensors are aligned, preserving the values at the
      corner pixels. Defaults to `False`.
    preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is
      set, then `images` will be resized to a size that fits in `size` while
      preserving the aspect ratio of the original image. Scales up the image
      if `size` is bigger than the current size of the `image`. Defaults to
      False.
    name: A name for this operation (optional).

  Raises:
    ValueError: if the shape of `images` is incompatible with the
      shape arguments to this function
    ValueError: if `size` has invalid shape or type.
    ValueError: if an unsupported resize method is specified.

  Returns:
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """

  def resize_fn(images_t, new_size):
    """Dispatches to the legacy (v1) resize kernels for `method`."""
    # Accept both the V1 and the V2 enum spelling of each method.
    if method in (ResizeMethodV1.BILINEAR, ResizeMethod.BILINEAR):
      return gen_image_ops.resize_bilinear(
          images_t, new_size, align_corners=align_corners)
    if method in (ResizeMethodV1.NEAREST_NEIGHBOR,
                  ResizeMethod.NEAREST_NEIGHBOR):
      return gen_image_ops.resize_nearest_neighbor(
          images_t, new_size, align_corners=align_corners)
    if method in (ResizeMethodV1.BICUBIC, ResizeMethod.BICUBIC):
      return gen_image_ops.resize_bicubic(
          images_t, new_size, align_corners=align_corners)
    if method in (ResizeMethodV1.AREA, ResizeMethod.AREA):
      return gen_image_ops.resize_area(
          images_t, new_size, align_corners=align_corners)
    raise ValueError('Resize method is not implemented: {}'.format(method))

  return _resize_images_common(
      images,
      resize_fn,
      size,
      preserve_aspect_ratio=preserve_aspect_ratio,
      name=name,
      skip_resize_if_same=True)
@tf_export('image.resize', v1=[])
@dispatch.add_dispatch_support
def resize_images_v2(images,
                     size,
                     method=ResizeMethod.BILINEAR,
                     preserve_aspect_ratio=False,
                     antialias=False,
                     name=None):
  """Resize `images` to `size` using the specified `method`.
  Resized images will be distorted if their original aspect ratio is not
  the same as `size`. To avoid distortions see
  `tf.image.resize_with_pad`.
  >>> image = tf.constant([
  ...  [1,0,0,0,0],
  ...  [0,1,0,0,0],
  ...  [0,0,1,0,0],
  ...  [0,0,0,1,0],
  ...  [0,0,0,0,1],
  ... ])
  >>> # Add "batch" and "channels" dimensions
  >>> image = image[tf.newaxis, ..., tf.newaxis]
  >>> image.shape.as_list()  # [batch, height, width, channels]
  [1, 5, 5, 1]
  >>> tf.image.resize(image, [3,5])[0,...,0].numpy()
  array([[0.6666667, 0.3333333, 0.       , 0.       , 0.       ],
         [0.       , 0.       , 1.       , 0.       , 0.       ],
         [0.       , 0.       , 0.       , 0.3333335, 0.6666665]],
        dtype=float32)
  It works equally well with a single image instead of a batch of images:
  >>> tf.image.resize(image[0], [3,5]).shape.as_list()
  [3, 5, 1]
  When `antialias` is true, the sampling filter will anti-alias the input image
  as well as interpolate.  When downsampling an image with [anti-aliasing](
  https://en.wikipedia.org/wiki/Spatial_anti-aliasing) the sampling filter
  kernel is scaled in order to properly anti-alias the input image signal.
  `antialias` has no effect when upsampling an image:
  >>> a = tf.image.resize(image, [5,10])
  >>> b = tf.image.resize(image, [5,10], antialias=True)
  >>> tf.reduce_max(abs(a - b)).numpy()
  0.0
  The `method` argument expects an item from the `image.ResizeMethod` enum, or
  the string equivalent. The options are:
  *   <b>`bilinear`</b>: [Bilinear interpolation.](
    https://en.wikipedia.org/wiki/Bilinear_interpolation) If `antialias` is
    true, becomes a hat/tent filter function with radius 1 when downsampling.
  *   <b>`lanczos3`</b>:  [Lanczos kernel](
    https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 3.
    High-quality practical filter but may have some ringing, especially on
    synthetic images.
  *   <b>`lanczos5`</b>: [Lanczos kernel] (
    https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 5.
    Very-high-quality filter but may have stronger ringing.
  *   <b>`bicubic`</b>: [Cubic interpolant](
    https://en.wikipedia.org/wiki/Bicubic_interpolation) of Keys. Equivalent to
    Catmull-Rom kernel. Reasonably good quality and faster than Lanczos3Kernel,
    particularly when upsampling.
  *   <b>`gaussian`</b>: [Gaussian kernel](
    https://en.wikipedia.org/wiki/Gaussian_filter) with radius 3,
    sigma = 1.5 / 3.0.
  *   <b>`nearest`</b>: [Nearest neighbor interpolation.](
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
    `antialias` has no effect when used with nearest neighbor interpolation.
  *   <b>`area`</b>: Anti-aliased resampling with area interpolation.
    `antialias` has no effect when used with area interpolation; it
    always anti-aliases.
  *   <b>`mitchellcubic`</b>: Mitchell-Netravali Cubic non-interpolating filter.
    For synthetic images (especially those lacking proper prefiltering), less
    ringing than Keys cubic kernel but less sharp.
  Note: Near image edges the filtering kernel may be partially outside the
  image boundaries. For these pixels, only input pixels inside the image will be
  included in the filter sum, and the output value will be appropriately
  normalized.
  The return value has type `float32`, unless the `method` is
  `ResizeMethod.NEAREST_NEIGHBOR`, then the return dtype is the dtype
  of `images`:
  >>> nn = tf.image.resize(image, [5,7], method='nearest')
  >>> nn[0,...,0].numpy()
  array([[1, 0, 0, 0, 0, 0, 0],
         [0, 1, 1, 0, 0, 0, 0],
         [0, 0, 0, 1, 0, 0, 0],
         [0, 0, 0, 0, 1, 1, 0],
         [0, 0, 0, 0, 0, 0, 1]], dtype=int32)
  With `preserve_aspect_ratio=True`, the aspect ratio is preserved, so `size`
  is the maximum for each dimension:
  >>> max_10_20 = tf.image.resize(image, [10,20], preserve_aspect_ratio=True)
  >>> max_10_20.shape.as_list()
  [1, 10, 10, 1]
  Args:
    images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new
      size for the images.
    method: An `image.ResizeMethod`, or string equivalent. Defaults to
      `bilinear`.
    preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is
      set, then `images` will be resized to a size that fits in `size` while
      preserving the aspect ratio of the original image. Scales up the image
      if `size` is bigger than the current size of the `image`. Defaults to
      False.
    antialias: Whether to use an anti-aliasing filter when downsampling an
      image.
    name: A name for this operation (optional).
  Raises:
    ValueError: if the shape of `images` is incompatible with the
      shape arguments to this function
    ValueError: if `size` has an invalid shape or type.
    ValueError: if an unsupported resize method is specified.
  Returns:
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """

  def resize_fn(images_t, new_size):
    """Resize core function, passed to _resize_images_common."""
    # Methods with no dedicated resize kernel are implemented through the
    # generic ScaleAndTranslate op (with zero translation).
    scale_and_translate_methods = [
        ResizeMethod.LANCZOS3, ResizeMethod.LANCZOS5, ResizeMethod.GAUSSIAN,
        ResizeMethod.MITCHELLCUBIC
    ]

    def resize_with_scale_and_translate(method):
      # Per-axis scale = output size / input spatial size, for (height, width).
      scale = (
          math_ops.cast(new_size, dtype=dtypes.float32) /
          math_ops.cast(array_ops.shape(images_t)[1:3], dtype=dtypes.float32))
      return gen_image_ops.scale_and_translate(
          images_t,
          new_size,
          scale,
          array_ops.zeros([2]),  # no translation: this is a pure resize
          kernel_type=method,
          antialias=antialias)

    if method == ResizeMethod.BILINEAR:
      if antialias:
        # Antialiased bilinear maps to the triangle (tent) kernel.
        return resize_with_scale_and_translate('triangle')
      else:
        return gen_image_ops.resize_bilinear(
            images_t, new_size, half_pixel_centers=True)
    elif method == ResizeMethod.NEAREST_NEIGHBOR:
      # Nearest neighbor ignores `antialias` (see docstring above).
      return gen_image_ops.resize_nearest_neighbor(
          images_t, new_size, half_pixel_centers=True)
    elif method == ResizeMethod.BICUBIC:
      if antialias:
        # Antialiased bicubic maps to the Keys cubic kernel.
        return resize_with_scale_and_translate('keyscubic')
      else:
        return gen_image_ops.resize_bicubic(
            images_t, new_size, half_pixel_centers=True)
    elif method == ResizeMethod.AREA:
      # Area interpolation always anti-aliases; `antialias` has no effect.
      return gen_image_ops.resize_area(images_t, new_size)
    elif method in scale_and_translate_methods:
      return resize_with_scale_and_translate(method)
    else:
      raise ValueError('Resize method is not implemented: {}'.format(method))

  return _resize_images_common(
      images,
      resize_fn,
      size,
      preserve_aspect_ratio=preserve_aspect_ratio,
      name=name,
      skip_resize_if_same=False)
def _resize_image_with_pad_common(image, target_height, target_width,
                                  resize_fn):
  """Core functionality for v1 and v2 resize_image_with_pad functions.

  Resizes `image` with `resize_fn` to fit within `target_height` x
  `target_width` while keeping its aspect ratio (both spatial dimensions are
  divided by the same ratio), then pads the result to exactly the target size
  via `pad_to_bounding_box`, centering the resized content.

  Args:
    image: 3-D or 4-D image tensor; rank may also be statically unknown.
    target_height: Target height; must be > 0 (checked at run time).
    target_width: Target width; must be > 0 (checked at run time).
    resize_fn: Callable `(images, [new_height, new_width]) -> images`
      supplied by the v1/v2 wrappers to perform the actual resize.

  Returns:
    The resized and padded image(s); squeezed back to 3-D if the input was
    3-D.

  Raises:
    ValueError: if `image` has a rank other than 3 or 4, or if the padded
      result has no known rank.
  """
  with ops.name_scope(None, 'resize_image_with_pad', [image]):
    image = ops.convert_to_tensor(image, name='image')
    image_shape = image.get_shape()
    is_batch = True
    # Normalize to a 4-D batch; remember whether to squeeze at the end.
    if image_shape.ndims == 3:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
    elif image_shape.ndims is None:
      # Unknown rank: assume a single 3-D image and fix the rank to 4.
      is_batch = False
      image = array_ops.expand_dims(image, 0)
      image.set_shape([None] * 4)
    elif image_shape.ndims != 4:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' %
          image_shape)
    # Runtime validity checks, attached as control dependencies on `image`.
    assert_ops = _CheckAtLeast3DImage(image, require_static=False)
    assert_ops += _assert(target_width > 0, ValueError,
                          'target_width must be > 0.')
    assert_ops += _assert(target_height > 0, ValueError,
                          'target_height must be > 0.')
    image = control_flow_ops.with_dependencies(assert_ops, image)

    def max_(x, y):
      # Works for both Python numbers (static shape) and tensors (dynamic).
      if _is_tensor(x) or _is_tensor(y):
        return math_ops.maximum(x, y)
      else:
        return max(x, y)

    _, height, width, _ = _ImageDimensions(image, rank=4)

    # convert values to float, to ease divisions
    f_height = math_ops.cast(height, dtype=dtypes.float32)
    f_width = math_ops.cast(width, dtype=dtypes.float32)
    f_target_height = math_ops.cast(target_height, dtype=dtypes.float32)
    f_target_width = math_ops.cast(target_width, dtype=dtypes.float32)

    # Find the ratio by which the image must be adjusted
    # to fit within the target
    ratio = max_(f_width / f_target_width, f_height / f_target_height)
    resized_height_float = f_height / ratio
    resized_width_float = f_width / ratio
    resized_height = math_ops.cast(
        math_ops.floor(resized_height_float), dtype=dtypes.int32)
    resized_width = math_ops.cast(
        math_ops.floor(resized_width_float), dtype=dtypes.int32)

    # Half of the remaining space on each axis becomes the top/left offset.
    padding_height = (f_target_height - resized_height_float) / 2
    padding_width = (f_target_width - resized_width_float) / 2
    f_padding_height = math_ops.floor(padding_height)
    f_padding_width = math_ops.floor(padding_width)
    p_height = max_(0, math_ops.cast(f_padding_height, dtype=dtypes.int32))
    p_width = max_(0, math_ops.cast(f_padding_width, dtype=dtypes.int32))

    # Resize first, then pad to meet requested dimensions
    resized = resize_fn(image, [resized_height, resized_width])

    padded = pad_to_bounding_box(resized, p_height, p_width, target_height,
                                 target_width)
    if padded.get_shape().ndims is None:
      raise ValueError('padded contains no shape.')

    _ImageDimensions(padded, rank=4)

    if not is_batch:
      padded = array_ops.squeeze(padded, axis=[0])
    return padded
@tf_export(v1=['image.resize_image_with_pad'])
@dispatch.add_dispatch_support
def resize_image_with_pad_v1(image,
                             target_height,
                             target_width,
                             method=ResizeMethodV1.BILINEAR,
                             align_corners=False):
  """Resizes and pads an image to a target width and height.

  Resizes an image to the target size while keeping the aspect ratio the same
  and therefore without distortion. If the target dimensions don't match the
  image dimensions, the image is resized and then padded with zeroes to match
  the requested dimensions.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    target_height: Target height.
    target_width: Target width.
    method: Method to use for resizing image. See `resize_images()`
    align_corners: bool. If True, the centers of the 4 corner pixels of the
      input and output tensors are aligned, preserving the values at the
      corner pixels. Defaults to `False`.

  Raises:
    ValueError: if `target_height` or `target_width` are zero or negative.

  Returns:
    Resized and padded image.
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """

  def _legacy_resize(images_t, size_t):
    """Resizes with the v1 kernels, forwarding the caller's options."""
    return resize_images(images_t, size_t, method,
                         align_corners=align_corners)

  return _resize_image_with_pad_common(image, target_height, target_width,
                                       _legacy_resize)
@tf_export('image.resize_with_pad', v1=[])
@dispatch.add_dispatch_support
def resize_image_with_pad_v2(image,
                             target_height,
                             target_width,
                             method=ResizeMethod.BILINEAR,
                             antialias=False):
  """Resizes and pads an image to a target width and height.

  Resizes an image to the target size while keeping the aspect ratio the same
  and therefore without distortion. If the target dimensions don't match the
  image dimensions, the image is resized and then padded with zeroes to match
  the requested dimensions.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    target_height: Target height.
    target_width: Target width.
    method: Method to use for resizing image. See `image.resize()`
    antialias: Whether to use anti-aliasing when resizing. See 'image.resize()'.

  Raises:
    ValueError: if `target_height` or `target_width` are zero or negative.

  Returns:
    Resized and padded image.
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """

  def _v2_resize(images_t, size_t):
    """Resizes with the v2 kernels, forwarding the caller's options."""
    return resize_images_v2(images_t, size_t, method, antialias=antialias)

  return _resize_image_with_pad_common(image, target_height, target_width,
                                       _v2_resize)
@tf_export('image.per_image_standardization')
@dispatch.add_dispatch_support
def per_image_standardization(image):
  """Linearly scales each image in `image` to have mean 0 and variance 1.

  Every 3-D image `x` in `image` is mapped to `(x - mean) / adjusted_stddev`,
  where

  - `mean` is the average of all values in `x`
  - `adjusted_stddev = max(stddev, 1.0/sqrt(N))`, which keeps the denominator
    away from 0 when handling uniform images
  - `N` is the number of elements in `x`
  - `stddev` is the standard deviation of all values in `x`

  Example Usage:

  >>> image = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
  >>> image  # 3-D tensor
  <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
  array([[[ 1,  2,  3],
          [ 4,  5,  6]],
         [[ 7,  8,  9],
          [10, 11, 12]]], dtype=int32)>
  >>> new_image = tf.image.per_image_standardization(image)
  >>> new_image  # 3-D tensor with mean ~= 0 and variance ~= 1
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[-1.593255  , -1.3035723 , -1.0138896 ],
          [-0.7242068 , -0.4345241 , -0.14484136]],
         [[ 0.14484136,  0.4345241 ,  0.7242068 ],
          [ 1.0138896 ,  1.3035723 ,  1.593255  ]]], dtype=float32)>

  Args:
    image: An n-D `Tensor` with at least 3 dimensions, the last 3 of which are
      the dimensions of each image.

  Returns:
    A `Tensor` with the same shape as `image` and its dtype is `float32`.

  Raises:
    ValueError: The shape of `image` has fewer than 3 dimensions.
  """
  with ops.name_scope(None, 'per_image_standardization', [image]) as scope:
    image = ops.convert_to_tensor(image, name='image')
    image = _AssertAtLeast3DImage(image)
    image = math_ops.cast(image, dtype=dtypes.float32)

    # Per-image statistics over the trailing [height, width, channels] axes.
    reduce_axes = [-1, -2, -3]
    mean = math_ops.reduce_mean(image, axis=reduce_axes, keepdims=True)
    stddev = math_ops.reduce_std(image, axis=reduce_axes, keepdims=True)

    # Floor the stddev at 1/sqrt(N) so uniform images don't divide by zero.
    pixel_count = math_ops.reduce_prod(array_ops.shape(image)[-3:])
    stddev_floor = math_ops.rsqrt(math_ops.cast(pixel_count, dtypes.float32))
    denominator = math_ops.maximum(stddev, stddev_floor)

    return math_ops.divide(image - mean, denominator, name=scope)
@tf_export('image.random_brightness')
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def random_brightness(image, max_delta, seed=None):
  """Adjust the brightness of images by a random factor.

  Equivalent to `adjust_brightness()` using a `delta` randomly picked in the
  interval `[-max_delta, max_delta)`.

  For producing deterministic results given a `seed` value, use
  `tf.image.stateless_random_brightness`. Unlike using the `seed` param
  with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee
  the same results given the same seed independent of how many times the
  function is called, and independent of global seed settings
  (e.g. tf.random.set_seed).

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.random_brightness(x, 0.2)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>

  Args:
    image: An image or images to adjust.
    max_delta: float, must be non-negative. This parameter controls the
      maximum relative change in brightness.
    seed: A Python integer. Used to create a random seed. See
      `tf.compat.v1.set_random_seed` for behavior.

  Returns:
    The brightness-adjusted image(s).

  Raises:
    ValueError: if `max_delta` is negative.
  """
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')
  # Sample a brightness offset uniformly from [-max_delta, max_delta).
  random_delta = random_ops.random_uniform(
      [], minval=-max_delta, maxval=max_delta, seed=seed)
  return adjust_brightness(image, random_delta)
@tf_export('image.stateless_random_brightness', v1=[])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def stateless_random_brightness(image, max_delta, seed):
  """Adjust the brightness of images by a random factor deterministically.

  Equivalent to `adjust_brightness()` using a `delta` randomly picked in the
  interval `[-max_delta, max_delta)`.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings
  (e.g. `tf.random.set_seed`).

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_brightness(x, 0.2, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.1376241,  2.1376243,  3.1376243],
          [ 4.1376243,  5.1376243,  6.1376243]],
         [[ 7.1376243,  8.137624 ,  9.137624 ],
          [10.137624 , 11.137624 , 12.137624 ]]], dtype=float32)>

  Args:
    image: An image or images to adjust.
    max_delta: float, must be non-negative.
    seed: A shape [2] Tensor, the seed to the random number generator. Must
      have dtype `int32` or `int64`. (When using XLA, only `int32` is
      allowed.)

  Returns:
    The brightness-adjusted image(s).

  Raises:
    ValueError: if `max_delta` is negative.
  """
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')
  # Stateless sampling: the offset depends only on `seed`, not global state.
  random_delta = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=-max_delta, maxval=max_delta, seed=seed)
  return adjust_brightness(image, random_delta)
@tf_export('image.random_contrast')
@dispatch.add_dispatch_support
def random_contrast(image, lower, upper, seed=None):
  """Adjust the contrast of an image or images by a random factor.

  Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly
  picked in the interval `[lower, upper)`.

  For producing deterministic results given a `seed` value, use
  `tf.image.stateless_random_contrast`. Unlike using the `seed` param
  with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee
  the same results given the same seed independent of how many times the
  function is called, and independent of global seed settings
  (e.g. tf.random.set_seed).

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.random_contrast(x, 0.2, 0.5)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>

  Args:
    image: An image tensor with 3 or more dimensions.
    lower: float.  Lower bound for the random contrast factor.
    upper: float.  Upper bound for the random contrast factor.
    seed: A Python integer. Used to create a random seed. See
      `tf.compat.v1.set_random_seed` for behavior.

  Returns:
    The contrast-adjusted image(s).

  Raises:
    ValueError: if `upper <= lower` or if `lower < 0`.
  """
  if upper <= lower:
    raise ValueError('upper must be > lower.')
  if lower < 0:
    raise ValueError('lower must be non-negative.')
  # Draw the multiplicative contrast factor uniformly from [lower, upper).
  factor = random_ops.random_uniform([], minval=lower, maxval=upper, seed=seed)
  return adjust_contrast(image, factor)
@tf_export('image.stateless_random_contrast', v1=[])
@dispatch.add_dispatch_support
def stateless_random_contrast(image, lower, upper, seed):
  """Adjust the contrast of images by a random factor deterministically.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings
  (e.g. `tf.random.set_seed`).

  Args:
    image: An image tensor with 3 or more dimensions.
    lower: float.  Lower bound for the random contrast factor.
    upper: float.  Upper bound for the random contrast factor.
    seed: A shape [2] Tensor, the seed to the random number generator. Must
      have dtype `int32` or `int64`. (When using XLA, only `int32` is
      allowed.)

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_contrast(x, 0.2, 0.5, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[3.4605184, 4.4605184, 5.4605184],
          [4.820173 , 5.820173 , 6.820173 ]],
         [[6.179827 , 7.179827 , 8.179828 ],
          [7.5394816, 8.539482 , 9.539482 ]]], dtype=float32)>

  Returns:
    The contrast-adjusted image(s).

  Raises:
    ValueError: if `upper <= lower` or if `lower < 0`.
  """
  if upper <= lower:
    raise ValueError('upper must be > lower.')
  if lower < 0:
    raise ValueError('lower must be non-negative.')
  # Stateless sampling: the factor depends only on `seed`, not global state.
  factor = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=lower, maxval=upper, seed=seed)
  return adjust_contrast(image, factor)
@tf_export('image.adjust_brightness')
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def adjust_brightness(image, delta):
  """Adjust the brightness of RGB or Grayscale images.

  This is a convenience method that converts RGB images to float
  representation, adjusts their brightness, and then converts them back to the
  original data type. If several adjustments are chained, it is advisable to
  minimize the number of redundant conversions.

  The value `delta` is added to all components of the tensor `image`. `image`
  is converted to `float` and scaled appropriately if it is in fixed-point
  representation, and `delta` is converted to the same data type. For regular
  images, `delta` should be in the range `(-1,1)`, as it is added to the image
  in floating point representation, where pixel values are in the `[0,1)`
  range.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_brightness(x, delta=0.1)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.1,  2.1,  3.1],
          [ 4.1,  5.1,  6.1]],
         [[ 7.1,  8.1,  9.1],
          [10.1, 11.1, 12.1]]], dtype=float32)>

  Args:
    image: RGB image or images to adjust.
    delta: A scalar. Amount to add to the pixel values.

  Returns:
    A brightness-adjusted tensor of the same shape and type as `image`.
  """
  with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Keep the caller's dtype so the result can be converted back at the end.
    orig_dtype = image.dtype
    # float16/float32 inputs are used as-is; any other dtype is promoted to
    # a float32 working representation first.
    flt_image = (
        image if orig_dtype in (dtypes.float16, dtypes.float32) else
        convert_image_dtype(image, dtypes.float32))

    shifted = math_ops.add(
        flt_image, math_ops.cast(delta, flt_image.dtype), name=name)

    # Saturating conversion clips out-of-range values for integer dtypes.
    return convert_image_dtype(shifted, orig_dtype, saturate=True)
@tf_export('image.adjust_contrast')
@dispatch.add_dispatch_support
def adjust_contrast(images, contrast_factor):
  """Adjust contrast of RGB or grayscale images.

  This is a convenience method that converts RGB images to float
  representation, adjusts their contrast, and then converts them back to the
  original data type. If several adjustments are chained, it is advisable to
  minimize the number of redundant conversions.

  `images` is a tensor of at least 3 dimensions. The last 3 dimensions are
  interpreted as `[height, width, channels]`. The other dimensions only
  represent a collection of images, such as `[batch, height, width, channels].`

  Contrast is adjusted independently for each channel of each image.

  For each channel, this Op computes the mean of the image pixels in the
  channel and then adjusts each component `x` of each pixel to
  `(x - mean) * contrast_factor + mean`.

  `contrast_factor` must be in the interval `(-inf, inf)`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_contrast(x, 2.)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[-3.5, -2.5, -1.5],
          [ 2.5,  3.5,  4.5]],
         [[ 8.5,  9.5, 10.5],
          [14.5, 15.5, 16.5]]], dtype=float32)>

  Args:
    images: Images to adjust.  At least 3-D.
    contrast_factor: A float multiplier for adjusting contrast.

  Returns:
    The contrast-adjusted image or images.
  """
  with ops.name_scope(None, 'adjust_contrast',
                      [images, contrast_factor]) as name:
    images = ops.convert_to_tensor(images, name='images')
    # Keep the caller's dtype so the result can be converted back at the end.
    orig_dtype = images.dtype
    # float16/float32 inputs are used as-is; any other dtype is promoted to
    # a float32 working representation first.
    flt_images = (
        images if orig_dtype in (dtypes.float16, dtypes.float32) else
        convert_image_dtype(images, dtypes.float32))

    scaled = gen_image_ops.adjust_contrastv2(
        flt_images, contrast_factor=contrast_factor, name=name)

    # Saturating conversion clips out-of-range values for integer dtypes.
    return convert_image_dtype(scaled, orig_dtype, saturate=True)
@tf_export('image.adjust_gamma')
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def adjust_gamma(image, gamma=1, gain=1):
  """Performs [Gamma Correction](http://en.wikipedia.org/wiki/Gamma_correction).

  on the input image.

  Also known as Power Law Transform. This function converts the input images
  at first to float representation, then transforms them pixelwise according
  to the equation `Out = gain * In**gamma`, and then converts the result back
  to the original data type.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_gamma(x, 0.2)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[1.       , 1.1486983, 1.2457309],
          [1.319508 , 1.3797297, 1.4309691]],
         [[1.4757731, 1.5157166, 1.5518456],
          [1.5848932, 1.6153942, 1.6437519]]], dtype=float32)>

  Args:
    image : RGB image or images to adjust.
    gamma : A scalar or tensor. Non-negative real number.
    gain  : A scalar or tensor. The constant multiplier.

  Returns:
    A Tensor. A Gamma-adjusted tensor of the same shape and type as `image`.

  Raises:
    ValueError: If gamma is negative.

  Notes:
    For gamma greater than 1, the histogram will shift towards left and
    the output image will be darker than the input image.
    For gamma less than 1, the histogram will shift towards right and
    the output image will be brighter than the input image.

  References:
    [Wikipedia](http://en.wikipedia.org/wiki/Gamma_correction)
  """
  with ops.name_scope(None, 'adjust_gamma', [image, gamma, gain]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Keep the caller's dtype so the result can be converted back at the end.
    orig_dtype = image.dtype
    # float16/float32 inputs are used as-is; any other dtype is promoted to
    # a float32 working representation first.
    flt_image = (
        image if orig_dtype in (dtypes.float16, dtypes.float32) else
        convert_image_dtype(image, dtypes.float32))

    # Reject negative gamma at graph-run time via a control dependency.
    assert_op = _assert(gamma >= 0, ValueError,
                        'Gamma should be a non-negative real number.')
    if assert_op:
      gamma = control_flow_ops.with_dependencies(assert_op, gamma)

    # Power-law transform: Out = gain * In**gamma.
    gamma_corrected = gain * flt_image**gamma

    return convert_image_dtype(gamma_corrected, orig_dtype, saturate=True)
@tf_export('image.convert_image_dtype')
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def convert_image_dtype(image, dtype, saturate=False, name=None):
"""Convert `image` to `dtype`, scaling its values if needed.
The operation supports data types (for `image` and `dtype`) of
`uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
`float16`, `float32`, `float64`, `bfloat16`.
Images that are represented using floating point values are expected to have
values in the range [0,1). Image data stored in integer data types are
expected to have values in the range `[0,MAX]`, where `MAX` is the largest
positive representable number for the data type.
This op converts between data types, scaling the values appropriately before
casting.
Usage Example:
>>> x = [[[1, 2, 3], [4, 5, 6]],
... [[7, 8, 9], [10, 11, 12]]]
>>> x_int8 = tf.convert_to_tensor(x, dtype=tf.int8)
>>> tf.image.convert_image_dtype(x_int8, dtype=tf.float16, saturate=False)
<tf.Tensor: shape=(2, 2, 3), dtype=float16, numpy=
array([[[0.00787, 0.01575, 0.02362],
[0.0315 , 0.03937, 0.04724]],
[[0.0551 , 0.063 , 0.07086],
[0.07874, 0.0866 , 0.0945 ]]], dtype=float16)>
Converting integer types to floating point types returns normalized floating
point values in the range [0, 1); the values are normalized by the `MAX` value
of the input dtype. Consider the following two examples:
>>> a = [[[1], [2]], [[3], [4]]]
>>> a_int8 = tf.convert_to_tensor(a, dtype=tf.int8)
>>> tf.image.convert_image_dtype(a_int8, dtype=tf.float32)
<tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy=
array([[[0.00787402],
[0.01574803]],
[[0.02362205],
[0.03149606]]], dtype=float32)>
>>> a_int32 = tf.convert_to_tensor(a, dtype=tf.int32)
>>> tf.image.convert_image_dtype(a_int32, dtype=tf.float32)
<tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy=
array([[[4.6566129e-10],
[9.3132257e-10]],
[[1.3969839e-09],
[1.8626451e-09]]], dtype=float32)>
Despite having identical values of `a` and output dtype of `float32`, the
outputs differ due to the different input dtypes (`int8` vs. `int32`). This
is, again, because the values are normalized by the `MAX` value of the input
dtype.
Note that converting floating point values to integer type may lose precision.
In the example below, an image tensor `b` of dtype `float32` is converted to
`int8` and back to `float32`. The final output, however, is different from
the original input `b` due to precision loss.
>>> b = [[[0.12], [0.34]], [[0.56], [0.78]]]
>>> b_float32 = tf.convert_to_tensor(b, dtype=tf.float32)
>>> b_int8 = tf.image.convert_image_dtype(b_float32, dtype=tf.int8)
>>> tf.image.convert_image_dtype(b_int8, dtype=tf.float32)
<tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy=
array([[[0.11811024],
[0.33858266]],
[[0.5590551 ],
[0.77952754]]], dtype=float32)>
Scaling up from an integer type (input dtype) to another integer type (output
dtype) will not map input dtype's `MAX` to output dtype's `MAX` but converting
back and forth should result in no change. For example, as shown below, the
`MAX` value of int8 (=127) is not mapped to the `MAX` value of int16 (=32,767)
but, when scaled back, we get the same, original values of `c`.
>>> c = [[[1], [2]], [[127], [127]]]
>>> c_int8 = tf.convert_to_tensor(c, dtype=tf.int8)
>>> c_int16 = tf.image.convert_image_dtype(c_int8, dtype=tf.int16)
>>> print(c_int16)
tf.Tensor(
[[[ 256]
[ 512]]
[[32512]
[32512]]], shape=(2, 2, 1), dtype=int16)
>>> c_int8_back = tf.image.convert_image_dtype(c_int16, dtype=tf.int8)
>>> print(c_int8_back)
tf.Tensor(
[[[ 1]
[ 2]]
[[127]
[127]]], shape=(2, 2, 1), dtype=int8)
Scaling down from an integer type to another integer type can be a lossy
conversion. Notice in the example below that converting `int16` to `uint8` and
back to `int16` has lost precision.
>>> d = [[[1000], [2000]], [[3000], [4000]]]
>>> d_int16 = tf.convert_to_tensor(d, dtype=tf.int16)
>>> d_uint8 = tf.image.convert_image_dtype(d_int16, dtype=tf.uint8)
>>> d_int16_back = tf.image.convert_image_dtype(d_uint8, dtype=tf.int16)
>>> print(d_int16_back)
tf.Tensor(
[[[ 896]
[1920]]
[[2944]
[3968]]], shape=(2, 2, 1), dtype=int16)
Note that converting from floating point inputs to integer types may lead to
over/underflow problems. Set saturate to `True` to avoid such problem in
problematic conversions. If enabled, saturation will clip the output into the
allowed range before performing a potentially dangerous cast (and only before
performing such a cast, i.e., when casting from a floating point to an integer
type, and when casting from a signed to an unsigned type; `saturate` has no
effect on casts between floats, or on casts that increase the type's range).
Args:
image: An image.
dtype: A `DType` to convert `image` to.
saturate: If `True`, clip the input before casting (if necessary).
name: A name for this operation (optional).
Returns:
`image`, converted to `dtype`.
Raises:
AttributeError: Raises an attribute error when dtype is neither
float nor integer.
"""
image = ops.convert_to_tensor(image, name='image')
dtype = dtypes.as_dtype(dtype)
if not dtype.is_floating and not dtype.is_integer:
raise AttributeError('dtype must be either floating point or integer')
if not image.dtype.is_floating and not image.dtype.is_integer:
raise AttributeError('image dtype must be either floating point or integer')
if dtype == image.dtype:
return array_ops.identity(image, name=name)
with ops.name_scope(name, 'convert_image', [image]) as name:
# Both integer: use integer multiplication in the larger range
if image.dtype.is_integer and dtype.is_integer:
scale_in = image.dtype.max
scale_out = dtype.max
if scale_in > scale_out:
# Scaling down, scale first, then cast. The scaling factor will
# cause in.max to be mapped to above out.max but below out.max+1,
# so that the output is safely in the supported range.
scale = (scale_in + 1) // (scale_out + 1)
scaled = math_ops.floordiv(image, scale)
if saturate:
return math_ops.saturate_cast(scaled, dtype, name=name)
else:
return math_ops.cast(scaled, dtype, name=name)
else:
# Scaling up, cast first, then scale. The scale will not map in.max to
# out.max, but converting back and forth should result in no change.
if saturate:
cast = math_ops.saturate_cast(image, dtype)
else:
cast = math_ops.cast(image, dtype)
scale = (scale_out + 1) // (scale_in + 1)
return math_ops.multiply(cast, scale, name=name)
elif image.dtype.is_floating and dtype.is_floating:
# Both float: Just cast, no possible overflows in the allowed ranges.
# Note: We're ignoring float overflows. If your image dynamic range
# exceeds float range, you're on your own.
return math_ops.cast(image, dtype, name=name)
else:
if image.dtype.is_integer:
# Converting to float: first cast, then scale. No saturation possible.
cast = math_ops.cast(image, dtype)
scale = 1. / image.dtype.max
return math_ops.multiply(cast, scale, name=name)
else:
# Converting from float: first scale, then cast
scale = dtype.max + 0.5 # avoid rounding problems in the cast
scaled = math_ops.multiply(image, scale)
if saturate:
return math_ops.saturate_cast(scaled, dtype, name=name)
else:
return math_ops.cast(scaled, dtype, name=name)
@tf_export('image.rgb_to_grayscale')
@dispatch.add_dispatch_support
def rgb_to_grayscale(images, name=None):
  """Converts one or more images from RGB to Grayscale.

  The output tensor has the same `DType` and rank as `images`, but its last
  dimension has size 1, holding the Grayscale value of each pixel.

  >>> original = tf.constant([[[1.0, 2.0, 3.0]]])
  >>> converted = tf.image.rgb_to_grayscale(original)
  >>> print(converted.numpy())
  [[[1.81...]]]

  Args:
    images: The RGB tensor to convert. The last dimension must have size 3 and
      should contain RGB values.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name:
    images = ops.convert_to_tensor(images, name='images')
    # Keep the caller's dtype so the result can be converted back at the end.
    input_dtype = images.dtype
    as_float = convert_image_dtype(images, dtypes.float32)
    # Luma coefficients for RGB -> grayscale; see
    # https://en.wikipedia.org/wiki/Luma_%28video%29
    luma_weights = [0.2989, 0.5870, 0.1140]
    gray = math_ops.tensordot(as_float, luma_weights, [-1, -1])
    gray = array_ops.expand_dims(gray, -1)
    return convert_image_dtype(gray, input_dtype, name=name)
@tf_export('image.grayscale_to_rgb')
@dispatch.add_dispatch_support
def grayscale_to_rgb(images, name=None):
  """Converts one or more images from Grayscale to RGB.

  The output tensor has the same `DType` and rank as `images`, but its last
  dimension has size 3, holding the RGB value of each pixel. The input
  images' last dimension must be size 1.

  >>> original = tf.constant([[[1.0], [2.0], [3.0]]])
  >>> converted = tf.image.grayscale_to_rgb(original)
  >>> print(converted.numpy())
  [[[1. 1. 1.]
  [2. 2. 2.]
  [3. 3. 3.]]]

  Args:
    images: The Grayscale tensor to convert. The last dimension must be size 1.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name:
    images = _AssertGrayscaleImage(images)
    images = ops.convert_to_tensor(images, name='images')
    # Build a `multiples` vector of [1, ..., 1, 3]: tile only the size-1
    # channel dimension three times, leaving every other dimension untouched.
    leading_ones = array_ops.ones(
        array_ops.expand_dims(array_ops.rank(images) - 1, 0),
        dtype=dtypes.int32)
    multiples = array_ops.concat(
        [leading_ones, array_ops.expand_dims(3, 0)], 0)
    rgb = array_ops.tile(images, multiples, name=name)
    rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
    return rgb
# pylint: disable=invalid-name
@tf_export('image.random_hue')
@dispatch.add_dispatch_support
def random_hue(image, max_delta, seed=None):
  """Adjust the hue of RGB images by a random factor.

  Equivalent to `adjust_hue()` with a `delta` drawn uniformly from the
  interval `[-max_delta, max_delta)`.

  `max_delta` must be in the interval `[0, 0.5]`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.random_hue(x, 0.2)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>

  For producing deterministic results given a `seed` value, use
  `tf.image.stateless_random_hue`. Unlike using the `seed` param with
  `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same
  results given the same seed independent of how many times the function is
  called, and independent of global seed settings (e.g. tf.random.set_seed).

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    max_delta: float. The maximum value for the random delta.
    seed: An operation-specific seed. It will be used in conjunction with the
      graph-level seed to determine the real seeds that will be used in this
      operation. Please see the documentation of set_random_seed for its
      interaction with the graph-level random seed.

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `max_delta` is invalid.
  """
  # Validate the half-width of the sampling interval before building any ops.
  if max_delta > 0.5:
    raise ValueError('max_delta must be <= 0.5.')
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')
  hue_delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed)
  return adjust_hue(image, hue_delta)
@tf_export('image.stateless_random_hue', v1=[])
@dispatch.add_dispatch_support
def stateless_random_hue(image, max_delta, seed):
  """Adjust the hue of RGB images by a random factor deterministically.

  Equivalent to `adjust_hue()` with a `delta` drawn uniformly from the
  interval `[-max_delta, max_delta)`.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings (e.g.
  `tf.random.set_seed`).

  `max_delta` must be in the interval `[0, 0.5]`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_hue(x, 0.2, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.6514902,  1.       ,  3.       ],
          [ 4.65149  ,  4.       ,  6.       ]],
         [[ 7.65149  ,  7.       ,  9.       ],
          [10.65149  , 10.       , 12.       ]]], dtype=float32)>

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    max_delta: float. The maximum value for the random delta.
    seed: A shape [2] Tensor, the seed to the random number generator. Must have
      dtype `int32` or `int64`.

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `max_delta` is invalid.
  """
  # Validate the half-width of the sampling interval before building any ops.
  if max_delta > 0.5:
    raise ValueError('max_delta must be <= 0.5.')
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')
  hue_delta = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=-max_delta, maxval=max_delta, seed=seed)
  return adjust_hue(image, hue_delta)
@tf_export('image.adjust_hue')
@dispatch.add_dispatch_support
def adjust_hue(image, delta, name=None):
  """Adjust hue of RGB images.

  This is a convenience method that converts an RGB image to float
  representation, converts it to HSV, adds an offset to the
  hue channel, converts back to RGB and then back to the original
  data type. If several adjustments are chained it is advisable to minimize
  the number of redundant conversions.

  `image` is an RGB image. The image hue is adjusted by converting the
  image(s) to HSV and rotating the hue channel (H) by
  `delta`. The image is then converted back to RGB.

  `delta` must be in the interval `[-1, 1]`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_hue(x, 0.2)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 2.3999996,  1.       ,  3.       ],
          [ 5.3999996,  4.       ,  6.       ]],
         [[ 8.4      ,  7.       ,  9.       ],
          [11.4      , 10.       , 12.       ]]], dtype=float32)>

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    delta: float. How much to add to the hue channel.
    name: A name for this operation (optional).

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    InvalidArgumentError: image must have at least 3 dimensions.
    InvalidArgumentError: The size of the last dimension must be 3.
    ValueError: if `delta` is not in the interval of `[-1, 1]`.

  Usage Example:

  >>> image = [[[1, 2, 3], [4, 5, 6]],
  ...          [[7, 8, 9], [10, 11, 12]],
  ...          [[13, 14, 15], [16, 17, 18]]]
  >>> image = tf.constant(image)
  >>> tf.image.adjust_hue(image, 0.2)
  <tf.Tensor: shape=(3, 2, 3), dtype=int32, numpy=
  array([[[ 2,  1,  3],
          [ 5,  4,  6]],
         [[ 8,  7,  9],
          [11, 10, 12]],
         [[14, 13, 15],
          [17, 16, 18]]], dtype=int32)>
  """
  with ops.name_scope(name, 'adjust_hue', [image]) as name:
    # In eager mode `delta` is a concrete value, so validate it up front.
    if context.executing_eagerly() and (delta < -1 or delta > 1):
      raise ValueError('delta must be in the interval [-1, 1]')
    image = ops.convert_to_tensor(image, name='image')
    # Keep the caller's dtype so the result can be converted back at the end.
    orig_dtype = image.dtype
    if orig_dtype in (dtypes.float16, dtypes.float32):
      # Already floating point; operate on the input directly.
      working = image
    else:
      working = convert_image_dtype(image, dtypes.float32)
    shifted = gen_image_ops.adjust_hue(working, delta)
    return convert_image_dtype(shifted, orig_dtype)
# pylint: disable=invalid-name
@tf_export('image.random_jpeg_quality')
@dispatch.add_dispatch_support
def random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed=None):
  """Randomly changes jpeg encoding quality for inducing jpeg noise.

  `min_jpeg_quality` must be in the interval `[0, 100]` and less than
  `max_jpeg_quality`.

  `max_jpeg_quality` must be in the interval `[0, 100]`.

  Usage Example:

  >>> x = tf.constant([[[1, 2, 3],
  ...                   [4, 5, 6]],
  ...                  [[7, 8, 9],
  ...                   [10, 11, 12]]], dtype=tf.uint8)
  >>> tf.image.random_jpeg_quality(x, 75, 95)
  <tf.Tensor: shape=(2, 2, 3), dtype=uint8, numpy=...>

  For producing deterministic results given a `seed` value, use
  `tf.image.stateless_random_jpeg_quality`. Unlike using the `seed` param
  with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the
  same results given the same seed independent of how many times the function is
  called, and independent of global seed settings (e.g. tf.random.set_seed).

  Args:
    image: 3D image. Size of the last dimension must be 1 or 3.
    min_jpeg_quality: Minimum jpeg encoding quality to use.
    max_jpeg_quality: Maximum jpeg encoding quality to use.
    seed: An operation-specific seed. It will be used in conjunction with the
      graph-level seed to determine the real seeds that will be used in this
      operation. Please see the documentation of set_random_seed for its
      interaction with the graph-level random seed.

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid.
  """
  # Both bounds must fall in the JPEG quality range [0, 100].
  for quality in (min_jpeg_quality, max_jpeg_quality):
    if quality < 0 or quality > 100:
      raise ValueError('jpeg encoding range must be between 0 and 100.')
  if min_jpeg_quality >= max_jpeg_quality:
    raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.')
  # Sample an integer quality uniformly from [min, max).
  jpeg_quality = random_ops.random_uniform(
      [], min_jpeg_quality, max_jpeg_quality, seed=seed, dtype=dtypes.int32)
  return adjust_jpeg_quality(image, jpeg_quality)
@tf_export('image.stateless_random_jpeg_quality', v1=[])
@dispatch.add_dispatch_support
def stateless_random_jpeg_quality(image,
                                  min_jpeg_quality,
                                  max_jpeg_quality,
                                  seed):
  """Deterministically radomize jpeg encoding quality for inducing jpeg noise.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings (e.g.
  `tf.random.set_seed`).

  `min_jpeg_quality` must be in the interval `[0, 100]` and less than
  `max_jpeg_quality`.

  `max_jpeg_quality` must be in the interval `[0, 100]`.

  Usage Example:

  >>> x = tf.constant([[[1, 2, 3],
  ...                   [4, 5, 6]],
  ...                  [[7, 8, 9],
  ...                   [10, 11, 12]]], dtype=tf.uint8)
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_jpeg_quality(x, 75, 95, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=uint8, numpy=
  array([[[ 0,  4,  5],
          [ 1,  5,  6]],
         [[ 5,  9, 10],
          [ 5,  9, 10]]], dtype=uint8)>

  Args:
    image: 3D image. Size of the last dimension must be 1 or 3.
    min_jpeg_quality: Minimum jpeg encoding quality to use.
    max_jpeg_quality: Maximum jpeg encoding quality to use.
    seed: A shape [2] Tensor, the seed to the random number generator. Must have
      dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid.
  """
  # Both bounds must fall in the JPEG quality range [0, 100].
  for quality in (min_jpeg_quality, max_jpeg_quality):
    if quality < 0 or quality > 100:
      raise ValueError('jpeg encoding range must be between 0 and 100.')
  if min_jpeg_quality >= max_jpeg_quality:
    raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.')
  # Sample an integer quality deterministically from [min, max).
  jpeg_quality = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=min_jpeg_quality, maxval=max_jpeg_quality, seed=seed,
      dtype=dtypes.int32)
  return adjust_jpeg_quality(image, jpeg_quality)
@tf_export('image.adjust_jpeg_quality')
@dispatch.add_dispatch_support
def adjust_jpeg_quality(image, jpeg_quality, dct_method='', name=None):
  """Adjust jpeg encoding quality of an image.

  This is a convenience method that converts an image to uint8 representation,
  encodes it to jpeg with `jpeg_quality`, decodes it, and then converts back
  to the original data type.

  `jpeg_quality` must be in the interval `[0, 100]`.

  Usage Examples:

  >>> x = [[[0.01, 0.02, 0.03],
  ...       [0.04, 0.05, 0.06]],
  ...      [[0.07, 0.08, 0.09],
  ...       [0.10, 0.11, 0.12]]]
  >>> x_jpeg = tf.image.adjust_jpeg_quality(x, 75)
  >>> x_jpeg.numpy()
  array([[[0.00392157, 0.01960784, 0.03137255],
          [0.02745098, 0.04313726, 0.05490196]],
         [[0.05882353, 0.07450981, 0.08627451],
          [0.08235294, 0.09803922, 0.10980393]]], dtype=float32)

  Note that floating point values are expected to have values in the range
  [0,1) and values outside this range are clipped.

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_jpeg_quality(x, 75)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[1., 1., 1.],
          [1., 1., 1.]],
         [[1., 1., 1.],
          [1., 1., 1.]]], dtype=float32)>

  Note that `jpeg_quality` 100 is still lossy compression.

  >>> x = tf.constant([[[1, 2, 3],
  ...                   [4, 5, 6]],
  ...                  [[7, 8, 9],
  ...                   [10, 11, 12]]], dtype=tf.uint8)
  >>> tf.image.adjust_jpeg_quality(x, 100)
  <tf.Tensor: shape(2, 2, 3), dtype=uint8, numpy=
  array([[[ 0,  1,  3],
          [ 3,  4,  6]],
         [[ 6,  7,  9],
          [ 9, 10, 12]]], dtype=uint8)>

  Args:
    image: 3D image. The size of the last dimension must be None, 1 or 3.
    jpeg_quality: Python int or Tensor of type int32. jpeg encoding quality.
    dct_method: An optional string. Specifies the DCT method to use for JPEG
      decompression. Currently available options are ["INTEGER_FAST",
      "INTEGER_ACCURATE"]. Defaults to "" which maps to "INTEGER_FAST",
      sacrificing image quality for speed.
    name: A name for this operation (optional).

  Returns:
    Adjusted image, same shape and DType as `image`.

  Raises:
    InvalidArgumentError: quality must be in [0,100]
    InvalidArgumentError: image must have 1 or 3 channels
  """
  with ops.name_scope(name, 'adjust_jpeg_quality', [image]):
    image = ops.convert_to_tensor(image, name='image')
    # Preserve the channel count so decoding restores the same layout.
    num_channels = image.shape.as_list()[-1]
    # Keep the caller's dtype so the result can be converted back at the end.
    orig_dtype = image.dtype
    # JPEG encoding operates on uint8; saturate to avoid over/underflow.
    image = convert_image_dtype(image, dtypes.uint8, saturate=True)
    if not _is_tensor(jpeg_quality):
      # A plain Python int; wrap it so the variable-quality op accepts it.
      jpeg_quality = ops.convert_to_tensor(jpeg_quality, dtype=dtypes.int32)
    # Round-trip through JPEG at the requested quality to induce the noise.
    encoded = gen_image_ops.encode_jpeg_variable_quality(image, jpeg_quality)
    decoded = gen_image_ops.decode_jpeg(
        encoded, channels=num_channels, dct_method=dct_method)
    return convert_image_dtype(decoded, orig_dtype, saturate=True)
@tf_export('image.random_saturation')
@dispatch.add_dispatch_support
def random_saturation(image, lower, upper, seed=None):
  """Adjust the saturation of RGB images by a random factor.

  Equivalent to `adjust_saturation()` with a `saturation_factor` drawn
  uniformly from the interval `[lower, upper)`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.random_saturation(x, 5, 10)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 0. ,  1.5,  3. ],
          [ 0. ,  3. ,  6. ]],
         [[ 0. ,  4.5,  9. ],
          [ 0. ,  6. , 12. ]]], dtype=float32)>

  For producing deterministic results given a `seed` value, use
  `tf.image.stateless_random_saturation`. Unlike using the `seed` param
  with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the
  same results given the same seed independent of how many times the function is
  called, and independent of global seed settings (e.g. tf.random.set_seed).

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    lower: float. Lower bound for the random saturation factor.
    upper: float. Upper bound for the random saturation factor.
    seed: An operation-specific seed. It will be used in conjunction with the
      graph-level seed to determine the real seeds that will be used in this
      operation. Please see the documentation of set_random_seed for its
      interaction with the graph-level random seed.

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `upper <= lower` or if `lower < 0`.
  """
  # The sampling interval must be non-empty and non-negative.
  if upper <= lower:
    raise ValueError('upper must be > lower.')
  if lower < 0:
    raise ValueError('lower must be non-negative.')
  factor = random_ops.random_uniform([], lower, upper, seed=seed)
  return adjust_saturation(image, factor)
@tf_export('image.stateless_random_saturation', v1=[])
@dispatch.add_dispatch_support
def stateless_random_saturation(image, lower, upper, seed=None):
  """Adjust the saturation of RGB images by a random factor deterministically.

  Equivalent to `adjust_saturation()` with a `saturation_factor` drawn
  uniformly from the interval `[lower, upper)`.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings (e.g.
  `tf.random.set_seed`).

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_saturation(x, 0.5, 1.0, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.1559395,  2.0779698,  3.       ],
          [ 4.1559396,  5.07797  ,  6.       ]],
         [[ 7.1559396,  8.07797  ,  9.       ],
          [10.155939 , 11.07797  , 12.       ]]], dtype=float32)>

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    lower: float. Lower bound for the random saturation factor.
    upper: float. Upper bound for the random saturation factor.
    seed: A shape [2] Tensor, the seed to the random number generator. Must have
      dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `upper <= lower` or if `lower < 0`.
  """
  # The sampling interval must be non-empty and non-negative.
  if upper <= lower:
    raise ValueError('upper must be > lower.')
  if lower < 0:
    raise ValueError('lower must be non-negative.')
  # NOTE(review): `seed` defaults to None, but the stateless RNG below expects
  # a concrete shape-[2] seed tensor — confirm whether the default is
  # intentional or should be a required argument.
  factor = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=lower, maxval=upper, seed=seed)
  return adjust_saturation(image, factor)
@tf_export('image.adjust_saturation')
@dispatch.add_dispatch_support
def adjust_saturation(image, saturation_factor, name=None):
  """Adjust saturation of RGB images.

  This is a convenience method that converts RGB images to float
  representation, converts them to HSV, adds an offset to the
  saturation channel, converts back to RGB and then back to the original
  data type. If several adjustments are chained it is advisable to minimize
  the number of redundant conversions.

  `image` is an RGB image or images. The image saturation is adjusted by
  converting the images to HSV and multiplying the saturation (S) channel by
  `saturation_factor` and clipping. The images are then converted back to RGB.

  `saturation_factor` must be in the interval `[0, inf)`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_saturation(x, 0.5)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 2. ,  2.5,  3. ],
          [ 5. ,  5.5,  6. ]],
         [[ 8. ,  8.5,  9. ],
          [11. , 11.5, 12. ]]], dtype=float32)>

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    saturation_factor: float. Factor to multiply the saturation by.
    name: A name for this operation (optional).

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    InvalidArgumentError: input must have 3 channels
  """
  with ops.name_scope(name, 'adjust_saturation', [image]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Keep the caller's dtype so the result can be converted back at the end.
    orig_dtype = image.dtype
    if orig_dtype in (dtypes.float16, dtypes.float32):
      # Already floating point; operate on the input directly.
      working = image
    else:
      working = convert_image_dtype(image, dtypes.float32)
    scaled = gen_image_ops.adjust_saturation(working, saturation_factor)
    return convert_image_dtype(scaled, orig_dtype)
@tf_export('io.is_jpeg', 'image.is_jpeg', v1=['io.is_jpeg', 'image.is_jpeg'])
def is_jpeg(contents, name=None):
  r"""Convenience function to check if the 'contents' encodes a JPEG image.

  Args:
    contents: 0-D `string`. The encoded image bytes.
    name: A name for the operation (optional)

  Returns:
    A scalar boolean tensor indicating if 'contents' may be a JPEG image.
    is_jpeg is susceptible to false positives.
  """
  # Normal JPEGs start with \xff\xd8\xff\xe0
  # JPEG with EXIF starts with \xff\xd8\xff\xe1
  # Use \xff\xd8\xff to cover both.
  with ops.name_scope(name, 'is_jpeg'):
    header = string_ops.substr(contents, 0, 3)
    return math_ops.equal(header, b'\xff\xd8\xff', name=name)
def _is_png(contents, name=None):
  r"""Convenience function to check if the 'contents' encodes a PNG image.

  Args:
    contents: 0-D `string`. The encoded image bytes.
    name: A name for the operation (optional)

  Returns:
    A scalar boolean tensor indicating if 'contents' may be a PNG image.
    is_png is susceptible to false positives.
  """
  with ops.name_scope(name, 'is_png'):
    # Compare the first three bytes against the start of the PNG signature.
    header = string_ops.substr(contents, 0, 3)
    return math_ops.equal(header, b'\211PN', name=name)
# Re-export the generated image decode/encode kernels under their public
# `tf.io.*` / `tf.image.*` API names. Each one is wrapped with
# `dispatch.add_dispatch_support` so it participates in TensorFlow's API
# dispatch mechanism.
decode_and_crop_jpeg = tf_export(
    'io.decode_and_crop_jpeg',
    'image.decode_and_crop_jpeg',
    v1=['io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_and_crop_jpeg))

decode_bmp = tf_export(
    'io.decode_bmp',
    'image.decode_bmp',
    v1=['io.decode_bmp', 'image.decode_bmp'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_bmp))

decode_gif = tf_export(
    'io.decode_gif',
    'image.decode_gif',
    v1=['io.decode_gif', 'image.decode_gif'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_gif))

decode_jpeg = tf_export(
    'io.decode_jpeg',
    'image.decode_jpeg',
    v1=['io.decode_jpeg', 'image.decode_jpeg'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_jpeg))

decode_png = tf_export(
    'io.decode_png',
    'image.decode_png',
    v1=['io.decode_png', 'image.decode_png'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_png))

# Note: the generated op is named `decode_web_p`; it is exported as
# `decode_webp` for API consistency.
decode_webp = tf_export(
    'io.decode_webp',
    'image.decode_webp',
    v1=['io.decode_webp', 'image.decode_webp'],
)(dispatch.add_dispatch_support(gen_image_ops.decode_web_p))

encode_jpeg = tf_export(
    'io.encode_jpeg',
    'image.encode_jpeg',
    v1=['io.encode_jpeg', 'image.encode_jpeg'])(
        dispatch.add_dispatch_support(gen_image_ops.encode_jpeg))

extract_jpeg_shape = tf_export(
    'io.extract_jpeg_shape',
    'image.extract_jpeg_shape',
    v1=['io.extract_jpeg_shape', 'image.extract_jpeg_shape'])(
        dispatch.add_dispatch_support(gen_image_ops.extract_jpeg_shape))
@tf_export('io.encode_png', 'image.encode_png')
@dispatch.add_dispatch_support
def encode_png(image, compression=-1, name=None):
  r"""PNG-encode an image.

  `image` is a rank-N Tensor of type uint8 or uint16 with shape `batch_dims +
  [height, width, channels]`, where `channels` is:

  *   1: for grayscale.
  *   2: for grayscale + alpha.
  *   3: for RGB.
  *   4: for RGBA.

  The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
  default or a value from 0 to 9.  9 is the highest compression level,
  generating the smallest output, but is slower.

  Args:
    image: A `Tensor`. Must be one of the following types: `uint8`, `uint16`.
      Rank N >= 3 with shape `batch_dims + [height, width, channels]`.
    compression: An optional `int`. Defaults to `-1`. Compression level.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # Coerce the input to a tensor before handing it to the generated kernel.
  image = ops.convert_to_tensor(image)
  return gen_image_ops.encode_png(image, compression, name)
@tf_export(
    'io.decode_image',
    'image.decode_image',
    v1=['io.decode_image', 'image.decode_image'])
@dispatch.add_dispatch_support
def decode_image(contents,
                 channels=None,
                 dtype=dtypes.uint8,
                 name=None,
                 expand_animations=True):
  """Function for `decode_bmp`, `decode_gif`, `decode_jpeg`, and `decode_png`.

  Detects whether an image is a BMP, GIF, JPEG, WebP or PNG, and performs the
  appropriate operation to convert the input bytes `string` into a `Tensor`
  of type `dtype`.

  Note: `decode_gif` and `decode_webp` return a 4-D array of
  `[num_frames, height, width, 3]`, as opposed to the other image
  formats which always return 3-D arrays of the form `[height, width,
  num_channels]`. Make sure to take this into account when
  constructing your graph if you are intermixing animation with static
  images. Alternately, set the `expand_animations` argument of this
  function to `False`, in which case the op will return 3-dimensional
  tensors and will truncate animations to the first frame.

  NOTE: If the first frame of an animated GIF does not occupy the entire
  canvas (maximum frame width x maximum frame height), then it fills the
  unoccupied areas (in the first frame) with zeros (black). For frames after
  the first frame that does not occupy the entire canvas, it uses the previous
  frame to fill the unoccupied areas.

  Args:
    contents: A `Tensor` of type `string`. 0-D. The encoded image bytes.
    channels: An optional `int`. Defaults to `0`. Number of color channels for
      the decoded image.
    dtype: The desired DType of the returned `Tensor`.
    name: A name for the operation (optional)
    expand_animations: An optional `bool`. Defaults to `True`. Controls the
      shape of the returned op's output. If `True`, the returned op will
      produce a 4-D tensor for all GIFs and WebP images, animated or not, and
      a 3-D tensor in all other cases. If, `False`, the returned op will
      produce a 3-D tensor for all file types and will truncate animations to
      the first frame.

  Returns:
    `Tensor` with type `dtype` and a 3- or 4-dimensional shape, depending on
    the file type and the value of the `expand_animations` parameter.

  Raises:
    ValueError: On incorrect number of channels.
  """
  with ops.name_scope(name, 'decode_image'):
    # `channels=0` tells the kernel to use the channel count stored in the
    # file itself.
    if channels is None:
      channels = 0
    requested_dtype = dtype
    # The fused decode op natively supports only these dtypes; anything else
    # is decoded at uint16 precision and converted afterwards.
    if dtype in (dtypes.float32, dtypes.uint8, dtypes.uint16):
      decode_dtype = dtype
    else:
      decode_dtype = dtypes.uint16
    image = gen_image_ops.decode_image(
        contents=contents,
        channels=channels,
        expand_animations=expand_animations,
        dtype=decode_dtype,
    )
    if decode_dtype != requested_dtype:
      image = convert_image_dtype(image, requested_dtype)
    if not expand_animations:
      # Without animation expansion the output is always a single 3-D frame.
      if channels != 0:
        image.set_shape([None, None, channels])
      else:
        # Channel count unknown: still pin the rank, which is what matters
        # for the resize op.
        image.set_shape([None, None, None])
    return image
@tf_export('image.total_variation')
@dispatch.add_dispatch_support
def total_variation(images, name=None):
  """Calculate and return the total variation for one or more images.

  The total variation is the sum of the absolute differences for neighboring
  pixel-values in the input images. This measures how much noise is in the
  images.

  This can be used as a loss-function during optimization so as to suppress
  noise in images. If you have a batch of images, then you should calculate
  the scalar loss-value as the sum:

  `loss = tf.reduce_sum(tf.image.total_variation(images))`

  This implements the anisotropic 2-D version of the formula described here:

  https://en.wikipedia.org/wiki/Total_variation_denoising

  Args:
    images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    name: A name for the operation (optional).

  Raises:
    ValueError: if images.shape is not a 3-D or 4-D vector.

  Returns:
    The total variation of `images`.

    If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the
    total variation for each image in the batch.
    If `images` was 3-D, return a scalar float with the total variation for
    that image.
  """
  with ops.name_scope(name, 'total_variation'):
    ndims = images.get_shape().ndims
    if ndims == 3:
      # Single image [height, width, channels]: difference each pixel with
      # its neighbor one step down and one step to the right (via slicing).
      diff_rows = images[1:, :, :] - images[:-1, :, :]
      diff_cols = images[:, 1:, :] - images[:, :-1, :]
      # Reduce over every axis, producing a scalar.
      sum_axis = None
    elif ndims == 4:
      # Batch of images [batch, height, width, channels]: same neighbor
      # differences, computed per image.
      diff_rows = images[:, 1:, :, :] - images[:, :-1, :, :]
      diff_cols = images[:, :, 1:, :] - images[:, :, :-1, :]
      # Reduce over everything but the batch axis, producing one total
      # variation value per image.
      sum_axis = [1, 2, 3]
    else:
      raise ValueError('\'images\' must be either 3 or 4-dimensional.')
    # Total variation is the summed absolute value of both difference maps.
    return (math_ops.reduce_sum(math_ops.abs(diff_rows), axis=sum_axis) +
            math_ops.reduce_sum(math_ops.abs(diff_cols), axis=sum_axis))
@tf_export('image.sample_distorted_bounding_box', v1=[])
@dispatch.add_dispatch_support
def sample_distorted_bounding_box_v2(image_size,
                                     bounding_boxes,
                                     seed=0,
                                     min_object_covered=0.1,
                                     aspect_ratio_range=None,
                                     area_range=None,
                                     max_attempts=None,
                                     use_image_if_no_bounding_boxes=None,
                                     name=None):
  """Generate a single randomly distorted bounding box for an image.

  Given an `image_size`, a set of `bounding_boxes` and a series of
  constraints, this op samples one randomly distorted localization of an
  object (i.e. a bounding box), a common *data augmentation* technique.

  The result is returned as three tensors: `begin`, `size` and `bboxes`.
  The first two can be fed directly into `tf.slice` to crop the image;
  the last can be supplied to `tf.image.draw_bounding_boxes` to visualize
  the sampled box. Bounding boxes are supplied and returned as
  `[y_min, x_min, y_max, x_max]`, with float coordinates in `[0.0, 1.0]`
  relative to the width and height of the underlying image.

  If no bounding box information is available, setting
  `use_image_if_no_bounding_boxes = true` assumes a single implicit box
  covering the whole image; if it is false and no boxes are supplied, an
  error is raised.

  For deterministic results given a `seed`, use
  `tf.image.stateless_sample_distorted_bounding_box` instead: unlike the
  `seed` argument here, stateless ops guarantee the same output for the
  same seed regardless of call count or global seed settings
  (e.g. `tf.random.set_seed`).

  Args:
    image_size: A `Tensor` of type `uint8`, `int8`, `int16`, `int32` or
      `int64`. 1-D, containing `[height, width, channels]`.
    bounding_boxes: A `float32` `Tensor` of shape `[batch, N, 4]`
      describing the N bounding boxes associated with the image.
    seed: An optional `int`. Defaults to `0`. If non-zero, seeds the random
      number generator; otherwise a random seed is used.
    min_object_covered: A `float32` Tensor. Defaults to `0.1`. The cropped
      area must contain at least this fraction of any supplied bounding
      box. Must be non-negative; 0 means no overlap is required.
    aspect_ratio_range: An optional list of `floats`. Defaults to
      `[0.75, 1.33]`. Allowed range for the crop's `width / height` ratio.
    area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The
      crop must contain this fraction of the supplied image.
    max_attempts: An optional `int`. Defaults to `100`. Number of sampling
      attempts before falling back to the entire image.
    use_image_if_no_bounding_boxes: An optional `bool`. Defaults to
      `False`. See above.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (begin, size, bboxes).

    begin: Same type as `image_size`. 1-D `[offset_height, offset_width,
      0]`. Provide as input to `tf.slice`.
    size: Same type as `image_size`. 1-D `[target_height, target_width,
      -1]`. Provide as input to `tf.slice`.
    bboxes: A `float32` `Tensor` of shape `[1, 1, 4]` containing the
      distorted bounding box. Provide as input to
      `tf.image.draw_bounding_boxes`.

  Raises:
    ValueError: If no seed is specified and op determinism is enabled.
  """
  if not seed:
    # A zero seed means nondeterministic sampling, which op determinism
    # forbids.
    if config.is_op_determinism_enabled():
      raise ValueError(
          f'tf.image.sample_distorted_bounding_box requires a non-zero seed to '
          f'be passed in when determinism is enabled, but got seed={seed}. '
          f'Please pass in a non-zero seed, e.g. by passing "seed=1".')
    seed1 = seed2 = 0
  else:
    seed1, seed2 = random_seed.get_seed(seed)
  with ops.name_scope(name, 'sample_distorted_bounding_box'):
    return gen_image_ops.sample_distorted_bounding_box_v2(
        image_size,
        bounding_boxes,
        seed=seed1,
        seed2=seed2,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,
        name=name)
@tf_export('image.stateless_sample_distorted_bounding_box', v1=[])
@dispatch.add_dispatch_support
def stateless_sample_distorted_bounding_box(image_size,
                                            bounding_boxes,
                                            seed,
                                            min_object_covered=0.1,
                                            aspect_ratio_range=None,
                                            area_range=None,
                                            max_attempts=None,
                                            use_image_if_no_bounding_boxes=None,
                                            name=None):
  """Generate a randomly distorted bounding box for an image deterministically.

  Given the same `seed`, this op deterministically outputs a randomly
  distorted localization of an object (a bounding box) from an
  `image_size`, `bounding_boxes` and a series of constraints — a common
  *data augmentation* technique.

  The result is returned as three tensors: `begin`, `size` and `bboxes`.
  The first two can be fed directly into `tf.slice` to crop the image;
  the last can be supplied to `tf.image.draw_bounding_boxes` to visualize
  the sampled box. Bounding boxes are supplied and returned as
  `[y_min, x_min, y_max, x_max]`, with float coordinates in `[0.0, 1.0]`
  relative to the width and height of the underlying image.

  The output is guaranteed to be the same for the same `seed`,
  independent of how many times the function is called and of global seed
  settings (e.g. `tf.random.set_seed`).

  Example usage:

  >>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]])
  >>> bbox = tf.constant(
  ...   [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
  >>> seed = (1, 2)
  >>> # Generate a single distorted bounding box.
  >>> bbox_begin, bbox_size, bbox_draw = (
  ...   tf.image.stateless_sample_distorted_bounding_box(
  ...     tf.shape(image), bounding_boxes=bbox, seed=seed))
  >>> # Employ the bounding box to distort the image.
  >>> tf.slice(image, bbox_begin, bbox_size)
  <tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy=
  array([[[1],
          [2]],
         [[4],
          [5]]])>

  If no bounding box information is available, setting
  `use_image_if_no_bounding_boxes = true` assumes a single implicit box
  covering the whole image; if it is false and no boxes are supplied, an
  error is raised.

  Args:
    image_size: A `Tensor` of type `uint8`, `int8`, `int16`, `int32` or
      `int64`. 1-D, containing `[height, width, channels]`.
    bounding_boxes: A `float32` `Tensor` of shape `[batch, N, 4]`
      describing the N bounding boxes associated with the image.
    seed: A shape [2] Tensor, the seed to the random number generator.
      Must have dtype `int32` or `int64`. (When using XLA, only `int32`
      is allowed.)
    min_object_covered: A `float32` Tensor. Defaults to `0.1`. The cropped
      area must contain at least this fraction of any supplied bounding
      box. Must be non-negative; 0 means no overlap is required.
    aspect_ratio_range: An optional list of `floats`. Defaults to
      `[0.75, 1.33]`. Allowed range for the crop's `width / height` ratio.
    area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The
      crop must contain this fraction of the supplied image.
    max_attempts: An optional `int`. Defaults to `100`. Number of sampling
      attempts before falling back to the entire image.
    use_image_if_no_bounding_boxes: An optional `bool`. Defaults to
      `False`. See above.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (begin, size, bboxes).

    begin: Same type as `image_size`. 1-D `[offset_height, offset_width,
      0]`. Provide as input to `tf.slice`.
    size: Same type as `image_size`. 1-D `[target_height, target_width,
      -1]`. Provide as input to `tf.slice`.
    bboxes: A `float32` `Tensor` of shape `[1, 1, 4]` containing the
      distorted bounding box. Provide as input to
      `tf.image.draw_bounding_boxes`.
  """
  with ops.name_scope(name, 'stateless_sample_distorted_bounding_box'):
    op_kwargs = dict(
        image_size=image_size,
        bounding_boxes=bounding_boxes,
        seed=seed,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,
        name=name)
    return gen_image_ops.stateless_sample_distorted_bounding_box(**op_kwargs)
@tf_export(v1=['image.sample_distorted_bounding_box'])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    date=None,
    instructions='`seed2` arg is deprecated.'
    'Use sample_distorted_bounding_box_v2 instead.')
def sample_distorted_bounding_box(image_size,
                                  bounding_boxes,
                                  seed=None,
                                  seed2=None,
                                  min_object_covered=0.1,
                                  aspect_ratio_range=None,
                                  area_range=None,
                                  max_attempts=None,
                                  use_image_if_no_bounding_boxes=None,
                                  name=None):
  """Generate a single randomly distorted bounding box for an image.

  Given an `image_size`, a set of `bounding_boxes` and a series of
  constraints, this op samples one randomly distorted localization of an
  object (i.e. a bounding box), a common *data augmentation* technique.

  The result is returned as three tensors: `begin`, `size` and `bboxes`.
  The first two can be fed directly into `tf.slice` to crop the image;
  the last can be supplied to `tf.image.draw_bounding_boxes` to visualize
  the sampled box. Bounding boxes are supplied and returned as
  `[y_min, x_min, y_max, x_max]`, with float coordinates in `[0.0, 1.0]`
  relative to the width and height of the underlying image.

  If no bounding box information is available, setting
  `use_image_if_no_bounding_boxes = True` assumes a single implicit box
  covering the whole image; if it is false and no boxes are supplied, an
  error is raised.

  Args:
    image_size: A `Tensor` of type `uint8`, `int8`, `int16`, `int32` or
      `int64`. 1-D, containing `[height, width, channels]`.
    bounding_boxes: A `float32` `Tensor` of shape `[batch, N, 4]`
      describing the N bounding boxes associated with the image.
    seed: An optional `int`. Defaults to `0`. If either `seed` or `seed2`
      is non-zero, the random number generator is seeded by the given
      `seed`; otherwise a random seed is used.
    seed2: An optional `int`. Defaults to `0`. A second seed to avoid seed
      collision.
    min_object_covered: A `float32` Tensor. Defaults to `0.1`. The cropped
      area must contain at least this fraction of any supplied bounding
      box. Must be non-negative; 0 means no overlap is required.
    aspect_ratio_range: An optional list of `floats`. Defaults to
      `[0.75, 1.33]`. Allowed range for the crop's width / height ratio.
    area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The
      crop must contain this fraction of the supplied image.
    max_attempts: An optional `int`. Defaults to `100`. Number of sampling
      attempts before falling back to the entire image.
    use_image_if_no_bounding_boxes: An optional `bool`. Defaults to
      `False`. See above.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (begin, size, bboxes).

    begin: Same type as `image_size`. 1-D `[offset_height, offset_width,
      0]`. Provide as input to `tf.slice`.
    size: Same type as `image_size`. 1-D `[target_height, target_width,
      -1]`. Provide as input to `tf.slice`.
    bboxes: A `float32` `Tensor` of shape `[1, 1, 4]` containing the
      distorted bounding box. Provide as input to
      `tf.image.draw_bounding_boxes`.

  Raises:
    ValueError: If no seed is specified and op determinism is enabled.
  """
  # Determinism requires an explicit seed; both zero/None seeds would make
  # the op sample nondeterministically.
  if config.is_op_determinism_enabled() and not (seed or seed2):
    raise ValueError(
        f'tf.compat.v1.image.sample_distorted_bounding_box requires "seed" or '
        f'"seed2" to be non-zero when determinism is enabled. Please pass in '
        f'a non-zero seed, e.g. by passing "seed=1". Got seed={seed} and '
        f"seed2={seed2}")
  with ops.name_scope(name, 'sample_distorted_bounding_box'):
    return gen_image_ops.sample_distorted_bounding_box_v2(
        image_size,
        bounding_boxes,
        seed=seed,
        seed2=seed2,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,
        name=name)
@tf_export('image.non_max_suppression')
@dispatch.add_dispatch_support
def non_max_suppression(boxes,
                        scores,
                        max_output_size,
                        iou_threshold=0.5,
                        score_threshold=float('-inf'),
                        name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Prunes away boxes with high intersection-over-union (IOU) overlap with
  previously selected boxes. Boxes are supplied as `[y1, x1, y2, x2]`,
  where `(y1, x1)` and `(y2, x2)` are any diagonal pair of box corners;
  coordinates may be normalized (in `[0, 1]`) or absolute. The algorithm
  is agnostic to the origin of the coordinate system and invariant to
  orthogonal transformations and translations of it.

  The output is a set of integer indices into the input boxes; the
  corresponding box coordinates can be obtained with `tf.gather`:

  ```python
  selected_indices = tf.image.non_max_suppression(
      boxes, scores, max_output_size, iou_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  Args:
    boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
    scores: A 1-D float `Tensor` of shape `[num_boxes]` giving one score
      per box (per row of `boxes`).
    max_output_size: A scalar integer `Tensor`; the maximum number of
      boxes selected by non-max suppression.
    iou_threshold: A 0-D float tensor; the IOU threshold above which boxes
      are considered to overlap too much.
    score_threshold: A 0-D float tensor; boxes scoring below this are
      removed.
    name: A name for the operation (optional).

  Returns:
    selected_indices: A 1-D integer `Tensor` of shape `[M]` with the
      selected indices into `boxes`, where `M <= max_output_size`.
  """
  with ops.name_scope(name, 'non_max_suppression'):
    iou_threshold_t = ops.convert_to_tensor(iou_threshold,
                                            name='iou_threshold')
    score_threshold_t = ops.convert_to_tensor(score_threshold,
                                              name='score_threshold')
    return gen_image_ops.non_max_suppression_v3(boxes, scores,
                                                max_output_size,
                                                iou_threshold_t,
                                                score_threshold_t)
@tf_export('image.non_max_suppression_with_scores')
@dispatch.add_dispatch_support
def non_max_suppression_with_scores(boxes,
                                    scores,
                                    max_output_size,
                                    iou_threshold=0.5,
                                    score_threshold=float('-inf'),
                                    soft_nms_sigma=0.0,
                                    name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Prunes away boxes with high intersection-over-union (IOU) overlap with
  previously selected boxes. Boxes are supplied as `[y1, x1, y2, x2]`,
  where `(y1, x1)` and `(y2, x2)` are any diagonal pair of box corners;
  coordinates may be normalized (in `[0, 1]`) or absolute. The algorithm
  is agnostic to the origin of the coordinate system and invariant to
  orthogonal transformations and translations of it.

  The output is a set of integer indices into the input boxes; the
  corresponding box coordinates can be obtained with `tf.gather`:

  ```python
  selected_indices, selected_scores = tf.image.non_max_suppression_padded(
      boxes, scores, max_output_size, iou_threshold=1.0, score_threshold=0.1,
      soft_nms_sigma=0.5)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  This function generalizes `tf.image.non_max_suppression` by also
  supporting a Soft-NMS (Gaussian weighting) mode (c.f. Bodla et al,
  https://arxiv.org/abs/1704.04503), in which boxes reduce the score of
  other overlapping boxes instead of directly pruning them. Hence, unlike
  `tf.image.non_max_suppression`, the new scores of each input box are
  returned in the second output, `selected_scores`.

  Soft-NMS is enabled by setting `soft_nms_sigma` greater than 0. With
  `soft_nms_sigma` equal to 0, behavior matches
  `tf.image.non_max_suppression` (apart from the extra output), both in
  function and in running time.

  Note that with `soft_nms_sigma` > 0, Soft-NMS is performed and
  `iou_threshold` is ignored; `iou_threshold` is only used for standard
  NMS.

  Args:
    boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
    scores: A 1-D float `Tensor` of shape `[num_boxes]` giving one score
      per box (per row of `boxes`).
    max_output_size: A scalar integer `Tensor`; the maximum number of
      boxes selected by non-max suppression.
    iou_threshold: A 0-D float tensor; the IOU threshold above which boxes
      are considered to overlap too much.
    score_threshold: A 0-D float tensor; boxes scoring below this are
      removed.
    soft_nms_sigma: A 0-D float tensor; the sigma parameter for Soft NMS
      (c.f. https://arxiv.org/abs/1704.04503). With `soft_nms_sigma=0.0`
      (the default), standard (hard) NMS is used.
    name: A name for the operation (optional).

  Returns:
    selected_indices: A 1-D integer `Tensor` of shape `[M]` with the
      selected indices into `boxes`, where `M <= max_output_size`.
    selected_scores: A 1-D float tensor of shape `[M]` with the
      corresponding scores of the selected boxes. Scores differ from the
      input scores only when Soft NMS is used (`soft_nms_sigma>0`).
  """
  with ops.name_scope(name, 'non_max_suppression_with_scores'):
    iou_threshold_t = ops.convert_to_tensor(iou_threshold,
                                            name='iou_threshold')
    score_threshold_t = ops.convert_to_tensor(score_threshold,
                                              name='score_threshold')
    soft_nms_sigma_t = ops.convert_to_tensor(soft_nms_sigma,
                                             name='soft_nms_sigma')
    # The v5 kernel also returns the number of valid outputs; it is not
    # meaningful here because padding is disabled.
    nms_outputs = gen_image_ops.non_max_suppression_v5(
        boxes,
        scores,
        max_output_size,
        iou_threshold_t,
        score_threshold_t,
        soft_nms_sigma_t,
        pad_to_max_output_size=False)
    return nms_outputs[0], nms_outputs[1]
@tf_export('image.non_max_suppression_overlaps')
@dispatch.add_dispatch_support
def non_max_suppression_with_overlaps(overlaps,
                                      scores,
                                      max_output_size,
                                      overlap_threshold=0.5,
                                      score_threshold=float('-inf'),
                                      name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Prunes away boxes that have high overlap with previously selected
  boxes. Pairwise overlap values are supplied as an n-by-n square matrix.

  The output is a set of integer indices into the input boxes; the
  corresponding box coordinates can be obtained with `tf.gather`:

  ```python
  selected_indices = tf.image.non_max_suppression_overlaps(
      overlaps, scores, max_output_size, iou_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  Args:
    overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]` with
      the n-by-n box overlap values.
    scores: A 1-D float `Tensor` of shape `[num_boxes]` giving one score
      per box (per row of boxes).
    max_output_size: A scalar integer `Tensor`; the maximum number of
      boxes selected by non-max suppression.
    overlap_threshold: A 0-D float tensor; the overlap value above which
      boxes are considered to overlap too much.
    score_threshold: A 0-D float tensor; boxes scoring below this are
      removed.
    name: A name for the operation (optional).

  Returns:
    selected_indices: A 1-D integer `Tensor` of shape `[M]` with the
      selected indices into the overlaps tensor, where
      `M <= max_output_size`.
  """
  with ops.name_scope(name, 'non_max_suppression_overlaps'):
    overlap_threshold_t = ops.convert_to_tensor(overlap_threshold,
                                                name='overlap_threshold')
    return gen_image_ops.non_max_suppression_with_overlaps(
        overlaps, scores, max_output_size, overlap_threshold_t,
        score_threshold)
# 3x3 RGB->YIQ conversion matrix, applied via tensordot in `rgb_to_yiq`
# (rows index the input RGB channel, columns the output Y/I/Q channel).
# The Y column uses the familiar 0.299/0.587/0.114 luma weights.
_rgb_to_yiq_kernel = [[0.299, 0.59590059, 0.2115],
                      [0.587, -0.27455667, -0.52273617],
                      [0.114, -0.32134392, 0.31119955]]
@tf_export('image.rgb_to_yiq')
@dispatch.add_dispatch_support
def rgb_to_yiq(images):
  """Converts one or more images from RGB to YIQ.

  Outputs a tensor of the same shape as `images`, containing the YIQ
  value of the pixels. The output is only well defined if the values in
  `images` are in [0, 1].

  Usage Example:

  >>> x = tf.constant([[[1.0, 2.0, 3.0]]])
  >>> tf.image.rgb_to_yiq(x)
  <tf.Tensor: shape=(1, 1, 3), dtype=float32,
  numpy=array([[[ 1.815     , -0.91724455,  0.09962624]]], dtype=float32)>

  Args:
    images: 2-D or higher rank. Image data to convert. Last dimension must
      be size 3.

  Returns:
    images: tensor with the same shape as `images`.
  """
  images = ops.convert_to_tensor(images, name='images')
  conversion_matrix = ops.convert_to_tensor(
      _rgb_to_yiq_kernel, dtype=images.dtype, name='kernel')
  # Contract the channel (last) axis of the image against the rows of the
  # conversion matrix.
  channel_axis = images.get_shape().ndims - 1
  return math_ops.tensordot(
      images, conversion_matrix, axes=[[channel_axis], [0]])
# 3x3 YIQ->RGB conversion matrix (inverse direction of `_rgb_to_yiq_kernel`),
# applied via tensordot in `yiq_to_rgb`.
_yiq_to_rgb_kernel = [[1, 1, 1], [0.95598634, -0.27201283, -1.10674021],
                      [0.6208248, -0.64720424, 1.70423049]]
@tf_export('image.yiq_to_rgb')
@dispatch.add_dispatch_support
def yiq_to_rgb(images):
  """Converts one or more images from YIQ to RGB.

  Outputs a tensor of the same shape as `images`, containing the RGB
  value of the pixels. The output is only well defined if the Y values in
  `images` are in [0, 1], I values in [-0.5957, 0.5957] and Q values in
  [-0.5226, 0.5226].

  Args:
    images: 2-D or higher rank. Image data to convert. Last dimension must
      be size 3.

  Returns:
    images: tensor with the same shape as `images`.
  """
  images = ops.convert_to_tensor(images, name='images')
  conversion_matrix = ops.convert_to_tensor(
      _yiq_to_rgb_kernel, dtype=images.dtype, name='kernel')
  # Contract the channel (last) axis of the image against the rows of the
  # conversion matrix.
  channel_axis = images.get_shape().ndims - 1
  return math_ops.tensordot(
      images, conversion_matrix, axes=[[channel_axis], [0]])
# 3x3 RGB->YUV conversion matrix, applied via tensordot in `rgb_to_yuv`
# (rows index the input RGB channel, columns the output Y/U/V channel).
# The Y column uses the familiar 0.299/0.587/0.114 luma weights.
_rgb_to_yuv_kernel = [[0.299, -0.14714119, 0.61497538],
                      [0.587, -0.28886916, -0.51496512],
                      [0.114, 0.43601035, -0.10001026]]
@tf_export('image.rgb_to_yuv')
@dispatch.add_dispatch_support
def rgb_to_yuv(images):
  """Converts one or more images from RGB to YUV.

  Outputs a tensor of the same shape as `images`, containing the YUV
  value of the pixels. The output is only well defined if the values in
  `images` are in [0, 1]. There are two common ways of representing an
  image: a [0, 255] pixel value range or a [0, 1] (float) range; the
  input image must be converted to a float [0, 1] range first.

  Args:
    images: 2-D or higher rank. Image data to convert. Last dimension must
      be size 3.

  Returns:
    images: tensor with the same shape as `images`.
  """
  images = ops.convert_to_tensor(images, name='images')
  conversion_matrix = ops.convert_to_tensor(
      _rgb_to_yuv_kernel, dtype=images.dtype, name='kernel')
  # Contract the channel (last) axis of the image against the rows of the
  # conversion matrix.
  channel_axis = images.get_shape().ndims - 1
  return math_ops.tensordot(
      images, conversion_matrix, axes=[[channel_axis], [0]])
# 3x3 YUV->RGB conversion matrix (inverse direction of `_rgb_to_yuv_kernel`),
# applied via tensordot in `yuv_to_rgb`.
_yuv_to_rgb_kernel = [[1, 1, 1], [0, -0.394642334, 2.03206185],
                      [1.13988303, -0.58062185, 0]]
@tf_export('image.yuv_to_rgb')
@dispatch.add_dispatch_support
def yuv_to_rgb(images):
  """Converts one or more images from YUV to RGB.

  Outputs a tensor of the same shape as `images`, containing the RGB
  value of the pixels. The output is only well defined if the Y values in
  `images` are in [0, 1] and the U and V values in [-0.5, 0.5].

  As per the above description, you need to scale your YUV images if
  their pixel values are not in the required range. The example below
  illustrates preprocessing each channel before feeding images to
  `yuv_to_rgb`:

  ```python
  yuv_images = tf.random.uniform(shape=[100, 64, 64, 3], maxval=255)
  last_dimension_axis = len(yuv_images.shape) - 1
  yuv_tensor_images = tf.truediv(
      tf.subtract(
          yuv_images,
          tf.reduce_min(yuv_images)
      ),
      tf.subtract(
          tf.reduce_max(yuv_images),
          tf.reduce_min(yuv_images)
      )
  )
  y, u, v = tf.split(yuv_tensor_images, 3, axis=last_dimension_axis)
  target_uv_min, target_uv_max = -0.5, 0.5
  u = u * (target_uv_max - target_uv_min) + target_uv_min
  v = v * (target_uv_max - target_uv_min) + target_uv_min
  preprocessed_yuv_images = tf.concat([y, u, v], axis=last_dimension_axis)
  rgb_tensor_images = tf.image.yuv_to_rgb(preprocessed_yuv_images)
  ```

  Args:
    images: 2-D or higher rank. Image data to convert. Last dimension must
      be size 3.

  Returns:
    images: tensor with the same shape as `images`.
  """
  images = ops.convert_to_tensor(images, name='images')
  conversion_matrix = ops.convert_to_tensor(
      _yuv_to_rgb_kernel, dtype=images.dtype, name='kernel')
  # Contract the channel (last) axis of the image against the rows of the
  # conversion matrix.
  channel_axis = images.get_shape().ndims - 1
  return math_ops.tensordot(
      images, conversion_matrix, axes=[[channel_axis], [0]])
def _verify_compatible_image_shapes(img1, img2):
  """Checks if two image tensors are compatible for applying SSIM or PSNR.

  This function checks if two sets of images have ranks at least 3, and if the
  last three dimensions match.

  Args:
    img1: Tensor containing the first image batch.
    img2: Tensor containing the second image batch.

  Returns:
    A tuple containing: the first tensor shape, the second tensor shape, and a
    list of control_flow_ops.Assert() ops implementing the checks.

  Raises:
    ValueError: When static shape check fails.
  """
  # Static check: require rank >= 3 and statically compatible [H, W, C] dims.
  shape1 = img1.get_shape().with_rank_at_least(3)
  shape2 = img2.get_shape().with_rank_at_least(3)
  shape1[-3:].assert_is_compatible_with(shape2[-3:])

  if shape1.ndims is not None and shape2.ndims is not None:
    # Walk the leading (batch-like) dims from the innermost outward; a dim of
    # 1 on either side is allowed (broadcast-style compatibility).
    for dim1, dim2 in zip(
        reversed(shape1.dims[:-3]), reversed(shape2.dims[:-3])):
      if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)):
        raise ValueError('Two images are not compatible: %s and %s' %
                         (shape1, shape2))

  # Now assign shape tensors.
  # Rebind to dynamic shape tensors for the runtime assertions below.
  shape1, shape2 = array_ops.shape_n([img1, img2])

  # TODO(sjhwang): Check if shape1[:-3] and shape2[:-3] are broadcastable.
  # Runtime checks mirror the static ones: rank >= 3 and equal [H, W, C].
  checks = []
  checks.append(
      control_flow_assert.Assert(
          math_ops.greater_equal(array_ops.size(shape1), 3), [shape1, shape2],
          summarize=10))
  checks.append(
      control_flow_assert.Assert(
          math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])),
          [shape1, shape2],
          summarize=10))
  return shape1, shape2, checks
@tf_export('image.psnr')
@dispatch.add_dispatch_support
def psnr(a, b, max_val, name=None):
  """Returns the Peak Signal-to-Noise Ratio between a and b.

  This is intended to be used on signals (or images). Produces a PSNR
  value for each image in batch. The last three dimensions of the input
  are expected to be [height, width, depth].

  Example:

  ```python
      # Read images from file.
      im1 = tf.decode_png('path/to/im1.png')
      im2 = tf.decode_png('path/to/im2.png')
      # Compute PSNR over tf.uint8 Tensors.
      psnr1 = tf.image.psnr(im1, im2, max_val=255)

      # Compute PSNR over tf.float32 Tensors.
      im1 = tf.image.convert_image_dtype(im1, tf.float32)
      im2 = tf.image.convert_image_dtype(im2, tf.float32)
      psnr2 = tf.image.psnr(im1, im2, max_val=1.0)
      # psnr1 and psnr2 both have type tf.float32 and are almost equal.
  ```

  Args:
    a: First set of images.
    b: Second set of images.
    max_val: The dynamic range of the images (i.e., the difference between
      the maximum and the minimum allowed values).
    name: Namespace to embed the computation in.

  Returns:
    The scalar PSNR between a and b. The returned tensor has type
    `tf.float32` and shape [batch_size, 1].
  """
  with ops.name_scope(name, 'PSNR', [a, b]):
    # Work in float32; `max_val` must be rescaled the same way the images
    # are, and the cast to a.dtype has to happen before `a` is converted.
    max_val = math_ops.cast(max_val, a.dtype)
    max_val = convert_image_dtype(max_val, dtypes.float32)
    a = convert_image_dtype(a, dtypes.float32)
    b = convert_image_dtype(b, dtypes.float32)
    # Mean squared error over the trailing [height, width, depth] axes.
    mean_sq_err = math_ops.reduce_mean(
        math_ops.squared_difference(a, b), [-3, -2, -1])
    # PSNR = 20*log10(max_val) - 10*log10(MSE); TF only has natural log,
    # so divide/scale by log(10).
    psnr_val = math_ops.subtract(
        20 * math_ops.log(max_val) / math_ops.log(10.0),
        np.float32(10 / np.log(10)) * math_ops.log(mean_sq_err),
        name='psnr')

    _, _, checks = _verify_compatible_image_shapes(a, b)
    with ops.control_dependencies(checks):
      return array_ops.identity(psnr_val)
def _ssim_helper(x, y, reducer, max_val, compensation=1.0, k1=0.01, k2=0.03):
  r"""Helper function for computing SSIM.

  SSIM estimates covariances with weighted sums. The default parameters
  use a biased estimate of the covariance:
  Suppose `reducer` is a weighted sum, then the mean estimators are
    \mu_x = \sum_i w_i x_i,
    \mu_y = \sum_i w_i y_i,
  where w_i's are the weighted-sum weights, and covariance estimator is
    cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y)
  with assumption \sum_i w_i = 1. This covariance estimator is biased, since
    E[cov_{xy}] = (1 - \sum_i w_i ^ 2) Cov(X, Y).
  For SSIM measure with unbiased covariance estimators, pass as `compensation`
  argument (1 - \sum_i w_i ^ 2).

  Args:
    x: First set of images.
    y: Second set of images.
    reducer: Function that computes 'local' averages from the set of images.
      For non-convolutional version, this is usually tf.reduce_mean(x, [1,
      2]), and for convolutional version, this is usually tf.nn.avg_pool2d or
      tf.nn.conv2d with weighted-sum kernel.
    max_val: The dynamic range (i.e., the difference between the maximum
      possible allowed value and the minimum allowed value).
    compensation: Compensation factor. See above.
    k1: Default value 0.01
    k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so
      it would be better if we took the values in the range of 0 < K2 < 0.4).

  Returns:
    A pair containing the luminance measure, and the contrast-structure
    measure.
  """
  c1 = (k1 * max_val)**2
  c2 = (k2 * max_val)**2

  # Luminance term: (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1).
  mu_x = reducer(x)
  mu_y = reducer(y)
  cross_mu = mu_x * mu_y * 2.0
  sq_mu = math_ops.square(mu_x) + math_ops.square(mu_y)
  luminance = (cross_mu + c1) / (sq_mu + c1)

  # Contrast-structure term: (2 * cov_{xy} + c2) / (cov_{xx} + cov_{yy} + c2).
  # With `reducer` a weighted sum whose weights sum to one,
  #   cov_{xy} = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j),
  # so the numerator/denominator below are the "2 * E[xy]" and
  # "E[x^2] + E[y^2]" pieces with the mean products subtracted off.
  cross_xy = reducer(x * y) * 2.0
  sq_xy = reducer(math_ops.square(x) + math_ops.square(y))
  c2 *= compensation
  cs = (cross_xy - cross_mu + c2) / (sq_xy - sq_mu + c2)

  # SSIM score is the product of the luminance and contrast-structure
  # measures; callers combine them as needed.
  return luminance, cs
def _fspecial_gauss(size, sigma):
  """Mimics the 'fspecial' Gaussian MATLAB function.

  Args:
    size: Scalar int tensor, the side length of the square kernel.
    sigma: Scalar float tensor, the Gaussian standard deviation.

  Returns:
    A normalized (unit-sum) Gaussian kernel of shape [size, size, 1, 1],
    ready for use as a depthwise convolution filter.
  """
  size = ops.convert_to_tensor(size, dtypes.int32)
  sigma = ops.convert_to_tensor(sigma)

  # Offsets of each tap from the kernel center.
  offsets = math_ops.cast(math_ops.range(size), sigma.dtype)
  offsets -= math_ops.cast(size - 1, sigma.dtype) / 2.0

  # 1-D log-Gaussian profile: -x^2 / (2 * sigma^2).
  log_kernel = math_ops.square(offsets)
  log_kernel *= -0.5 / math_ops.square(sigma)

  # Outer sum of the 1-D profiles gives the 2-D log-Gaussian surface.
  log_kernel = (
      array_ops.reshape(log_kernel, shape=[1, -1]) +
      array_ops.reshape(log_kernel, shape=[-1, 1]))

  # softmax exponentiates and normalizes in one pass, yielding a unit-sum
  # Gaussian without an explicit division.
  flat = array_ops.reshape(log_kernel, shape=[1, -1])
  normalized = nn_ops.softmax(flat)
  return array_ops.reshape(normalized, shape=[size, size, 1, 1])
def _ssim_per_channel(img1,
                      img2,
                      max_val=1.0,
                      filter_size=11,
                      filter_sigma=1.5,
                      k1=0.01,
                      k2=0.03,
                      return_index_map=False):
  """Computes SSIM index between img1 and img2 per color channel.

  This function matches the standard SSIM implementation from:
  Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image
  quality assessment: from error visibility to structural similarity. IEEE
  transactions on image processing.

  Details:
    - 11x11 Gaussian filter of width 1.5 is used.
    - k1 = 0.01, k2 = 0.03 as in the original paper.

  Args:
    img1: First image batch.
    img2: Second image batch.
    max_val: The dynamic range of the images (i.e., the difference between
      the maximum and the minimum allowed values).
    filter_size: Default value 11 (size of gaussian filter).
    filter_sigma: Default value 1.5 (width of gaussian filter).
    k1: Default value 0.01
    k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so
      it would be better if we took the values in the range of 0 < K2 < 0.4).
    return_index_map: If True returns local SSIM map instead of the global
      mean.

  Returns:
    A pair of tensors containing the channel-wise SSIM and contrast-structure
    values. The shape is [..., channels].
  """
  filter_size = constant_op.constant(filter_size, dtype=dtypes.int32)
  filter_sigma = constant_op.constant(filter_sigma, dtype=img1.dtype)

  # Both images must be at least filter_size in height and width, otherwise
  # the 'VALID' convolution below would produce an empty output.
  shape1, shape2 = array_ops.shape_n([img1, img2])
  checks = [
      control_flow_assert.Assert(
          math_ops.reduce_all(
              math_ops.greater_equal(shape1[-3:-1], filter_size)),
          [shape1, filter_size],
          summarize=8),
      control_flow_assert.Assert(
          math_ops.reduce_all(
              math_ops.greater_equal(shape2[-3:-1], filter_size)),
          [shape2, filter_size],
          summarize=8)
  ]

  # Enforce the check to run before computation.
  with ops.control_dependencies(checks):
    img1 = array_ops.identity(img1)

  # TODO(sjhwang): Try to cache kernels and compensation factor.
  # Replicate the single-channel Gaussian kernel across all input channels so
  # depthwise_conv2d filters each channel independently.
  kernel = _fspecial_gauss(filter_size, filter_sigma)
  kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1])

  # The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`,
  # but to match MATLAB implementation of MS-SSIM, we use 1.0 instead.
  compensation = 1.0

  # TODO(sjhwang): Try FFT.
  # TODO(sjhwang): Gaussian kernel is separable in space. Consider applying
  #   1-by-n and n-by-1 Gaussian filters instead of an n-by-n filter.
  def reducer(x):
    # Gaussian-weighted local statistic. Leading batch dims are flattened to
    # rank 4 for the convolution, then restored on the way out.
    shape = array_ops.shape(x)
    x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0))
    y = nn_impl.depthwise_conv2d(
        x, kernel, strides=[1, 1, 1, 1], padding='VALID')
    return array_ops.reshape(
        y, array_ops.concat([shape[:-3], array_ops.shape(y)[1:]], 0))

  luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation, k1,
                               k2)

  # Average over the second and the third from the last: height, width.
  if return_index_map:
    # Keep the per-pixel local SSIM map.
    ssim_val = luminance * cs
  else:
    axes = constant_op.constant([-3, -2], dtype=dtypes.int32)
    ssim_val = math_ops.reduce_mean(luminance * cs, axes)
    cs = math_ops.reduce_mean(cs, axes)
  return ssim_val, cs
@tf_export('image.ssim')
@dispatch.add_dispatch_support
def ssim(img1,
         img2,
         max_val,
         filter_size=11,
         filter_sigma=1.5,
         k1=0.01,
         k2=0.03,
         return_index_map=False):
  """Computes SSIM index between img1 and img2.

  This function is based on the standard SSIM implementation from:
  Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image
  quality assessment: from error visibility to structural similarity. IEEE
  transactions on image processing.

  Note: The true SSIM is only defined on grayscale. This function does not
  perform any colorspace transform. (If the input is already YUV, then it
  will compute YUV SSIM average.)

  Details:
    - 11x11 Gaussian filter of width 1.5 is used.
    - k1 = 0.01, k2 = 0.03 as in the original paper.

  The image sizes must be at least 11x11 because of the filter size.

  Example:

  ```python
  # Read images (of size 255 x 255) from file.
  im1 = tf.image.decode_image(tf.io.read_file('path/to/im1.png'))
  im2 = tf.image.decode_image(tf.io.read_file('path/to/im2.png'))
  tf.shape(im1)  # `img1.png` has 3 channels; shape is `(255, 255, 3)`
  tf.shape(im2)  # `img2.png` has 3 channels; shape is `(255, 255, 3)`
  # Add an outer batch for each image.
  im1 = tf.expand_dims(im1, axis=0)
  im2 = tf.expand_dims(im2, axis=0)
  # Compute SSIM over tf.uint8 Tensors.
  ssim1 = tf.image.ssim(im1, im2, max_val=255, filter_size=11,
                        filter_sigma=1.5, k1=0.01, k2=0.03)

  # Compute SSIM over tf.float32 Tensors.
  im1 = tf.image.convert_image_dtype(im1, tf.float32)
  im2 = tf.image.convert_image_dtype(im2, tf.float32)
  ssim2 = tf.image.ssim(im1, im2, max_val=1.0, filter_size=11,
                        filter_sigma=1.5, k1=0.01, k2=0.03)
  # ssim1 and ssim2 both have type tf.float32 and are almost equal.
  ```

  Args:
    img1: First image batch. 4-D Tensor of shape `[batch, height, width,
      channels]` with only Positive Pixel Values.
    img2: Second image batch. 4-D Tensor of shape `[batch, height, width,
      channels]` with only Positive Pixel Values.
    max_val: The dynamic range of the images (i.e., the difference between
      the maximum and the minimum allowed values).
    filter_size: Default value 11 (size of gaussian filter).
    filter_sigma: Default value 1.5 (width of gaussian filter).
    k1: Default value 0.01
    k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so
      it would be better if we took the values in the range of 0 < K2 < 0.4).
    return_index_map: If True returns local SSIM map instead of the global
      mean.

  Returns:
    A tensor containing an SSIM value for each image in batch or a tensor
    containing an SSIM value for each pixel for each image in batch if
    return_index_map is True. Returned SSIM values are in range (-1, 1], when
    pixel values are non-negative. Returns a tensor with shape:
    broadcast(img1.shape[:-3], img2.shape[:-3]) or broadcast(img1.shape[:-1],
    img2.shape[:-1]).
  """
  with ops.name_scope(None, 'SSIM', [img1, img2]):
    img1 = ops.convert_to_tensor(img1, name='img1')
    img2 = ops.convert_to_tensor(img2, name='img2')

    # Attach shape-compatibility assertions before any computation.
    _, _, checks = _verify_compatible_image_shapes(img1, img2)
    with ops.control_dependencies(checks):
      img1 = array_ops.identity(img1)

    # Compute in float32; max_val goes through the same dtype conversion so
    # the dynamic range matches the converted images.
    max_val = math_ops.cast(max_val, img1.dtype)
    max_val = convert_image_dtype(max_val, dtypes.float32)
    img1 = convert_image_dtype(img1, dtypes.float32)
    img2 = convert_image_dtype(img2, dtypes.float32)

    per_channel, _ = _ssim_per_channel(img1, img2, max_val, filter_size,
                                       filter_sigma, k1, k2, return_index_map)
    # Average over color channels.
    return math_ops.reduce_mean(per_channel, [-1])
# Per-scale exponents for the MS-SSIM weighted geometric mean, as obtained
# empirically by Wang, Simoncelli & Bovik (2004). Index 0 is the unscaled
# resolution; each later entry corresponds to a further 2x downsampling.
_MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333)
@tf_export('image.ssim_multiscale')
@dispatch.add_dispatch_support
def ssim_multiscale(img1,
                    img2,
                    max_val,
                    power_factors=_MSSSIM_WEIGHTS,
                    filter_size=11,
                    filter_sigma=1.5,
                    k1=0.01,
                    k2=0.03):
  """Computes the MS-SSIM between img1 and img2.

  This function assumes that `img1` and `img2` are image batches, i.e. the
  last three dimensions are [height, width, channels].

  Note: The true SSIM is only defined on grayscale. This function does not
  perform any colorspace transform. (If the input is already YUV, then it
  will compute YUV SSIM average.)

  Original paper: Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik.
  "Multiscale structural similarity for image quality assessment." Signals,
  Systems and Computers, 2004.

  Args:
    img1: First image batch with only Positive Pixel Values.
    img2: Second image batch with only Positive Pixel Values. Must have the
      same rank as img1.
    max_val: The dynamic range of the images (i.e., the difference between
      the maximum and the minimum allowed values).
    power_factors: Iterable of weights for each of the scales. The number of
      scales used is the length of the list. Index 0 is the unscaled
      resolution's weight and each increasing scale corresponds to the image
      being downsampled by 2. Defaults to (0.0448, 0.2856, 0.3001, 0.2363,
      0.1333), which are the values obtained in the original paper.
    filter_size: Default value 11 (size of gaussian filter).
    filter_sigma: Default value 1.5 (width of gaussian filter).
    k1: Default value 0.01
    k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so
      it would be better if we took the values in the range of 0 < K2 < 0.4).

  Returns:
    A tensor containing an MS-SSIM value for each image in batch. The values
    are in range [0, 1]. Returns a tensor with shape:
    broadcast(img1.shape[:-3], img2.shape[:-3]).
  """
  with ops.name_scope(None, 'MS-SSIM', [img1, img2]):
    # Convert to tensor if needed.
    img1 = ops.convert_to_tensor(img1, name='img1')
    img2 = ops.convert_to_tensor(img2, name='img2')
    # Shape checking.
    shape1, shape2, checks = _verify_compatible_image_shapes(img1, img2)
    with ops.control_dependencies(checks):
      img1 = array_ops.identity(img1)

    # Need to convert the images to float32. Scale max_val accordingly so
    # that SSIM is computed correctly.
    max_val = math_ops.cast(max_val, img1.dtype)
    max_val = convert_image_dtype(max_val, dtypes.float32)
    img1 = convert_image_dtype(img1, dtypes.float32)
    img2 = convert_image_dtype(img2, dtypes.float32)

    imgs = [img1, img2]
    shapes = [shape1, shape2]

    # img1 and img2 are assumed to be a (multi-dimensional) batch of
    # 3-dimensional images (height, width, channels). `heads` contain the
    # batch dimensions, and `tails` contain the image dimensions.
    heads = [s[:-3] for s in shapes]
    tails = [s[-3:] for s in shapes]

    # 2x2 average pooling halves height and width at each scale; the channel
    # and (flattened) batch dimensions are left untouched.
    divisor = [1, 2, 2, 1]
    divisor_tensor = constant_op.constant(divisor[1:], dtype=dtypes.int32)

    def do_pad(images, remainder):
      # Symmetric-pad height/width up to the next multiple of 2 so avg_pool
      # with 'VALID' padding does not drop the last row/column.
      padding = array_ops.expand_dims(remainder, -1)
      padding = array_ops.pad(padding, [[1, 0], [1, 0]])
      return [array_ops.pad(x, padding, mode='SYMMETRIC') for x in images]

    # Contrast-structure scores collected at every scale.
    mcs = []
    for k in range(len(power_factors)):
      with ops.name_scope(None, 'Scale%d' % k, imgs):
        if k > 0:
          # Avg pool takes rank 4 tensors. Flatten leading dimensions.
          flat_imgs = [
              array_ops.reshape(x, array_ops.concat([[-1], t], 0))
              for x, t in zip(imgs, tails)
          ]
          remainder = tails[0] % divisor_tensor
          need_padding = math_ops.reduce_any(math_ops.not_equal(remainder, 0))
          # pylint: disable=cell-var-from-loop
          padded = tf_cond.cond(need_padding,
                                lambda: do_pad(flat_imgs, remainder),
                                lambda: flat_imgs)
          # pylint: enable=cell-var-from-loop

          downscaled = [
              nn_ops.avg_pool(
                  x, ksize=divisor, strides=divisor, padding='VALID')
              for x in padded
          ]
          # Restore the original leading batch dimensions; `tails` is rebound
          # to the new (halved) image dimensions for the next iteration.
          tails = [x[1:] for x in array_ops.shape_n(downscaled)]
          imgs = [
              array_ops.reshape(x, array_ops.concat([h, t], 0))
              for x, h, t in zip(downscaled, heads, tails)
          ]

        # Overwrite previous ssim value since we only need the last one.
        ssim_per_channel, cs = _ssim_per_channel(
            *imgs,
            max_val=max_val,
            filter_size=filter_size,
            filter_sigma=filter_sigma,
            k1=k1,
            k2=k2)
        # relu clamps negative scores so the fractional powers below are
        # well-defined.
        mcs.append(nn_ops.relu(cs))

    # Remove the cs score for the last scale. In the MS-SSIM calculation,
    # we use the l(p) at the highest scale. l(p) * cs(p) is ssim(p).
    mcs.pop()  # Remove the cs score for the last scale.
    mcs_and_ssim = array_ops_stack.stack(
        mcs + [nn_ops.relu(ssim_per_channel)], axis=-1)
    # Take weighted geometric mean across the scale axis.
    ms_ssim = math_ops.reduce_prod(
        math_ops.pow(mcs_and_ssim, power_factors), [-1])

    return math_ops.reduce_mean(ms_ssim, [-1])  # Avg over color channels.
@tf_export('image.image_gradients')
@dispatch.add_dispatch_support
def image_gradients(image):
  """Returns image gradients (dy, dx) for each color channel.

  Both output tensors have the same shape as the input: [batch_size, h, w,
  d]. The gradient values are organized so that [I(x+1, y) - I(x, y)] is in
  location (x, y). That means that dy will always have zeros in the last
  row, and dx will always have zeros in the last column.

  Usage Example:
    ```python
    BATCH_SIZE = 1
    IMAGE_HEIGHT = 5
    IMAGE_WIDTH = 5
    CHANNELS = 1
    image = tf.reshape(tf.range(IMAGE_HEIGHT * IMAGE_WIDTH * CHANNELS,
      delta=1, dtype=tf.float32),
      shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS))
    dy, dx = tf.image.image_gradients(image)
    print(image[0, :,:,0])
    tf.Tensor(
      [[ 0.  1.  2.  3.  4.]
      [ 5.  6.  7.  8.  9.]
      [10. 11. 12. 13. 14.]
      [15. 16. 17. 18. 19.]
      [20. 21. 22. 23. 24.]], shape=(5, 5), dtype=float32)
    print(dy[0, :,:,0])
    tf.Tensor(
      [[5. 5. 5. 5. 5.]
      [5. 5. 5. 5. 5.]
      [5. 5. 5. 5. 5.]
      [5. 5. 5. 5. 5.]
      [0. 0. 0. 0. 0.]], shape=(5, 5), dtype=float32)
    print(dx[0, :,:,0])
    tf.Tensor(
      [[1. 1. 1. 1. 0.]
      [1. 1. 1. 1. 0.]
      [1. 1. 1. 1. 0.]
      [1. 1. 1. 1. 0.]
      [1. 1. 1. 1. 0.]], shape=(5, 5), dtype=float32)
    ```

  Args:
    image: Tensor with shape [batch_size, h, w, d].

  Returns:
    Pair of tensors (dy, dx) holding the vertical and horizontal image
    gradients (1-step finite difference).

  Raises:
    ValueError: If `image` is not a 4D tensor.
  """
  if image.get_shape().ndims != 4:
    raise ValueError('image_gradients expects a 4D tensor '
                     '[batch_size, h, w, d], not {}.'.format(image.get_shape()))

  image_shape = array_ops.shape(image)
  batch_size, height, width, depth = array_ops_stack.unstack(image_shape)

  # One-step finite differences along the height and width axes. Each is one
  # row/column shorter than the input.
  grad_y = image[:, 1:, :, :] - image[:, :-1, :, :]
  grad_x = image[:, :, 1:, :] - image[:, :, :-1, :]

  # Pad with a zero row (for dy) and zero column (for dx) so the outputs
  # match the input size; the gradient [I(x+1, y) - I(x, y)] sits on the
  # base pixel (x, y).
  row_shape = array_ops_stack.stack([batch_size, 1, width, depth])
  grad_y = array_ops.concat([grad_y, array_ops.zeros(row_shape, image.dtype)],
                            1)
  grad_y = array_ops.reshape(grad_y, image_shape)

  col_shape = array_ops_stack.stack([batch_size, height, 1, depth])
  grad_x = array_ops.concat([grad_x, array_ops.zeros(col_shape, image.dtype)],
                            2)
  grad_x = array_ops.reshape(grad_x, image_shape)

  return grad_y, grad_x
@tf_export('image.sobel_edges')
@dispatch.add_dispatch_support
def sobel_edges(image):
  """Returns a tensor holding Sobel edge maps.

  Example usage:

  For general usage, `image` would be loaded from a file as below:

  ```python
  image_bytes = tf.io.read_file(path_to_image_file)
  image = tf.image.decode_image(image_bytes)
  image = tf.cast(image, tf.float32)
  image = tf.expand_dims(image, 0)
  ```
  But for demo purposes, we are using randomly generated values for `image`:

  >>> image = tf.random.uniform(
  ...   maxval=255, shape=[1, 28, 28, 3], dtype=tf.float32)
  >>> sobel = tf.image.sobel_edges(image)
  >>> sobel_y = np.asarray(sobel[0, :, :, :, 0]) # sobel in y-direction
  >>> sobel_x = np.asarray(sobel[0, :, :, :, 1]) # sobel in x-direction

  For displaying the sobel results, PIL's [Image Module](
  https://pillow.readthedocs.io/en/stable/reference/Image.html) can be used:

  ```python
  # Display edge maps for the first channel (at index 0)
  Image.fromarray(sobel_y[..., 0] / 4 + 0.5).show()
  Image.fromarray(sobel_x[..., 0] / 4 + 0.5).show()
  ```

  Args:
    image: Image tensor with shape [batch_size, h, w, d] and type float32 or
      float64. The image(s) must be 2x2 or larger.

  Returns:
    Tensor holding edge maps for each channel. Returns a tensor with shape
    [batch_size, h, w, d, 2] where the last two dimensions hold [[dy[0],
    dx[0]], [dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using the
    Sobel filter.
  """
  # Capture the static shape up front so it can be restored on the output.
  static_image_shape = image.get_shape()
  image_shape = array_ops.shape(image)

  # Sobel filters: index 0 responds to vertical change (dy), index 1 to
  # horizontal change (dx).
  raw_kernels = [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]],
                 [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]]
  num_kernels = len(raw_kernels)

  # Stack the two 3x3 filters into a [3, 3, 1, 2] depthwise filter, then tile
  # across the input channels so each channel is filtered independently.
  stacked = np.expand_dims(
      np.transpose(numpy_compat.np_asarray(raw_kernels), (1, 2, 0)), -2)
  kernels_tf = constant_op.constant(stacked, dtype=image.dtype)
  kernels_tf = array_ops.tile(
      kernels_tf, [1, 1, image_shape[-1], 1], name='sobel_filters')

  # Reflect-pad by one pixel on each side so 'VALID' convolution preserves
  # the spatial size.
  padded = array_ops.pad(
      image, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')

  # Depthwise convolution yields shape [batch_size, h, w, d * num_kernels].
  output = nn_impl.depthwise_conv2d(padded, kernels_tf, [1, 1, 1, 1], 'VALID')

  # Split the interleaved kernel results into a trailing axis:
  # [batch_size, h, w, d, num_kernels].
  output = array_ops.reshape(
      output, shape=array_ops.concat([image_shape, [num_kernels]], 0))
  output.set_shape(static_image_shape.concatenate([num_kernels]))
  return output
@tf_export(v1=['image.resize_bicubic'])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    date=None,
    instructions=(
        'Use `tf.image.resize(...method=ResizeMethod.BICUBIC...)` instead.'
    ),
)
def resize_bicubic(images,
                   size,
                   align_corners=False,
                   name=None,
                   half_pixel_centers=False):
  """Resizes `images` to `size` using bicubic interpolation (deprecated).

  Thin pass-through to the generated `ResizeBicubic` op, kept for the
  TF1 `tf.compat.v1.image.resize_bicubic` endpoint.

  Args:
    images: 4-D tensor of shape `[batch, height, width, channels]`.
    size: 1-D int32 tensor of 2 elements: `new_height, new_width`.
    align_corners: If True, align the corner pixels of the input and output
      tensors exactly.
    name: A name for the operation (optional).
    half_pixel_centers: If True, use half-pixel centers when sampling.

  Returns:
    A tensor of resized images.
  """
  return gen_image_ops.resize_bicubic(
      images=images,
      size=size,
      align_corners=align_corners,
      half_pixel_centers=half_pixel_centers,
      name=name)
@tf_export(v1=['image.resize_bilinear'])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    date=None,
    instructions=(
        'Use `tf.image.resize(...method=ResizeMethod.BILINEAR...)` instead.'
    ),
)
def resize_bilinear(images,
                    size,
                    align_corners=False,
                    name=None,
                    half_pixel_centers=False):
  """Resizes `images` to `size` using bilinear interpolation (deprecated).

  Thin pass-through to the generated `ResizeBilinear` op, kept for the
  TF1 `tf.compat.v1.image.resize_bilinear` endpoint.

  Args:
    images: 4-D tensor of shape `[batch, height, width, channels]`.
    size: 1-D int32 tensor of 2 elements: `new_height, new_width`.
    align_corners: If True, align the corner pixels of the input and output
      tensors exactly.
    name: A name for the operation (optional).
    half_pixel_centers: If True, use half-pixel centers when sampling.

  Returns:
    A tensor of resized images.
  """
  return gen_image_ops.resize_bilinear(
      images=images,
      size=size,
      align_corners=align_corners,
      half_pixel_centers=half_pixel_centers,
      name=name)
@tf_export(v1=['image.resize_nearest_neighbor'])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    date=None,
    instructions=(
        'Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` '
        'instead.'
    ),
)
def resize_nearest_neighbor(images,
                            size,
                            align_corners=False,
                            name=None,
                            half_pixel_centers=False):
  """Resizes `images` to `size` using nearest-neighbor sampling (deprecated).

  Thin pass-through to the generated `ResizeNearestNeighbor` op, kept for the
  TF1 `tf.compat.v1.image.resize_nearest_neighbor` endpoint.

  Args:
    images: 4-D tensor of shape `[batch, height, width, channels]`.
    size: 1-D int32 tensor of 2 elements: `new_height, new_width`.
    align_corners: If True, align the corner pixels of the input and output
      tensors exactly.
    name: A name for the operation (optional).
    half_pixel_centers: If True, use half-pixel centers when sampling.

  Returns:
    A tensor of resized images.
  """
  return gen_image_ops.resize_nearest_neighbor(
      images=images,
      size=size,
      align_corners=align_corners,
      half_pixel_centers=half_pixel_centers,
      name=name)
# Unlike the wrappers above, `resize_area` is exported by decorating the
# generated op directly: add dispatch support, wrap with the deprecation
# notice, and register it under the TF1 `tf.compat.v1.image.resize_area`
# endpoint.
resize_area_deprecation = deprecation.deprecated(
    date=None,
    instructions=(
        'Use `tf.image.resize(...method=ResizeMethod.AREA...)` instead.'))
resize_area = tf_export(v1=['image.resize_area'])(
    resize_area_deprecation(
        dispatch.add_dispatch_support(gen_image_ops.resize_area)
    )
)
@tf_export('image.crop_and_resize', v1=[])
@dispatch.add_dispatch_support
def crop_and_resize_v2(image,
                       boxes,
                       box_indices,
                       crop_size,
                       method='bilinear',
                       extrapolation_value=.0,
                       name=None):
  """Extracts crops from the input image tensor and resizes them.

  Extracts crops from the input image tensor and resizes them using bilinear
  sampling or nearest neighbor sampling (possibly with aspect ratio change)
  to a common output size specified by `crop_size`. This is more general
  than the `crop_to_bounding_box` op which extracts a fixed size slice from
  the input image and does not allow resizing or aspect ratio change. The
  crops occur first and then the resize.

  Returns a tensor with `crops` from the input `image` at positions defined
  at the bounding box locations in `boxes`. The cropped boxes are all
  resized (with bilinear or nearest neighbor interpolation) to a fixed
  `size = [crop_height, crop_width]`. The result is a 4-D tensor
  `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner
  aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give
  identical results to using `tf.compat.v1.image.resize_bilinear()` or
  `tf.compat.v1.image.resize_nearest_neighbor()`(depends on the `method`
  argument) with
  `align_corners=True`.

  Args:
    image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
      Both `image_height` and `image_width` need to be positive.
    boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the
      tensor specifies the coordinates of a box in the `box_ind[i]` image and
      is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized
      coordinate value of `y` is mapped to the image coordinate at `y *
      (image_height - 1)`, so as the `[0, 1]` interval of normalized image
      height is mapped to `[0, image_height - 1]` in image height
      coordinates. We do allow `y1` > `y2`, in which case the sampled crop is
      an up-down flipped version of the original image. The width dimension
      is treated similarly. Normalized coordinates outside the `[0, 1]` range
      are allowed, in which case we use `extrapolation_value` to extrapolate
      the input image values.
    box_indices: A 1-D tensor of shape `[num_boxes]` with int32 values in
      `[0, batch)`. The value of `box_ind[i]` specifies the image that the
      `i`-th box refers to.
    crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`.
      All cropped image patches are resized to this size. The aspect ratio of
      the image content is not preserved. Both `crop_height` and `crop_width`
      need to be positive.
    method: An optional string specifying the sampling method for resizing.
      It can be either `"bilinear"` or `"nearest"` and default to
      `"bilinear"`. Currently two sampling methods are supported: Bilinear
      and Nearest Neighbor.
    extrapolation_value: An optional `float`. Defaults to `0.0`. Value used
      for extrapolation, when applicable.
    name: A name for the operation (optional).

  Returns:
    A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

  Usage example:

  >>> BATCH_SIZE = 1
  >>> NUM_BOXES = 5
  >>> IMAGE_HEIGHT = 256
  >>> IMAGE_WIDTH = 256
  >>> CHANNELS = 3
  >>> CROP_SIZE = (24, 24)

  >>> image = tf.random.normal(shape=(
  ...   BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS) )
  >>> boxes = tf.random.uniform(shape=(NUM_BOXES, 4))
  >>> box_indices = tf.random.uniform(shape=(NUM_BOXES,), minval=0,
  ...   maxval=BATCH_SIZE, dtype=tf.int32)
  >>> output = tf.image.crop_and_resize(image, boxes, box_indices, CROP_SIZE)
  >>> output.shape
  TensorShape([5, 24, 24, 3])

  Example with linear interpolation:

  >>> image = np.arange(0, 18, 2).astype('float32').reshape(3, 3)
  >>> result = tf.image.crop_and_resize(
  ...   image[None, :, :, None],
  ...   np.asarray([[0.5,0.5,1,1]]), [0], [3, 3], method='bilinear')
  >>> result[0][:, :, 0]
  <tf.Tensor: shape=(3, 3), dtype=float32, numpy=
  array([[ 8.,  9., 10.],
         [11., 12., 13.],
         [14., 15., 16.]], dtype=float32)>

  Example with nearest interpolation:

  >>> image = np.arange(0, 18, 2).astype('float32').reshape(3, 3)
  >>> result = tf.image.crop_and_resize(
  ...   image[None, :, :, None],
  ...   np.asarray([[0.5,0.5,1,1]]), [0], [3, 3], method='nearest')
  >>> result[0][:, :, 0]
  <tf.Tensor: shape=(3, 3), dtype=float32, numpy=
  array([[ 8., 10., 10.],
         [14., 16., 16.],
         [14., 16., 16.]], dtype=float32)>
  """
  # V2 endpoint: identical to v1 except the argument is named `box_indices`
  # (v1's `box_ind` is deprecated); passed positionally to the generated op.
  return gen_image_ops.crop_and_resize(image, boxes, box_indices, crop_size,
                                       method, extrapolation_value, name)
@tf_export(v1=['image.crop_and_resize'])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             'box_ind is deprecated, use box_indices instead',
                             'box_ind')
def crop_and_resize_v1(  # pylint: disable=missing-docstring
    image,
    boxes,
    box_ind=None,
    crop_size=None,
    method='bilinear',
    extrapolation_value=0,
    name=None,
    box_indices=None):
  # TF1 endpoint: accepts both the deprecated `box_ind` and the new
  # `box_indices` name; the lookup raises if both are supplied.
  box_ind = deprecation.deprecated_argument_lookup('box_indices', box_indices,
                                                   'box_ind', box_ind)
  return gen_image_ops.crop_and_resize(image, boxes, box_ind, crop_size, method,
                                       extrapolation_value, name)


# Reuse the generated op's docstring so the v1 endpoint stays documented.
crop_and_resize_v1.__doc__ = gen_image_ops.crop_and_resize.__doc__
@tf_export(v1=['image.extract_glimpse'])
@dispatch.add_dispatch_support
def extract_glimpse(
    input,  # pylint: disable=redefined-builtin
    size,
    offsets,
    centered=True,
    normalized=True,
    uniform_noise=True,
    name=None):
  """Extracts a glimpse from the input tensor.

  Returns a set of windows called glimpses extracted at location `offsets`
  from the input tensor. If the windows only partially overlap the inputs,
  the non-overlapping areas will be filled with random noise.

  The result is a 4-D tensor of shape `[batch_size, glimpse_height,
  glimpse_width, channels]`. The channels and batch dimensions are the same
  as that of the input tensor. The height and width of the output windows
  are specified in the `size` parameter.

  The argument `normalized` and `centered` controls how the windows are
  built:

  * If the coordinates are normalized but not centered, 0.0 and 1.0
    correspond to the minimum and maximum of each height and width
    dimension.
  * If the coordinates are both normalized and centered, they range from
    -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left
    corner, the lower right corner is located at (1.0, 1.0) and the center
    is at (0, 0).
  * If the coordinates are not normalized they are interpreted as numbers
    of pixels.

  Usage Example:

  >>> x = [[[[0.0],
  ...           [1.0],
  ...           [2.0]],
  ...          [[3.0],
  ...           [4.0],
  ...           [5.0]],
  ...          [[6.0],
  ...           [7.0],
  ...           [8.0]]]]
  >>> tf.compat.v1.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]],
  ...                                   centered=False, normalized=False)
  <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=
  array([[[[0.],
           [1.]],
          [[3.],
           [4.]]]], dtype=float32)>

  Args:
    input: A `Tensor` of type `float32`. A 4-D float tensor of shape
      `[batch_size, height, width, channels]`.
    size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing
      the size of the glimpses to extract. The glimpse height must be
      specified first, following by the glimpse width.
    offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape
      `[batch_size, 2]` containing the y, x locations of the center of each
      window.
    centered: An optional `bool`. Defaults to `True`. indicates if the
      offset coordinates are centered relative to the image, in which case
      the (0, 0) offset is relative to the center of the input images. If
      false, the (0,0) offset corresponds to the upper left corner of the
      input images.
    normalized: An optional `bool`. Defaults to `True`. indicates if the
      offset coordinates are normalized.
    uniform_noise: An optional `bool`. Defaults to `True`. indicates if the
      noise should be generated using a uniform distribution or a Gaussian
      distribution.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  # TF1 endpoint: the legacy op only supports the boolean uniform/gaussian
  # noise switch; see extract_glimpse_v2 for the string-valued `noise` arg.
  return gen_image_ops.extract_glimpse(
      input=input,
      offsets=offsets,
      size=size,
      centered=centered,
      normalized=normalized,
      uniform_noise=uniform_noise,
      name=name)
@tf_export('image.extract_glimpse', v1=[])
@dispatch.add_dispatch_support
def extract_glimpse_v2(
    input,  # pylint: disable=redefined-builtin
    size,
    offsets,
    centered=True,
    normalized=True,
    noise='uniform',
    name=None):
  """Extracts a glimpse from the input tensor.

  Returns a set of windows called glimpses extracted at location `offsets`
  from the input tensor. If the windows only partially overlap the inputs,
  the non-overlapping areas will be filled with random noise.

  The result is a 4-D tensor of shape `[batch_size, glimpse_height,
  glimpse_width, channels]`. The channels and batch dimensions are the same
  as that of the input tensor. The height and width of the output windows
  are specified in the `size` parameter.

  The argument `normalized` and `centered` controls how the windows are
  built:

  * If the coordinates are normalized but not centered, 0.0 and 1.0
    correspond to the minimum and maximum of each height and width
    dimension.
  * If the coordinates are both normalized and centered, they range from
    -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left
    corner, the lower right corner is located at (1.0, 1.0) and the center
    is at (0, 0).
  * If the coordinates are not normalized they are interpreted as numbers
    of pixels.

  Usage Example:

  >>> x = [[[[0.0],
  ...           [1.0],
  ...           [2.0]],
  ...          [[3.0],
  ...           [4.0],
  ...           [5.0]],
  ...          [[6.0],
  ...           [7.0],
  ...           [8.0]]]]
  >>> tf.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]],
  ...                         centered=False, normalized=False)
  <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=
  array([[[[4.],
           [5.]],
          [[7.],
           [8.]]]], dtype=float32)>

  Args:
    input: A `Tensor` of type `float32`. A 4-D float tensor of shape
      `[batch_size, height, width, channels]`.
    size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing
      the size of the glimpses to extract. The glimpse height must be
      specified first, following by the glimpse width.
    offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape
      `[batch_size, 2]` containing the y, x locations of the center of each
      window.
    centered: An optional `bool`. Defaults to `True`. indicates if the
      offset coordinates are centered relative to the image, in which case
      the (0, 0) offset is relative to the center of the input images. If
      false, the (0,0) offset corresponds to the upper left corner of the
      input images.
    normalized: An optional `bool`. Defaults to `True`. indicates if the
      offset coordinates are normalized.
    noise: An optional `string`. Defaults to `uniform`. indicates if the
      noise should be `uniform` (uniform distribution), `gaussian` (gaussian
      distribution), or `zero` (zero padding).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  # V2 endpoint: noise type is selected through the string-valued `noise`
  # argument; the legacy boolean flag is pinned to False so it cannot
  # override `noise`.
  return gen_image_ops.extract_glimpse_v2(
      input=input,
      offsets=offsets,
      size=size,
      centered=centered,
      normalized=normalized,
      noise=noise,
      uniform_noise=False,
      name=name)
@tf_export('image.combined_non_max_suppression')
@dispatch.add_dispatch_support
def combined_non_max_suppression(boxes,
                                 scores,
                                 max_output_size_per_class,
                                 max_total_size,
                                 iou_threshold=0.5,
                                 score_threshold=float('-inf'),
                                 pad_per_class=False,
                                 clip_boxes=True,
                                 name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Batched, multi-class non-max suppression: for every image in the batch and
  for every class, boxes with high intersection-over-union (IOU) overlap with
  a previously selected, higher-scoring box are pruned away. Boxes are
  supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the
  coordinates of any diagonal pair of box corners, either normalized (in
  `[0, 1]`) or absolute. The algorithm is agnostic to the origin of the
  coordinate system and is invariant to orthogonal transformations and
  translations, so reflecting or translating the coordinate system selects
  the same boxes.

  The result is the final boxes, scores and classes produced after
  non-max suppression has been applied per batch element across all classes.

  Args:
    boxes: A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If
      `q` is 1, the same boxes are shared by every class; if `q` equals the
      number of classes, class-specific boxes are used.
    scores: A 3-D float `Tensor` of shape `[batch_size, num_boxes,
      num_classes]` holding one score per box (each row of boxes).
    max_output_size_per_class: A scalar integer `Tensor`; the maximum number
      of boxes selected by non-max suppression per class.
    max_total_size: An int32 scalar; the maximum number of boxes retained over
      all classes. Note that a very large value may cause OOM depending on
      the system workload.
    iou_threshold: A float; boxes overlapping more than this IOU with a
      selected box are suppressed.
    score_threshold: A float; boxes scoring at or below this threshold are
      removed.
    pad_per_class: If false, the output boxes, scores and classes are
      padded/clipped to `max_total_size`. If true, they are padded to
      `max_size_per_class` * `num_classes`, then clipped to `max_total_size`
      if that is exceeded. Defaults to false.
    clip_boxes: If true, output box coordinates are clipped to `[0, 1]`;
      if false, they are emitted as-is. Defaults to true.
    name: A name for the operation (optional).

  Returns:
    'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor
      containing the non-max suppressed boxes.
    'nmsed_scores': A [batch_size, max_detections] float32 tensor containing
      the scores for the boxes.
    'nmsed_classes': A [batch_size, max_detections] float32 tensor
      containing the class for boxes.
    'valid_detections': A [batch_size] int32 tensor indicating the number of
      valid detections per batch item. Only the top valid_detections[i]
      entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid; the
      remaining entries are zero paddings.
  """
  with ops.name_scope(name, 'combined_non_max_suppression'):
    iou_threshold_t = ops.convert_to_tensor(
        iou_threshold, dtype=dtypes.float32, name='iou_threshold')
    score_threshold_t = ops.convert_to_tensor(
        score_threshold, dtype=dtypes.float32, name='score_threshold')
    # Deliberately convert `max_total_size` *without* forcing a dtype: values
    # inside int32 range become an int32 tensor, while anything larger becomes
    # int64 and is rejected by the op registration (which expects int32),
    # surfacing the overflow instead of silently wrapping.
    # TODO(b/173251596): Once there is a more general solution to warn against
    # int overflow conversions, revisit this check.
    max_total_size_t = ops.convert_to_tensor(max_total_size)
    return gen_image_ops.combined_non_max_suppression(
        boxes=boxes,
        scores=scores,
        max_output_size_per_class=max_output_size_per_class,
        max_total_size=max_total_size_t,
        iou_threshold=iou_threshold_t,
        score_threshold=score_threshold_t,
        pad_per_class=pad_per_class,
        clip_boxes=clip_boxes)
def _bbox_overlap(boxes_a, boxes_b):
  """Computes pairwise IoU (intersection over union) between two box sets.

  Args:
    boxes_a: a tensor with a shape of [batch_size, N, 4]. N is the number of
      boxes per image. The last dimension is the pixel coordinates in
      [ymin, xmin, ymax, xmax] form.
    boxes_b: a tensor with a shape of [batch_size, M, 4]. M is the number of
      boxes. The last dimension is the pixel coordinates in
      [ymin, xmin, ymax, xmax] form.

  Returns:
    a tensor of shape [batch_size, N, M] whose (n, m) entry is the ratio of
    intersection area over union area between boxes_a[:, n] and
    boxes_b[:, m].
  """
  with ops.name_scope('bbox_overlap'):
    a_ymin, a_xmin, a_ymax, a_xmax = array_ops.split(
        value=boxes_a, num_or_size_splits=4, axis=2)
    b_ymin, b_xmin, b_ymax, b_xmax = array_ops.split(
        value=boxes_b, num_or_size_splits=4, axis=2)

    # Transposing b's coordinates to [batch, 1, M] broadcasts every pair
    # (n, m) when combined with a's [batch, N, 1] coordinates.
    def _pairwise(op, a_coord, b_coord):
      return op(a_coord, array_ops.transpose(b_coord, [0, 2, 1]))

    # Intersection rectangle: the tightest box contained in both inputs.
    inter_xmin = _pairwise(math_ops.maximum, a_xmin, b_xmin)
    inter_xmax = _pairwise(math_ops.minimum, a_xmax, b_xmax)
    inter_ymin = _pairwise(math_ops.maximum, a_ymin, b_ymin)
    inter_ymax = _pairwise(math_ops.minimum, a_ymax, b_ymax)
    # Clamp negative extents to zero: disjoint boxes have no intersection.
    inter_width = math_ops.maximum(inter_xmax - inter_xmin, 0)
    inter_height = math_ops.maximum(inter_ymax - inter_ymin, 0)
    inter_area = inter_width * inter_height

    area_a = (a_ymax - a_ymin) * (a_xmax - a_xmin)
    area_b = (b_ymax - b_ymin) * (b_xmax - b_xmin)
    # A small epsilon keeps the division well-defined for zero-area pairs.
    union_area = (
        area_a + array_ops.transpose(area_b, [0, 2, 1]) - inter_area + 1e-8)
    return inter_area / union_area
def _self_suppression(iou, _, iou_sum, iou_threshold):
  """Runs one step of suppression among boxes of the same tile.

  First finds boxes that cannot be suppressed by any other box (their maximum
  incoming IoU is below the threshold), then uses only those boxes to
  suppress the rest of the tile. Designed as a `while_loop` body, which is
  why it takes and returns a 4-tuple.

  Args:
    iou: pairwise IoU tensor for one tile, of shape
      [batch_size, num_boxes_with_padding, num_boxes_with_padding].
    _: ignored; placeholder for the loop-carried "continue" flag.
    iou_sum: a tensor of shape [batch_size], the current per-batch IoU sum.
    iou_threshold: a scalar tensor.

  Returns:
    iou_suppressed: the IoU tensor after this suppression step.
    iou_diff: a scalar bool tensor, True if any box was suppressed in this
      step (i.e. the loop should continue).
    iou_sum_new: a [batch_size] tensor with the IoU sum after suppression.
    iou_threshold: passed through unchanged.
  """
  batch_size = array_ops.shape(iou)[0]
  # A box may suppress others only if nothing overlaps it above threshold.
  suppressor_mask = array_ops.reshape(
      math_ops.reduce_max(iou, 1) < iou_threshold, [batch_size, -1, 1])
  can_suppress_others = math_ops.cast(suppressor_mask, iou.dtype)
  # A box survives if no suppressor overlaps it above threshold.
  survivor_mask = math_ops.cast(
      math_ops.reduce_max(can_suppress_others * iou, 1) < iou_threshold,
      iou.dtype)
  iou_after_suppression = array_ops.reshape(
      survivor_mask, [batch_size, -1, 1]) * iou
  iou_sum_new = math_ops.reduce_sum(iou_after_suppression, [1, 2])
  any_change = math_ops.reduce_any(iou_sum - iou_sum_new > iou_threshold)
  return [iou_after_suppression, any_change, iou_sum_new, iou_threshold]
def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size):
  """Suppresses boxes of one tile using boxes from another tile.

  Designed as a `while_loop` body: boxes of tile `inner_idx` are used to
  suppress the boxes in `box_slice`, and the inner index is advanced.

  Args:
    boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4].
    box_slice: a tensor of shape [batch_size, tile_size, 4]; the tile being
      suppressed.
    iou_threshold: a scalar tensor.
    inner_idx: a scalar tensor, the index of the suppressing tile.
    tile_size: an integer, the number of boxes per tile.

  Returns:
    boxes: the input boxes, unchanged.
    box_slice_after_suppression: box_slice with suppressed boxes zeroed out.
    iou_threshold: passed through unchanged.
    inner_idx + 1: the advanced inner index.
  """
  batch_size = array_ops.shape(boxes)[0]
  suppressing_tile = array_ops.slice(
      boxes, [0, inner_idx * tile_size, 0], [batch_size, tile_size, 4])
  iou = _bbox_overlap(suppressing_tile, box_slice)
  # A box survives only if it clears the IoU threshold against every box of
  # the suppressing tile; zeroing its coordinates marks it as suppressed.
  keep = array_ops.expand_dims(
      math_ops.cast(
          math_ops.reduce_all(iou < iou_threshold, [1]), box_slice.dtype),
      2)
  box_slice_after_suppression = keep * box_slice
  return boxes, box_slice_after_suppression, iou_threshold, inner_idx + 1
def _suppression_loop_body(boxes, iou_threshold, output_size, idx, tile_size):
  """Process boxes in the range [idx*tile_size, (idx+1)*tile_size).

  One iteration of the outer tiled-NMS loop: the current tile is first
  suppressed by every earlier tile (cross-suppression), then by itself
  (self-suppression, iterated to a fixed point). Suppressed boxes are zeroed
  out in `boxes`, and the per-batch count of surviving boxes is accumulated.

  Args:
    boxes: a tensor with a shape of [batch_size, anchors, 4].
    iou_threshold: a float representing the threshold for deciding whether boxes
      overlap too much with respect to IOU.
    output_size: an int32 tensor of size [batch_size]. Representing the number
      of selected boxes for each batch.
    idx: an integer scalar representing induction variable.
    tile_size: an integer representing the number of boxes in a tile

  Returns:
    boxes: updated boxes.
    iou_threshold: pass down iou_threshold to the next iteration.
    output_size: the updated output_size.
    idx: the updated induction variable.
  """
  with ops.name_scope('suppression_loop_body'):
    num_tiles = array_ops.shape(boxes)[1] // tile_size
    batch_size = array_ops.shape(boxes)[0]

    # Closure over `tile_size` so the while_loop body has the 4-arg signature
    # expected by `_cross_suppression`'s loop-carried variables.
    def cross_suppression_func(boxes, box_slice, iou_threshold, inner_idx):
      return _cross_suppression(boxes, box_slice, iou_threshold, inner_idx,
                                tile_size)

    # Iterates over tiles that can possibly suppress the current tile
    # (all tiles with index < idx).
    box_slice = array_ops.slice(boxes, [0, idx * tile_size, 0],
                                [batch_size, tile_size, 4])
    _, box_slice, _, _ = while_loop.while_loop(
        lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
        cross_suppression_func,
        [boxes, box_slice, iou_threshold,
         constant_op.constant(0)])

    # Iterates over the current tile to compute self-suppression.
    iou = _bbox_overlap(box_slice, box_slice)
    # Strictly-upper-triangular mask: entry (i, j) is kept only for j > i, so
    # a box can only be suppressed by an earlier box in the tile (assumes
    # boxes arrive sorted by descending score — see the caller's sort step).
    mask = array_ops.expand_dims(
        array_ops.reshape(
            math_ops.range(tile_size), [1, -1]) > array_ops.reshape(
                math_ops.range(tile_size), [-1, 1]), 0)
    iou *= math_ops.cast(
        math_ops.logical_and(mask, iou >= iou_threshold), iou.dtype)
    # Run `_self_suppression` until it reports no further change.
    suppressed_iou, _, _, _ = while_loop.while_loop(
        lambda _iou, loop_condition, _iou_sum, _: loop_condition,
        _self_suppression, [
            iou,
            constant_op.constant(True),
            math_ops.reduce_sum(iou, [1, 2]), iou_threshold
        ])
    # A box is suppressed if any surviving box still overlaps it; zero out
    # the coordinates of suppressed boxes in this tile.
    suppressed_box = math_ops.reduce_sum(suppressed_iou, 1) > 0
    box_slice *= array_ops.expand_dims(
        1.0 - math_ops.cast(suppressed_box, box_slice.dtype), 2)

    # Uses box_slice to update the input boxes: a one-hot (over tiles) mask
    # writes the processed tile back while leaving all other tiles intact.
    mask = array_ops.reshape(
        math_ops.cast(
            math_ops.equal(math_ops.range(num_tiles), idx), boxes.dtype),
        [1, -1, 1, 1])
    boxes = array_ops.tile(array_ops.expand_dims(
        box_slice, [1]), [1, num_tiles, 1, 1]) * mask + array_ops.reshape(
            boxes, [batch_size, num_tiles, tile_size, 4]) * (1 - mask)
    boxes = array_ops.reshape(boxes, [batch_size, -1, 4])

    # Updates output_size with the count of boxes in this tile that survived
    # (i.e. whose coordinates are not all zero).
    output_size += math_ops.reduce_sum(
        math_ops.cast(
            math_ops.reduce_any(box_slice > 0, [2]), dtypes.int32), [1])
  return boxes, iou_threshold, output_size, idx + 1
@tf_export('image.non_max_suppression_padded')
@dispatch.add_dispatch_support
def non_max_suppression_padded(boxes,
                               scores,
                               max_output_size,
                               iou_threshold=0.5,
                               score_threshold=float('-inf'),
                               pad_to_max_output_size=False,
                               name=None,
                               sorted_input=False,
                               canonicalized_coordinates=False,
                               tile_size=512):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Performs algorithmically equivalent operation to tf.image.non_max_suppression,
  with the addition of an optional parameter which zero-pads the output to
  be of size `max_output_size`.

  The output of this operation is a tuple containing the set of integers
  indexing into the input collection of bounding boxes representing the selected
  boxes and the number of valid indices in the index set. The bounding box
  coordinates corresponding to the selected indices can then be obtained using
  the `tf.slice` and `tf.gather` operations. For example:

  ```python
  selected_indices_padded, num_valid = tf.image.non_max_suppression_padded(
      boxes, scores, max_output_size, iou_threshold,
      score_threshold, pad_to_max_output_size=True)
  selected_indices = tf.slice(
      selected_indices_padded, tf.constant([0]), num_valid)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  Args:
    boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4].
      Dimensions except the last two are batch dimensions.
    scores: a tensor of rank 1 or higher with a shape of [..., num_boxes].
    max_output_size: a scalar integer `Tensor` representing the maximum number
      of boxes to be selected by non max suppression. Note that setting this
      value to a large number may result in OOM error depending on the system
      workload.
    iou_threshold: a float representing the threshold for deciding whether boxes
      overlap too much with respect to IoU (intersection over union).
    score_threshold: a float representing the threshold for box scores. Boxes
      with a score that is not larger than this threshold will be suppressed.
    pad_to_max_output_size: whether to pad the output idx to max_output_size.
      Must be set to True when the input is a batch of images.
    name: name of operation.
    sorted_input: a boolean indicating whether the input boxes and scores
      are sorted in descending order by the score.
    canonicalized_coordinates: if box coordinates are given as
      `[y_min, x_min, y_max, x_max]`, setting to True eliminate redundant
      computation to canonicalize box coordinates.
    tile_size: an integer representing the number of boxes in a tile, i.e.,
      the maximum number of boxes per image that can be used to suppress other
      boxes in parallel; larger tile_size means larger parallelism and
      potentially more redundant work.

  Returns:
    idx: a tensor with a shape of [..., num_boxes] representing the
      indices selected by non-max suppression. The leading dimensions
      are the batch dimensions of the input boxes. All numbers are within
      [0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i]
      indices (i.e., idx[i][:num_valid[i]]) are valid.
    num_valid: a tensor of rank 0 or higher with a shape of [...]
      representing the number of valid indices in idx. Its dimensions are the
      batch dimensions of the input boxes.
  Raises:
    ValueError: When set pad_to_max_output_size to False for batched input.
  """
  with ops.name_scope(name, 'non_max_suppression_padded'):
    if not pad_to_max_output_size:
      # pad_to_max_output_size may be set to False only when the shape of
      # boxes is [num_boxes, 4], i.e., a single image. We make best effort to
      # detect violations at compile time. If `boxes` does not have a static
      # rank, the check allows computation to proceed.
      if boxes.get_shape().rank is not None and boxes.get_shape().rank > 2:
        raise ValueError("'pad_to_max_output_size' (value {}) must be True for "
                         'batched input'.format(pad_to_max_output_size))
    # NOTE: a dead `if name is None: name = ''` reassignment was removed here;
    # `name` is consumed only by `ops.name_scope` above and never read again.
    idx, num_valid = non_max_suppression_padded_v2(
        boxes, scores, max_output_size, iou_threshold, score_threshold,
        sorted_input, canonicalized_coordinates, tile_size)
    # def_function.function seems to lose shape information, so set it here.
    if not pad_to_max_output_size:
      # Single image: strip the synthetic batch dimension and the padding.
      idx = idx[0, :num_valid]
    else:
      # Batched input: restore the caller's leading batch dimensions.
      batch_dims = array_ops.concat([
          array_ops.shape(boxes)[:-2],
          array_ops.expand_dims(max_output_size, 0)
      ], 0)
      idx = array_ops.reshape(idx, batch_dims)
    return idx, num_valid
# TODO(b/158709815): Improve performance regression due to
# def_function.function.
@def_function.function(
    experimental_implements='non_max_suppression_padded_v2')
def non_max_suppression_padded_v2(boxes,
                                  scores,
                                  max_output_size,
                                  iou_threshold=0.5,
                                  score_threshold=float('-inf'),
                                  sorted_input=False,
                                  canonicalized_coordinates=False,
                                  tile_size=512):
  """Non-maximum suppression.

  Prunes away boxes that have high intersection-over-union (IOU) overlap
  with previously selected boxes. Bounding boxes are supplied as
  `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any
  diagonal pair of box corners and the coordinates can be provided as normalized
  (i.e., lying in the interval `[0, 1]`) or absolute. The bounding box
  coordinates are canonicalized to `[y_min, x_min, y_max, x_max]`,
  where `(y_min, x_min)` and `(y_max, x_max)` are the coordinates of the lower
  left and upper right corner. User may indicate the input box coordinates are
  already canonicalized to eliminate redundant work by setting
  canonicalized_coordinates to `True`. Note that this algorithm is agnostic to
  where the origin is in the coordinate system. Note that this algorithm is
  invariant to orthogonal transformations and translations of the coordinate
  system; thus translating or reflections of the coordinate system result in the
  same boxes being selected by the algorithm.

  Similar to tf.image.non_max_suppression, non_max_suppression_padded
  implements hard NMS but can operate on a batch of images and improves
  performance by tiling the bounding boxes. Non_max_suppression_padded should
  be preferred over tf.image_non_max_suppression when running on devices with
  abundant parallelism for higher computation speed. For soft NMS, refer to
  tf.image.non_max_suppression_with_scores.

  While a serial NMS algorithm iteratively uses the highest-scored unprocessed
  box to suppress boxes, this algorithm uses many boxes to suppress other boxes
  in parallel. The key idea is to partition boxes into tiles based on their
  score and suppresses boxes tile by tile, thus achieving parallelism within a
  tile. The tile size determines the degree of parallelism.

  In cross suppression (using boxes of tile A to suppress boxes of tile B),
  all boxes in A can independently suppress boxes in B.

  Self suppression (suppressing boxes of the same tile) needs to be iteratively
  applied until there's no more suppression. In each iteration, boxes that
  cannot be suppressed are used to suppress boxes in the same tile.

  Pseudocode sketch of the algorithm:

  boxes = boxes.pad_to_multiple_of(tile_size)
  num_tiles = len(boxes) // tile_size
  output_boxes = []
  for i in range(num_tiles):
    box_tile = boxes[i*tile_size : (i+1)*tile_size]
    for j in range(i):
      # in parallel suppress boxes in box_tile using boxes from suppressing_tile
      suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]
      iou = _bbox_overlap(box_tile, suppressing_tile)
      # if a box is suppressed in iou, zero it out
      box_tile *= _update_boxes(iou)
    # Iteratively handle the diagonal tile.
    iou = _bbox_overlap(box_tile, box_tile)
    iou_changed = True
    while iou_changed:
      # boxes that are not suppressed by anything else
      suppressing_boxes = _get_suppressing_boxes(iou)
      # boxes that are suppressed by suppressing_boxes
      suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)
      # clear iou to 0 for boxes that are suppressed, as they cannot be used
      # to suppress other boxes any more
      new_iou = _clear_iou(iou, suppressed_boxes)
      iou_changed = (new_iou != iou)
      iou = new_iou
    # remaining boxes that can still suppress others, are selected boxes.
    output_boxes.append(_get_suppressing_boxes(iou))
    if len(output_boxes) >= max_output_size:
      break

  Args:
    boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4].
      Dimensions except the last two are batch dimensions. The last dimension
      represents box coordinates, given as [y_1, x_1, y_2, x_2]. The coordinates
      on each dimension can be given in any order (see also
      `canonicalized_coordinates`) but must describe a box with a positive area.
    scores: a tensor of rank 1 or higher with a shape of [..., num_boxes].
    max_output_size: a scalar integer `Tensor` representing the maximum number
      of boxes to be selected by non max suppression.
    iou_threshold: a float representing the threshold for deciding whether boxes
      overlap too much with respect to IoU (intersection over union).
    score_threshold: a float representing the threshold for box scores. Boxes
      with a score that is not larger than this threshold will be suppressed.
    sorted_input: a boolean indicating whether the input boxes and scores are
      sorted in descending order by the score.
    canonicalized_coordinates: if box coordinates are given as `[y_min, x_min,
      y_max, x_max]`, setting to True eliminate redundant computation to
      canonicalize box coordinates.
    tile_size: an integer representing the number of boxes in a tile, i.e., the
      maximum number of boxes per image that can be used to suppress other boxes
      in parallel; larger tile_size means larger parallelism and potentially
      more redundant work.

  Returns:
    idx: a tensor with a shape of [..., num_boxes] representing the
      indices selected by non-max suppression. The leading dimensions
      are the batch dimensions of the input boxes. All numbers are within
      [0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i]
      indices (i.e., idx[i][:num_valid[i]]) are valid.
    num_valid: a tensor of rank 0 or higher with a shape of [...]
      representing the number of valid indices in idx. Its dimensions are the
      batch dimensions of the input boxes.
  """
  def _sort_scores_and_boxes(scores, boxes):
    """Sort boxes based their score from highest to lowest.

    Args:
      scores: a tensor with a shape of [batch_size, num_boxes] representing
        the scores of boxes.
      boxes: a tensor with a shape of [batch_size, num_boxes, 4] representing
        the boxes.

    Returns:
      sorted_scores: a tensor with a shape of [batch_size, num_boxes]
        representing the sorted scores.
      sorted_boxes: a tensor representing the sorted boxes.
      sorted_scores_indices: a tensor with a shape of [batch_size, num_boxes]
        representing the index of the scores in a sorted descending order.
    """
    with ops.name_scope('sort_scores_and_boxes'):
      sorted_scores_indices = sort_ops.argsort(
          scores, axis=1, direction='DESCENDING')
      sorted_scores = array_ops.gather(
          scores, sorted_scores_indices, axis=1, batch_dims=1
      )
      sorted_boxes = array_ops.gather(
          boxes, sorted_scores_indices, axis=1, batch_dims=1
      )
    return sorted_scores, sorted_boxes, sorted_scores_indices

  # Flatten all leading batch dimensions into a single one.
  batch_dims = array_ops.shape(boxes)[:-2]
  num_boxes = array_ops.shape(boxes)[-2]
  boxes = array_ops.reshape(boxes, [-1, num_boxes, 4])
  scores = array_ops.reshape(scores, [-1, num_boxes])
  batch_size = array_ops.shape(boxes)[0]
  if score_threshold != float('-inf'):
    with ops.name_scope('filter_by_score'):
      # Zero out scores and coordinates of boxes below the score threshold.
      score_mask = math_ops.cast(scores > score_threshold, scores.dtype)
      scores *= score_mask
      box_mask = array_ops.expand_dims(
          math_ops.cast(score_mask, boxes.dtype), 2)
      boxes *= box_mask

  if not canonicalized_coordinates:
    with ops.name_scope('canonicalize_coordinates'):
      # Reorder coordinates to [y_min, x_min, y_max, x_max]. The min/max
      # orientation is probed on the first box only and assumed consistent
      # across the whole batch.
      y_1, x_1, y_2, x_2 = array_ops.split(
          value=boxes, num_or_size_splits=4, axis=2)
      y_1_is_min = math_ops.reduce_all(
          math_ops.less_equal(y_1[0, 0, 0], y_2[0, 0, 0]))
      y_min, y_max = tf_cond.cond(
          y_1_is_min, lambda: (y_1, y_2), lambda: (y_2, y_1))
      x_1_is_min = math_ops.reduce_all(
          math_ops.less_equal(x_1[0, 0, 0], x_2[0, 0, 0]))
      x_min, x_max = tf_cond.cond(
          x_1_is_min, lambda: (x_1, x_2), lambda: (x_2, x_1))
      boxes = array_ops.concat([y_min, x_min, y_max, x_max], axis=2)
  # TODO(@bhack): https://github.com/tensorflow/tensorflow/issues/56089
  # this will be required after deprecation
  # else:
  #   y_1, x_1, y_2, x_2 = array_ops.split(
  #       value=boxes, num_or_size_splits=4, axis=2)

  if not sorted_input:
    scores, boxes, sorted_indices = _sort_scores_and_boxes(scores, boxes)
  else:
    # Default value required for Autograph.
    sorted_indices = array_ops.zeros_like(scores, dtype=dtypes.int32)

  # Pad so the box count is a multiple of tile_size and at least
  # max_output_size.
  pad = math_ops.cast(
      math_ops.ceil(
          math_ops.cast(
              math_ops.maximum(num_boxes, max_output_size), dtypes.float32) /
          math_ops.cast(tile_size, dtypes.float32)),
      dtypes.int32) * tile_size - num_boxes
  boxes = array_ops.pad(
      math_ops.cast(boxes, dtypes.float32), [[0, 0], [0, pad], [0, 0]])
  scores = array_ops.pad(
      math_ops.cast(scores, dtypes.float32), [[0, 0], [0, pad]])
  num_boxes_after_padding = num_boxes + pad
  num_iterations = num_boxes_after_padding // tile_size
  def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
    return math_ops.logical_and(
        math_ops.reduce_min(output_size) < max_output_size,
        idx < num_iterations)

  def suppression_loop_body(boxes, iou_threshold, output_size, idx):
    return _suppression_loop_body(
        boxes, iou_threshold, output_size, idx, tile_size)

  # Suppress tile by tile; suppressed boxes end up with all-zero coordinates.
  selected_boxes, _, output_size, _ = while_loop.while_loop(
      _loop_cond,
      suppression_loop_body,
      [
          boxes, iou_threshold,
          array_ops.zeros([batch_size], dtypes.int32),
          constant_op.constant(0)
      ],
      shape_invariants=[
          tensor_shape.TensorShape([None, None, 4]),
          tensor_shape.TensorShape([]),
          tensor_shape.TensorShape([None]),
          tensor_shape.TensorShape([]),
      ],
  )
  num_valid = math_ops.minimum(output_size, max_output_size)
  # Recover the indices of surviving boxes: weight each surviving slot by a
  # descending ramp so top_k returns them in their original order, then map
  # the top_k positions back to indices.
  idx = num_boxes_after_padding - math_ops.cast(
      nn_ops.top_k(
          math_ops.cast(math_ops.reduce_any(
              selected_boxes > 0, [2]), dtypes.int32) *
          array_ops.expand_dims(
              math_ops.range(num_boxes_after_padding, 0, -1), 0),
          max_output_size)[0], dtypes.int32)
  # Clamp placeholder entries (from padding) into the valid index range;
  # they are overwritten with zeros below.
  idx = math_ops.minimum(idx, num_boxes - 1)

  if not sorted_input:
    # Translate positions in the sorted ordering back to input-order indices.
    index_offsets = math_ops.range(batch_size) * num_boxes
    gather_idx = array_ops.reshape(
        idx + array_ops.expand_dims(index_offsets, 1), [-1])
    idx = array_ops.reshape(
        array_ops.gather(array_ops.reshape(sorted_indices, [-1]),
                         gather_idx),
        [batch_size, -1])
  # Zero out entries beyond each image's valid detection count.
  invalid_index = array_ops.zeros([batch_size, max_output_size],
                                  dtype=dtypes.int32)
  idx_index = array_ops.expand_dims(math_ops.range(max_output_size), 0)
  num_valid_expanded = array_ops.expand_dims(num_valid, 1)
  idx = array_ops.where(idx_index < num_valid_expanded,
                        idx, invalid_index)

  num_valid = array_ops.reshape(num_valid, batch_dims)
  return idx, num_valid
def non_max_suppression_padded_v1(boxes,
                                  scores,
                                  max_output_size,
                                  iou_threshold=0.5,
                                  score_threshold=float('-inf'),
                                  pad_to_max_output_size=False,
                                  name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Algorithmically equivalent to tf.image.non_max_suppression, with an extra
  option to zero-pad the output indices to `max_output_size`. The result is a
  pair: integer indices into the input boxes, plus the count of valid entries
  in that index set. The selected box coordinates can then be recovered with
  `tf.slice` and `tf.gather`:

  ```python
  selected_indices_padded, num_valid = tf.image.non_max_suppression_padded(
      boxes, scores, max_output_size, iou_threshold,
      score_threshold, pad_to_max_output_size=True)
  selected_indices = tf.slice(
      selected_indices_padded, tf.constant([0]), num_valid)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  Args:
    boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
    scores: A 1-D float `Tensor` of shape `[num_boxes]` with one score per
      box (each row of boxes).
    max_output_size: A scalar integer `Tensor`; the maximum number of boxes
      selected by non-max suppression.
    iou_threshold: A float; boxes overlapping more than this IOU with a
      selected box are suppressed.
    score_threshold: A float; boxes scoring at or below this are removed.
    pad_to_max_output_size: bool. If True, `selected_indices` is padded to
      length `max_output_size`.
    name: A name for the operation (optional).

  Returns:
    selected_indices: A 1-D integer `Tensor` of shape `[M]` with the selected
      indices into the boxes tensor, where `M <= max_output_size`.
    valid_outputs: A scalar integer `Tensor`; how many leading elements of
      `selected_indices` are valid. Valid elements occur first, then padding.
  """
  with ops.name_scope(name, 'non_max_suppression_padded'):
    iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold')
    score_threshold = ops.convert_to_tensor(
        score_threshold, name='score_threshold')
    return gen_image_ops.non_max_suppression_v4(
        boxes=boxes,
        scores=scores,
        max_output_size=max_output_size,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        pad_to_max_output_size=pad_to_max_output_size)
@tf_export('image.draw_bounding_boxes', v1=[])
@dispatch.add_dispatch_support
def draw_bounding_boxes_v2(images, boxes, colors, name=None):
  """Draw bounding boxes on a batch of images.

  Returns a copy of `images` with zero or more bounding boxes drawn on top of
  the pixels, at the locations given in `boxes`. Each box is encoded as
  `[y_min, x_min, y_max, x_max]`, with coordinates expressed as floats in
  `[0.0, 1.0]` relative to the image height and width.

  For instance, given a 100 x 200 pixel image (height x width) and the box
  `[0.1, 0.2, 0.5, 0.9]`, the box's upper-left and bottom-right corners land
  at `(40, 10)` and `(180, 50)` in (x, y) coordinates. Portions of a box may
  extend beyond the image.

  Args:
    images: A `Tensor`. Must be one of the following types: `float32`, `half`.
      4-D with shape `[batch, height, width, depth]`. A batch of images.
    boxes: A `Tensor` of type `float32`. 3-D with shape `[batch,
      num_bounding_boxes, 4]` containing bounding boxes.
    colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle
      through for the boxes. May be `None`, in which case a default color
      scheme is used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `images`.

  Usage Example:

  >>> # create an empty image
  >>> img = tf.zeros([1, 3, 3, 3])
  >>> # draw a box around the image
  >>> box = np.array([0, 0, 1, 1])
  >>> boxes = box.reshape([1, 1, 4])
  >>> # alternate between red and blue
  >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
  >>> tf.image.draw_bounding_boxes(img, boxes, colors)
  <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy=
  array([[[[1., 0., 0.],
          [1., 0., 0.],
          [1., 0., 0.]],
          [[1., 0., 0.],
          [0., 0., 0.],
          [1., 0., 0.]],
          [[1., 0., 0.],
          [1., 0., 0.],
          [1., 0., 0.]]]], dtype=float32)>
  """
  # The v2 kernel takes an explicit color palette; fall back to the original
  # op (with its built-in palette) when no colors were supplied.
  if colors is not None:
    return gen_image_ops.draw_bounding_boxes_v2(images, boxes, colors, name)
  return gen_image_ops.draw_bounding_boxes(images, boxes, name)
@tf_export(v1=['image.draw_bounding_boxes'])
@dispatch.add_dispatch_support
def draw_bounding_boxes(images, boxes, name=None, colors=None):
  """Draw bounding boxes on a batch of images.

  TF1 endpoint; identical to `tf.image.draw_bounding_boxes` except that
  `name` precedes `colors` in the signature. Returns a copy of `images` with
  the boxes in `boxes` drawn over the pixels. Each box is encoded as
  `[y_min, x_min, y_max, x_max]`, with float coordinates in `[0.0, 1.0]`
  relative to the image height and width.

  For instance, given a 100 x 200 pixel image (height x width) and the box
  `[0.1, 0.2, 0.5, 0.9]`, the box's upper-left and bottom-right corners land
  at `(40, 10)` and `(180, 50)` in (x, y) coordinates. Portions of a box may
  extend beyond the image.

  Args:
    images: A `Tensor`. Must be one of the following types: `float32`, `half`.
      4-D with shape `[batch, height, width, depth]`. A batch of images.
    boxes: A `Tensor` of type `float32`. 3-D with shape `[batch,
      num_bounding_boxes, 4]` containing bounding boxes.
    name: A name for the operation (optional).
    colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle
      through for the boxes. May be `None` for a default color scheme.

  Returns:
    A `Tensor`. Has the same type as `images`.

  Usage Example:

  >>> # create an empty image
  >>> img = tf.zeros([1, 3, 3, 3])
  >>> # draw a box around the image
  >>> box = np.array([0, 0, 1, 1])
  >>> boxes = box.reshape([1, 1, 4])
  >>> # alternate between red and blue
  >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
  >>> tf.image.draw_bounding_boxes(img, boxes, colors)
  <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy=
  array([[[[1., 0., 0.],
          [1., 0., 0.],
          [1., 0., 0.]],
          [[1., 0., 0.],
          [0., 0., 0.],
          [1., 0., 0.]],
          [[1., 0., 0.],
          [1., 0., 0.],
          [1., 0., 0.]]]], dtype=float32)>
  """
  # Delegate to the v2 implementation, which handles the optional palette.
  return draw_bounding_boxes_v2(
      images=images, boxes=boxes, colors=colors, name=name)
@tf_export('image.generate_bounding_box_proposals')
@dispatch.add_dispatch_support
def generate_bounding_box_proposals(scores,
                                    bbox_deltas,
                                    image_info,
                                    anchors,
                                    nms_threshold=0.7,
                                    pre_nms_topn=6000,
                                    min_size=16,
                                    post_nms_topn=300,
                                    name=None):
  """Generate bounding box proposals from encoded bounding boxes.

  Thin wrapper over the `GenerateBoundingBoxProposals` op: decodes
  `bbox_deltas` against `anchors`, filters and scores the candidates, and
  applies non-max suppression.

  Args:
    scores: A 4-D float `Tensor` of shape
      `[num_images, height, width, num_achors]` containing scores of
      the boxes for given anchors, can be unsorted.
    bbox_deltas: A 4-D float `Tensor` of shape
      `[num_images, height, width, 4 x num_anchors]` encoding boxes
      with respect to each anchor. Coordinates are given
      in the form `[dy, dx, dh, dw]`.
    image_info: A 2-D float `Tensor` of shape `[num_images, 5]`
      containing image information Height, Width, Scale.
    anchors: A 2-D float `Tensor` of shape `[num_anchors, 4]`
      describing the anchor boxes.
      Boxes are formatted in the form `[y1, x1, y2, x2]`.
    nms_threshold: A scalar float `Tensor` for non-maximal-suppression
      threshold. Defaults to 0.7.
    pre_nms_topn: A scalar int `Tensor` for the number of
      top scoring boxes to be used as input. Defaults to 6000.
    min_size: A scalar float `Tensor`. Any box that has a smaller size
      than min_size will be discarded. Defaults to 16.
    post_nms_topn: An integer. Maximum number of rois in the output.
    name: A name for this operation (optional).

  Returns:
    rois: Region of interest boxes sorted by their scores.
    roi_probabilities: scores of the ROI boxes in the ROIs' `Tensor`.
  """
  op_kwargs = dict(
      scores=scores,
      bbox_deltas=bbox_deltas,
      image_info=image_info,
      anchors=anchors,
      nms_threshold=nms_threshold,
      pre_nms_topn=pre_nms_topn,
      min_size=min_size,
      post_nms_topn=post_nms_topn,
      name=name)
  return gen_image_ops.generate_bounding_box_proposals(**op_kwargs)
| ResizeMethod |
python | kamyu104__LeetCode-Solutions | Python/the-number-of-the-smallest-unoccupied-chair.py | {
"start": 48,
"end": 762
} | class ____(object):
def smallestChair(self, times, targetFriend):
"""
:type times: List[List[int]]
:type targetFriend: int
:rtype: int
"""
events = []
for i, (s, e) in enumerate(times):
events.append((s, True, i))
events.append((e, False, i))
events.sort()
lookup = {}
min_heap = []
for _, arrival, i in events:
if not arrival:
heapq.heappush(min_heap, lookup.pop(i))
continue
lookup[i] = heapq.heappop(min_heap) if min_heap else len(lookup)
if i == targetFriend:
break
return lookup[targetFriend]
| Solution |
python | openai__openai-python | src/openai/resources/responses/responses.py | {
"start": 155426,
"end": 156373
} | class ____:
def __init__(self, responses: Responses) -> None:
self._responses = responses
self.create = _legacy_response.to_raw_response_wrapper(
responses.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
responses.retrieve,
)
self.delete = _legacy_response.to_raw_response_wrapper(
responses.delete,
)
self.cancel = _legacy_response.to_raw_response_wrapper(
responses.cancel,
)
self.parse = _legacy_response.to_raw_response_wrapper(
responses.parse,
)
@cached_property
def input_items(self) -> InputItemsWithRawResponse:
return InputItemsWithRawResponse(self._responses.input_items)
@cached_property
def input_tokens(self) -> InputTokensWithRawResponse:
return InputTokensWithRawResponse(self._responses.input_tokens)
| ResponsesWithRawResponse |
python | matplotlib__matplotlib | lib/matplotlib/contour.py | {
"start": 2158,
"end": 23794
} | class ____:
"""Mixin to provide labelling capability to `.ContourSet`."""
def clabel(self, levels=None, *,
fontsize=None, inline=True, inline_spacing=5, fmt=None,
colors=None, use_clabeltext=False, manual=False,
rightside_up=True, zorder=None):
"""
Label a contour plot.
Adds labels to line contours in this `.ContourSet` (which inherits from
this mixin class).
Parameters
----------
levels : array-like, optional
A list of level values, that should be labeled. The list must be
a subset of ``cs.levels``. If not given, all levels are labeled.
fontsize : str or float, default: :rc:`font.size`
Size in points or relative size e.g., 'small', 'x-large'.
See `.Text.set_size` for accepted string values.
colors : :mpltype:`color` or colors or None, default: None
The label colors:
- If *None*, the color of each label matches the color of
the corresponding contour.
- If one string color, e.g., *colors* = 'r' or *colors* =
'red', all labels will be plotted in this color.
- If a tuple of colors (string, float, RGB, etc), different labels
will be plotted in different colors in the order specified.
inline : bool, default: True
If ``True`` the underlying contour is removed where the label is
placed.
inline_spacing : float, default: 5
Space in pixels to leave on each side of label when placing inline.
This spacing will be exact for labels at locations where the
contour is straight, less so for labels on curved contours.
fmt : `.Formatter` or str or callable or dict, optional
How the levels are formatted:
- If a `.Formatter`, it is used to format all levels at once, using
its `.Formatter.format_ticks` method.
- If a str, it is interpreted as a %-style format string.
- If a callable, it is called with one level at a time and should
return the corresponding label.
- If a dict, it should directly map levels to labels.
The default is to use a standard `.ScalarFormatter`.
manual : bool or iterable, default: False
If ``True``, contour labels will be placed manually using
mouse clicks. Click the first button near a contour to
add a label, click the second button (or potentially both
mouse buttons at once) to finish adding labels. The third
button can be used to remove the last label added, but
only if labels are not inline. Alternatively, the keyboard
can be used to select label locations (enter to end label
placement, delete or backspace act like the third mouse button,
and any other key will select a label location).
*manual* can also be an iterable object of (x, y) tuples.
Contour labels will be created as if mouse is clicked at each
(x, y) position.
rightside_up : bool, default: True
If ``True``, label rotations will always be plus
or minus 90 degrees from level.
use_clabeltext : bool, default: False
If ``True``, use `.Text.set_transform_rotates_text` to ensure that
label rotation is updated whenever the Axes aspect changes.
zorder : float or None, default: ``(2 + contour.get_zorder())``
zorder of the contour labels.
Returns
-------
labels
A list of `.Text` instances for the labels.
"""
# Based on the input arguments, clabel() adds a list of "label
# specific" attributes to the ContourSet object. These attributes are
# all of the form label* and names should be fairly self explanatory.
#
# Once these attributes are set, clabel passes control to the labels()
# method (for automatic label placement) or blocking_input_loop and
# _contour_labeler_event_handler (for manual label placement).
if fmt is None:
fmt = ticker.ScalarFormatter(useOffset=False)
fmt.create_dummy_axis()
self.labelFmt = fmt
self._use_clabeltext = use_clabeltext
self.labelManual = manual
self.rightside_up = rightside_up
self._clabel_zorder = 2 + self.get_zorder() if zorder is None else zorder
if levels is None:
levels = self.levels
indices = list(range(len(self.cvalues)))
else:
levlabs = list(levels)
indices, levels = [], []
for i, lev in enumerate(self.levels):
if lev in levlabs:
indices.append(i)
levels.append(lev)
if len(levels) < len(levlabs):
raise ValueError(f"Specified levels {levlabs} don't match "
f"available levels {self.levels}")
self.labelLevelList = levels
self.labelIndiceList = indices
self._label_font_props = font_manager.FontProperties(size=fontsize)
if colors is None:
self.labelMappable = self
self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
else:
# handling of explicit colors for labels:
# make labelCValueList contain integers [0, 1, 2, ...] and a cmap
# so that cmap(i) == colors[i]
num_levels = len(self.labelLevelList)
colors = cbook._resize_sequence(mcolors.to_rgba_array(colors), num_levels)
self.labelMappable = cm.ScalarMappable(
cmap=mcolors.ListedColormap(colors), norm=mcolors.NoNorm())
self.labelCValueList = list(range(num_levels))
self.labelXYs = []
if np.iterable(manual):
for x, y in manual:
self.add_label_near(x, y, inline, inline_spacing)
elif manual:
print('Select label locations manually using first mouse button.')
print('End manual selection with second mouse button.')
if not inline:
print('Remove last label by clicking third mouse button.')
mpl._blocking_input.blocking_input_loop(
self.axes.get_figure(root=True),
["button_press_event", "key_press_event"],
timeout=-1, handler=functools.partial(
_contour_labeler_event_handler,
self, inline, inline_spacing))
else:
self.labels(inline, inline_spacing)
return cbook.silent_list('text.Text', self.labelTexts)
def print_label(self, linecontour, labelwidth):
"""Return whether a contour is long enough to hold a label."""
return (len(linecontour) > 10 * labelwidth
or (len(linecontour)
and (np.ptp(linecontour, axis=0) > 1.2 * labelwidth).any()))
def too_close(self, x, y, lw):
"""Return whether a label is already near this location."""
thresh = (1.2 * lw) ** 2
return any((x - loc[0]) ** 2 + (y - loc[1]) ** 2 < thresh
for loc in self.labelXYs)
def _get_nth_label_width(self, nth):
"""Return the width of the *nth* label, in pixels."""
fig = self.axes.get_figure(root=False)
renderer = fig.get_figure(root=True)._get_renderer()
return (Text(0, 0,
self.get_text(self.labelLevelList[nth], self.labelFmt),
figure=fig, fontproperties=self._label_font_props)
.get_window_extent(renderer).width)
def get_text(self, lev, fmt):
"""Get the text of the label."""
if isinstance(lev, str):
return lev
elif isinstance(fmt, dict):
return fmt.get(lev, '%1.3f')
elif callable(getattr(fmt, "format_ticks", None)):
return fmt.format_ticks([*self.labelLevelList, lev])[-1]
elif callable(fmt):
return fmt(lev)
else:
return fmt % lev
def locate_label(self, linecontour, labelwidth):
"""
Find good place to draw a label (relatively flat part of the contour).
"""
ctr_size = len(linecontour)
n_blocks = int(np.ceil(ctr_size / labelwidth)) if labelwidth > 1 else 1
block_size = ctr_size if n_blocks == 1 else int(labelwidth)
# Split contour into blocks of length ``block_size``, filling the last
# block by cycling the contour start (per `np.resize` semantics). (Due
# to cycling, the index returned is taken modulo ctr_size.)
xx = np.resize(linecontour[:, 0], (n_blocks, block_size))
yy = np.resize(linecontour[:, 1], (n_blocks, block_size))
yfirst = yy[:, :1]
ylast = yy[:, -1:]
xfirst = xx[:, :1]
xlast = xx[:, -1:]
s = (yfirst - yy) * (xlast - xfirst) - (xfirst - xx) * (ylast - yfirst)
l = np.hypot(xlast - xfirst, ylast - yfirst)
# Ignore warning that divide by zero throws, as this is a valid option
with np.errstate(divide='ignore', invalid='ignore'):
distances = (abs(s) / l).sum(axis=-1)
# Labels are drawn in the middle of the block (``hbsize``) where the
# contour is the closest (per ``distances``) to a straight line, but
# not `too_close()` to a preexisting label.
hbsize = block_size // 2
adist = np.argsort(distances)
# If all candidates are `too_close()`, go back to the straightest part
# (``adist[0]``).
for idx in np.append(adist, adist[0]):
x, y = xx[idx, hbsize], yy[idx, hbsize]
if not self.too_close(x, y, labelwidth):
break
return x, y, (idx * block_size + hbsize) % ctr_size
def _split_path_and_get_label_rotation(self, path, idx, screen_pos, lw, spacing=5):
"""
Prepare for insertion of a label at index *idx* of *path*.
Parameters
----------
path : Path
The path where the label will be inserted, in data space.
idx : int
The vertex index after which the label will be inserted.
screen_pos : (float, float)
The position where the label will be inserted, in screen space.
lw : float
The label width, in screen space.
spacing : float
Extra spacing around the label, in screen space.
Returns
-------
path : Path
The path, broken so that the label can be drawn over it.
angle : float
The rotation of the label.
Notes
-----
Both tasks are done together to avoid calculating path lengths multiple times,
which is relatively costly.
The method used here involves computing the path length along the contour in
pixel coordinates and then looking (label width / 2) away from central point to
determine rotation and then to break contour if desired. The extra spacing is
taken into account when breaking the path, but not when computing the angle.
"""
xys = path.vertices
codes = path.codes
# Insert a vertex at idx/pos (converting back to data space), if there isn't yet
# a vertex there. With infinite precision one could also always insert the
# extra vertex (it will get masked out by the label below anyways), but floating
# point inaccuracies (the point can have undergone a data->screen->data
# transform loop) can slightly shift the point and e.g. shift the angle computed
# below from exactly zero to nonzero.
pos = self.get_transform().inverted().transform(screen_pos)
if not np.allclose(pos, xys[idx]):
xys = np.insert(xys, idx, pos, axis=0)
codes = np.insert(codes, idx, Path.LINETO)
# Find the connected component where the label will be inserted. Note that a
# path always starts with a MOVETO, and we consider there's an implicit
# MOVETO (closing the last path) at the end.
movetos = (codes == Path.MOVETO).nonzero()[0]
start = movetos[movetos <= idx][-1]
try:
stop = movetos[movetos > idx][0]
except IndexError:
stop = len(codes)
# Restrict ourselves to the connected component.
cc_xys = xys[start:stop]
idx -= start
# If the path is closed, rotate it s.t. it starts at the label.
is_closed_path = codes[stop - 1] == Path.CLOSEPOLY
if is_closed_path:
cc_xys = np.concatenate([cc_xys[idx:-1], cc_xys[:idx+1]])
idx = 0
# Like np.interp, but additionally vectorized over fp.
def interp_vec(x, xp, fp): return [np.interp(x, xp, col) for col in fp.T]
# Use cumulative path lengths ("cpl") as curvilinear coordinate along contour.
screen_xys = self.get_transform().transform(cc_xys)
path_cpls = np.insert(
np.cumsum(np.hypot(*np.diff(screen_xys, axis=0).T)), 0, 0)
path_cpls -= path_cpls[idx]
# Use linear interpolation to get end coordinates of label.
target_cpls = np.array([-lw/2, lw/2])
if is_closed_path: # For closed paths, target from the other end.
target_cpls[0] += (path_cpls[-1] - path_cpls[0])
(sx0, sx1), (sy0, sy1) = interp_vec(target_cpls, path_cpls, screen_xys)
angle = np.rad2deg(np.arctan2(sy1 - sy0, sx1 - sx0)) # Screen space.
if self.rightside_up: # Fix angle so text is never upside-down
angle = (angle + 90) % 180 - 90
target_cpls += [-spacing, +spacing] # Expand range by spacing.
# Get indices near points of interest; use -1 as out of bounds marker.
i0, i1 = np.interp(target_cpls, path_cpls, range(len(path_cpls)),
left=-1, right=-1)
i0 = math.floor(i0)
i1 = math.ceil(i1)
(x0, x1), (y0, y1) = interp_vec(target_cpls, path_cpls, cc_xys)
# Actually break contours (dropping zero-len parts).
new_xy_blocks = []
new_code_blocks = []
if is_closed_path:
if i0 != -1 and i1 != -1:
# This is probably wrong in the case that the entire contour would
# be discarded, but ensures that a valid path is returned and is
# consistent with behavior of mpl <3.8
points = cc_xys[i1:i0+1]
new_xy_blocks.extend([[(x1, y1)], points, [(x0, y0)]])
nlines = len(points) + 1
new_code_blocks.extend([[Path.MOVETO], [Path.LINETO] * nlines])
else:
if i0 != -1:
new_xy_blocks.extend([cc_xys[:i0 + 1], [(x0, y0)]])
new_code_blocks.extend([[Path.MOVETO], [Path.LINETO] * (i0 + 1)])
if i1 != -1:
new_xy_blocks.extend([[(x1, y1)], cc_xys[i1:]])
new_code_blocks.extend([
[Path.MOVETO], [Path.LINETO] * (len(cc_xys) - i1)])
# Back to the full path.
xys = np.concatenate([xys[:start], *new_xy_blocks, xys[stop:]])
codes = np.concatenate([codes[:start], *new_code_blocks, codes[stop:]])
return angle, Path(xys, codes)
def add_label(self, x, y, rotation, lev, cvalue):
"""Add a contour label, respecting whether *use_clabeltext* was set."""
data_x, data_y = self.axes.transData.inverted().transform((x, y))
t = Text(
data_x, data_y,
text=self.get_text(lev, self.labelFmt),
rotation=rotation,
horizontalalignment='center', verticalalignment='center',
zorder=self._clabel_zorder,
color=self.labelMappable.to_rgba(cvalue, alpha=self.get_alpha()),
fontproperties=self._label_font_props,
clip_box=self.axes.bbox)
if self._use_clabeltext:
data_rotation, = self.axes.transData.inverted().transform_angles(
[rotation], [[x, y]])
t.set(rotation=data_rotation, transform_rotates_text=True)
self.labelTexts.append(t)
self.labelCValues.append(cvalue)
self.labelXYs.append((x, y))
# Add label to plot here - useful for manual mode label selection
self.axes.add_artist(t)
def add_label_near(self, x, y, inline=True, inline_spacing=5,
transform=None):
"""
Add a label near the point ``(x, y)``.
Parameters
----------
x, y : float
The approximate location of the label.
inline : bool, default: True
If *True* remove the segment of the contour beneath the label.
inline_spacing : int, default: 5
Space in pixels to leave on each side of label when placing
inline. This spacing will be exact for labels at locations where
the contour is straight, less so for labels on curved contours.
transform : `.Transform` or `False`, default: ``self.axes.transData``
A transform applied to ``(x, y)`` before labeling. The default
causes ``(x, y)`` to be interpreted as data coordinates. `False`
is a synonym for `.IdentityTransform`; i.e. ``(x, y)`` should be
interpreted as display coordinates.
"""
if transform is None:
transform = self.axes.transData
if transform:
x, y = transform.transform((x, y))
idx_level_min, idx_vtx_min, proj = self._find_nearest_contour(
(x, y), self.labelIndiceList)
path = self._paths[idx_level_min]
level = self.labelIndiceList.index(idx_level_min)
label_width = self._get_nth_label_width(level)
rotation, path = self._split_path_and_get_label_rotation(
path, idx_vtx_min, proj, label_width, inline_spacing)
self.add_label(*proj, rotation, self.labelLevelList[idx_level_min],
self.labelCValueList[idx_level_min])
if inline:
self._paths[idx_level_min] = path
def pop_label(self, index=-1):
"""Defaults to removing last label, but any index can be supplied"""
self.labelCValues.pop(index)
t = self.labelTexts.pop(index)
t.remove()
def labels(self, inline, inline_spacing):
for idx, (icon, lev, cvalue) in enumerate(zip(
self.labelIndiceList,
self.labelLevelList,
self.labelCValueList,
)):
trans = self.get_transform()
label_width = self._get_nth_label_width(idx)
additions = []
for subpath in self._paths[icon]._iter_connected_components():
screen_xys = trans.transform(subpath.vertices)
# Check if long enough for a label
if self.print_label(screen_xys, label_width):
x, y, idx = self.locate_label(screen_xys, label_width)
rotation, path = self._split_path_and_get_label_rotation(
subpath, idx, (x, y),
label_width, inline_spacing)
self.add_label(x, y, rotation, lev, cvalue) # Really add label.
if inline: # If inline, add new contours
additions.append(path)
else: # If not adding label, keep old path
additions.append(subpath)
# After looping over all segments on a contour, replace old path by new one
# if inlining.
if inline:
self._paths[icon] = Path.make_compound_path(*additions)
def remove(self):
super().remove()
for text in self.labelTexts:
text.remove()
def _find_closest_point_on_path(xys, p):
"""
Parameters
----------
xys : (N, 2) array-like
Coordinates of vertices.
p : (float, float)
Coordinates of point.
Returns
-------
d2min : float
Minimum square distance of *p* to *xys*.
proj : (float, float)
Projection of *p* onto *xys*.
imin : (int, int)
Consecutive indices of vertices of segment in *xys* where *proj* is.
Segments are considered as including their end-points; i.e. if the
closest point on the path is a node in *xys* with index *i*, this
returns ``(i-1, i)``. For the special case where *xys* is a single
point, this returns ``(0, 0)``.
"""
if len(xys) == 1:
return (((p - xys[0]) ** 2).sum(), xys[0], (0, 0))
dxys = xys[1:] - xys[:-1] # Individual segment vectors.
norms = (dxys ** 2).sum(axis=1)
norms[norms == 0] = 1 # For zero-length segment, replace 0/0 by 0/1.
rel_projs = np.clip( # Project onto each segment in relative 0-1 coords.
((p - xys[:-1]) * dxys).sum(axis=1) / norms,
0, 1)[:, None]
projs = xys[:-1] + rel_projs * dxys # Projs. onto each segment, in (x, y).
d2s = ((projs - p) ** 2).sum(axis=1) # Squared distances.
imin = np.argmin(d2s)
return (d2s[imin], projs[imin], (imin, imin+1))
_docstring.interpd.register(contour_set_attributes=r"""
Attributes
----------
levels : array
The values of the contour levels.
layers : array
Same as levels for line contours; half-way between
levels for filled contours. See ``ContourSet._process_colors``.
""")
@_docstring.interpd
| ContourLabeler |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/owners.py | {
"start": 286,
"end": 443
} | class ____(graphene.ObjectType):
class Meta:
name = "TeamDefinitionOwner"
team = graphene.NonNull(graphene.String)
| GrapheneTeamDefinitionOwner |
python | kamyu104__LeetCode-Solutions | Python/partition-array-for-maximum-xor-and-and.py | {
"start": 60,
"end": 1532
} | class ____(object):
def maximizeXorAndXor(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def max_xor_subset(nums): # Time: O(nlogr)
base = [0]*l
for x in nums: # gaussian elimination over GF(2)
for i in reversed(xrange(len(base))):
if not x&(1<<i):
continue
if base[i] == 0:
base[i] = x
break
x ^= base[i]
max_xor = 0
for b in reversed(base): # greedy
if (max_xor^b) > max_xor:
max_xor ^= b
return max_xor
l = max(nums).bit_length()
n = len(nums)
and_arr = [0]*(1<<n)
xor_arr = [0]*(1<<n)
for mask in xrange(1, 1<<n):
lb = mask&-mask
i = lb.bit_length()-1
and_arr[mask] = and_arr[mask^lb]&nums[i] if mask^lb else nums[i]
xor_arr[mask] = xor_arr[mask^lb]^nums[i]
result = 0
full_mask = (1<<n)-1
for mask in xrange(1, 1<<n):
total_and = and_arr[mask]
total_xor = xor_arr[full_mask^mask]
max_xor = max_xor_subset(((nums[i]&~total_xor) for i in xrange(n) if not (mask&(1<<i))))
result = max(result, total_and+total_xor+2*max_xor)
return result
# Time: O(nlogr * 2^n)
# Space: O(1)
# bitmasks, greedy
| Solution |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 49906,
"end": 52776
} | class ____(Operation):
def __init__(
self, num_classes, axis=-1, dtype=None, sparse=False, *, name=None
):
super().__init__(name=name)
self.num_classes = num_classes
self.axis = axis
self.dtype = backend.standardize_dtype(dtype)
self.sparse = sparse
def call(self, x):
return backend.nn.one_hot(
x,
self.num_classes,
axis=self.axis,
dtype=self.dtype,
sparse=self.sparse,
)
def compute_output_spec(self, x):
x_shape = list(getattr(x, "shape", []))
if self.axis == -1:
x_shape.append(self.num_classes)
elif self.axis >= 0 and self.axis < len(x_shape):
x_shape.insert(self.axis, self.num_classes)
else:
raise ValueError(
f"axis must be -1 or between [0, {len(x.shape)}), but "
f"received {self.axis}."
)
return KerasTensor(x_shape, dtype=self.dtype, sparse=self.sparse)
@keras_export(["keras.ops.one_hot", "keras.ops.nn.one_hot"])
def one_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
"""Converts integer tensor `x` into a one-hot tensor.
The one-hot encoding is a representation where each integer value is
converted into a binary vector with a length equal to `num_classes`,
and the index corresponding to the integer value is marked as 1, while
all other indices are marked as 0.
Args:
x: Integer tensor to be encoded. The shape can be
arbitrary, but the dtype should be integer.
num_classes: Number of classes for the one-hot encoding.
axis: Axis along which the encoding is performed.
`-1` represents the last axis. Defaults to `-1`.
dtype: (Optional) Data type of the output tensor. If not
provided, it defaults to the default data type of the backend.
sparse: Whether to return a sparse tensor; for backends that support
sparse tensors.
Returns:
Integer tensor: One-hot encoded tensor with the same shape as `x`
except for the specified `axis` dimension, which will have
a length of `num_classes`. The dtype of the output tensor
is determined by `dtype` or the default data type of the backend.
Example:
>>> x = keras.ops.convert_to_tensor([1, 3, 2, 0])
>>> one_hot(x, num_classes=4)
array([[0. 1. 0. 0.]
[0. 0. 0. 1.]
[0. 0. 1. 0.]
[1. 0. 0. 0.]], shape=(4, 4), dtype=float32)
"""
if any_symbolic_tensors((x,)):
return OneHot(
num_classes, axis=axis, dtype=dtype, sparse=sparse
).symbolic_call(x)
return backend.nn.one_hot(
x,
num_classes,
axis=axis,
dtype=dtype or backend.floatx(),
sparse=sparse,
)
| OneHot |
python | openai__openai-python | src/openai/types/responses/response_input_file_content.py | {
"start": 229,
"end": 743
} | class ____(BaseModel):
type: Literal["input_file"]
"""The type of the input item. Always `input_file`."""
file_data: Optional[str] = None
"""The base64-encoded data of the file to be sent to the model."""
file_id: Optional[str] = None
"""The ID of the file to be sent to the model."""
file_url: Optional[str] = None
"""The URL of the file to be sent to the model."""
filename: Optional[str] = None
"""The name of the file to be sent to the model."""
| ResponseInputFileContent |
python | pytorch__pytorch | torch/_inductor/codegen/rocm/ck_universal_gemm_template.py | {
"start": 1864,
"end": 39672
} | class ____(CKTemplate):
# the JINJA template for rendering CK Universal GEMMs
gemm_template = r"""{{version_comment}}
{{headers}}
{{globals}}
{{instance_definition}}
extern "C" {
PT_EXPORT {{kernel_definition}} {
auto gemm = {{instance_type}} {};
auto invoker = gemm.MakeInvoker();
{% if is_batched %}
auto argument = gemm.MakeArgument(
reinterpret_cast<const {{a_element_dtype}}*>(X),
reinterpret_cast<const {{b_element_dtype}}*>(W),
std::array<const void*, {{ds_size}}>{ {{ds_names}} },
reinterpret_cast<{{c_element_dtype}}*>(Y),
M,
N,
K,
B,
LDA,
LDB,
std::array<ck::index_t, {{ds_size}}>{ {{ds_strides}} },
LDC,
M * K, // batch_stride_A
N * K, // batch_stride_B
std::array<ck::index_t, {{ds_size}}>{ {{ds_batch_strides}} },
M * N, // batch_stride_C
{{a_elementwise_op}},
{{b_elementwise_op}},
{{epilogue}} // c_elementwise_op
);
{% else %}
auto argument = gemm.MakeArgument(
reinterpret_cast<const {{a_element_dtype}}*>(X),
reinterpret_cast<const {{b_element_dtype}}*>(W),
std::array<const void*, {{ds_size}}>{ {{ds_names}} },
reinterpret_cast<{{c_element_dtype}}*>(Y),
M,
N,
K,
LDA,
LDB,
std::array<ck::index_t, {{ds_size}}>{ {{ds_strides}} },
LDC,
kBatch, // kBatch
{{a_elementwise_op}},
{{b_elementwise_op}},
{{epilogue}} // c_elementwise_op
);
{% endif %}
if (!gemm.IsSupportedArgument(argument)) {
// we do our best to statically avoid this case in `filter_op`
std::cerr << "invalid argument for gemm instance " << gemm.GetTypeString() << std::endl;
argument.Print();
return -23;
}
if (workspace_size) {
*workspace_size = gemm.GetWorkSpaceSize(&argument);
return 0;
}
// run the kernel
#ifdef GENERATE_CK_STANDALONE_RUNNER
const auto stream_config = StreamConfig{
stream,
/* time kernel */ 1,
/* log level */ 1,
/* n_cold_iter */ 100,
/* n_hot_iter */ 100,
/* flush_l2_cache */ 1,
/* rotate_count */ 5};
#else
const auto stream_config = StreamConfig{stream, /* time kernel */ false, /* log level */ 0};
#endif
const float elapsed_time = invoker.Run(argument, stream_config);
#ifdef GENERATE_CK_STANDALONE_RUNNER
std::cout << "elapsed time: " << elapsed_time << " ms" << std::endl;
#else
(void)elapsed_time;
#endif
return 0;
} // kernel definition
} // extern C
"""
standalone_runner_template = r"""
#ifdef GENERATE_CK_STANDALONE_RUNNER
// standalone runner for the generated CK GEMM kernel
{{inline_utils}}
extern "C" {
int run_main(int argc, char** argv) {
{% if is_batched %}
const int32_t B = {{B}};
{% endif %}
const int32_t M = {{M}};
const int32_t N = {{N}};
const int32_t K = {{K}};
const int32_t LDA = {{LDA}};
const int32_t LDB = {{LDB}};
const int32_t LDC = {{LDC}};
const int32_t LDD = {{LDD}};
const int32_t kBatch = {{kBatch}};
using AElementType = {{a_ck_dtype}};
using BElementType = {{b_ck_dtype}};
using CElementType = {{c_ck_dtype}};
{% if has_bias %}
using BiasElementType = {{bias_ck_dtype}};
{% endif %}
{% if has_scale %}
using ScaleAElementType = {{scale_a_ck_dtype}};
using ScaleBElementType = {{scale_b_ck_dtype}};
{% endif %}
using AArgType = {{a_torch_dtype}};
using BArgType = {{b_torch_dtype}};
using CArgType = {{c_torch_dtype}};
{% if has_bias %}
using BiasArgType = {{bias_torch_dtype}};
{% endif %}
{% if has_scale %}
using ScaleAArgType = {{scale_a_torch_dtype}};
using ScaleBArgType = {{scale_b_torch_dtype}};
{% endif %}
using ALayout = {{a_layout}};
using BLayout = {{b_layout}};
using CLayout = {{c_layout}};
{% if has_bias %}
using BiasLayout = {{bias_layout}};
{% endif %}
{% if is_batched %}
using strides_t = std::array<int32_t, 3>;
auto get_strides = [](int32_t batch_stride, int32_t leading_dimension, auto layout) constexpr -> strides_t {
if constexpr (std::is_same_v<decltype(layout), Row>) {
return {batch_stride, leading_dimension, 1};
}
return {batch_stride, 1, leading_dimension};
};
auto a_size = strides_t{B, M, K};
auto a_stride = get_strides(M * K, LDA, ALayout{});
auto b_size = strides_t{B, N, K};
auto b_stride = get_strides(N * K, LDB, BLayout{});
auto c_size = strides_t{B, M, N};
auto c_stride = get_strides(M * N, LDC, CLayout{});
{% else %}
using strides_t = std::array<int32_t, 2>;
auto get_strides = [](int32_t leading_dimension, auto layout) constexpr -> strides_t {
if constexpr (std::is_same_v<decltype(layout), Row>) {
return {leading_dimension, 1};
}
return {1, leading_dimension};
};
auto a_size = strides_t{M, K};
auto a_stride = get_strides(LDA, ALayout{});
auto b_size = strides_t{N, K};
auto b_stride = get_strides(LDB, BLayout{});
auto c_size = strides_t{M, N};
auto c_stride = get_strides(LDC, CLayout{});
{% endif %}
Tensor<AElementType> a_m_k ( HostTensorDescriptor ( a_size, a_stride ) );
Tensor<BElementType> b_k_n ( HostTensorDescriptor ( b_size, b_stride ) );
{% if has_bias %}
Tensor<BiasElementType> d_m_n ( HostTensorDescriptor ( c_size, get_strides(LDD, BiasLayout{}) ) );
{% endif %}
{% if has_scale %}
// NB: these are hardcoded
Tensor<ScaleAElementType> s_a_m_n ( HostTensorDescriptor ( strides_t{M, N}, get_strides(0, Row{}) ));
Tensor<ScaleAElementType> s_b_m_n ( HostTensorDescriptor ( strides_t{M, N}, get_strides(0, Col{}) ));
{% endif %}
Tensor<CElementType> c_m_n_host ( HostTensorDescriptor ( c_size, c_stride ) );
Tensor<CElementType> c_m_n_device ( HostTensorDescriptor ( c_size, c_stride ) );
a_m_k.GenerateTensorValue(GeneratorTensor_2<AElementType>());
b_k_n.GenerateTensorValue(GeneratorTensor_2<BElementType>());
{% if has_bias %}
d_m_n.GenerateTensorValue(GeneratorTensor_2<BiasElementType>());
{% endif %}
{% if has_scale %}
s_a_m_n.GenerateTensorValue(GeneratorTensor_2<ScaleAElementType>());
s_b_m_n.GenerateTensorValue(GeneratorTensor_2<ScaleBElementType>());
{% endif %}
DeviceMem a_m_k_device_buf(sizeof(AElementType) * a_m_k.mDesc.GetElementSpaceSize());
DeviceMem b_k_n_device_buf(sizeof(BElementType) * b_k_n.mDesc.GetElementSpaceSize());
{% if has_bias %}
DeviceMem d_m_n_device_buf(sizeof(BiasElementType) * d_m_n.mDesc.GetElementSpaceSize());
{% endif %}
{% if has_scale %}
DeviceMem s_a_m_n_device_buf(sizeof(ScaleAElementType) * s_a_m_n.mDesc.GetElementSpaceSize());
DeviceMem s_b_m_n_device_buf(sizeof(ScaleBElementType) * s_b_m_n.mDesc.GetElementSpaceSize());
{% endif %}
DeviceMem c_m_n_device_buf(sizeof(CElementType) * c_m_n_device.mDesc.GetElementSpaceSize());
a_m_k_device_buf.ToDevice(a_m_k.mData.data());
b_k_n_device_buf.ToDevice(b_k_n.mData.data());
{% if has_bias %}
d_m_n_device_buf.ToDevice(d_m_n.mData.data());
{% endif %}
{% if has_scale %}
s_a_m_n_device_buf.ToDevice(s_a_m_n.mData.data());
s_b_m_n_device_buf.ToDevice(s_b_m_n.mData.data());
{% endif %}
{{kernel_name}}(
static_cast<const AArgType*>(a_m_k_device_buf.GetDeviceBuffer()),
static_cast<const BArgType*>(b_k_n_device_buf.GetDeviceBuffer()),
{% if has_scale %}
static_cast<const ScaleAArgType*>(s_a_m_n_device_buf.GetDeviceBuffer()),
static_cast<const ScaleBArgType*>(s_b_m_n_device_buf.GetDeviceBuffer()),
{% endif %}
{% if has_bias %}
static_cast<const BiasArgType*>(d_m_n_device_buf.GetDeviceBuffer()),
{% endif %}
static_cast<CArgType*>(c_m_n_device_buf.GetDeviceBuffer()),
{% if is_batched %}
B,
{% endif %}
M,
N,
K,
LDA,
LDB,
LDC,
LDD,
nullptr, // workspace_size
nullptr, // workspace
nullptr); // stream
hip_check_error(hipDeviceSynchronize());
return 0;
} // run_main
} // extern C
int main(int argc, char** argv) {
return run_main(argc, argv);
}
// compile with: {{compile_cmd}}
#endif // GENERATE_CK_STANDALONE_RUNNER
"""
def __init__(
self,
input_nodes: list[Buffer],
layout: Layout,
alpha: float,
beta: float,
input_reorder: Optional[list[int]] = None,
) -> None:
is_batched = len(layout.size) == 3
name = "ck_batched_gemm_template" if is_batched else "ck_gemm_template"
super().__init__(
name=name,
input_nodes=input_nodes,
layout=layout,
input_reorder=input_reorder,
)
self.alpha = alpha
self.beta = beta
self.is_batched = is_batched
def header(self) -> IndentedBuffer:
res = super().header()
if self.is_batched:
res.splice(
"""
// CK GEMM header(s)
#include "ck/tensor_operation/gpu/device/impl/device_batched_gemm_multiple_d_xdl_cshuffle_v3.hpp"
"""
)
else:
res.splice(
"""
// CK GEMM header(s)
#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_xdl_cshuffle_v3.hpp"
"""
)
return res
def globals(self) -> IndentedBuffer:
res = super().globals()
res.splice(
"""
// CK GEMM globals
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
using BlockGemmPipelineScheduler = ck::BlockGemmPipelineScheduler;
using GemmSpecialization = ck::tensor_operation::device::GemmSpecialization;
using BlockGemmPipelineVersion = ck::BlockGemmPipelineVersion;
struct MultiplyMultiplyAdd {
template <typename E, typename C, typename D0, typename D1, typename D2>
__host__ __device__ constexpr void
operator()(E& e, const C& c, const D0& d0, const D1& d1, const D2& d2) const {
e = ck::type_convert<E>(
ck::type_convert<float>(c)
* ck::type_convert<float>(d0)
* ck::type_convert<float>(d1)
+ ck::type_convert<float>(d2)
);
}
};
"""
)
return res
def inline_utils(self):
res = IndentedBuffer()
res.splice(
"""
#include "host_tensor.cpp"
#include "device_memory.cpp"
"""
)
return res
def _has_padding(self, dimension, gemm_specialization):
# Get the relevant padding map for the given dimension
dimension_padding = padding_lookup.get(dimension, {})
# Check if the specialization is in the dimension's padding map
return dimension_padding.get(gemm_specialization, False)
def filter_op(self, op_info: InductorROCmOp):
"""
Determines whether a given op definition is suitable for the current
input / output of the operation that this template implements.
Filter is based on inputs' dtype, layout and statically inferred size.
Returns None if the op is not suitable, otherwise returns the op to be used.
"""
op, kBatch = op_info.op, op_info.kBatch
metas = [T.get_layout() for T in [*self.input_nodes, self.output_node]]
X_meta = metas[0]
W_meta = metas[1]
Y_meta = metas[-1]
# disable the instance if dtypes don't match
if op.a_element_dtype != self._TORCH_DTYPE_TO_CK[X_meta.dtype]:
return None
if op.b_element_dtype != self._TORCH_DTYPE_TO_CK[W_meta.dtype]:
return None
if op.c_element_dtype != self._TORCH_DTYPE_TO_CK[Y_meta.dtype]:
return None
# disable the instance if layouts don't match
if op.a_layout != torch_layout_to_ck_layout(X_meta):
return None
if op.b_layout != torch_layout_to_ck_layout(W_meta):
return None
if op.c_layout != torch_layout_to_ck_layout(Y_meta):
return None
# try to avoid launching the instance with invalid problem size
# see GridwiseGemm_xdl_cshuffle_v3::CheckValidity
M = X_meta.size[-2]
K = X_meta.size[-1]
N = W_meta.size[-1]
if is_static_int(M):
if not self._has_padding("M", op.gemm_specialization):
if M % op.m_per_block != 0:
return None
if is_static_int(N):
if not self._has_padding("N", op.gemm_specialization):
if N % op.n_per_block != 0:
return None
if is_static_int(K):
if not self._has_padding("K", op.gemm_specialization):
if K % op.k_per_block != 0:
return None
K_t = kBatch * op.k_per_block
if K % K_t != 0:
return None
else:
# need another kBatch check here
lcm = abs(op.a_k1 * op.b_k1) // math.gcd(op.a_k1, op.b_k1)
K_t = kBatch * lcm
k_read_pad_splited = math.ceil(K / K_t) * lcm
if (k_read_pad_splited * (kBatch - 1)) >= K:
return None
a_contig_size = (
K if op.a_layout == "Row" else M if op.a_layout == "Col" else None
)
if (
is_static_int(a_contig_size)
and a_contig_size % op.a_block_transfer_src_scalar_per_vector != 0
):
return None
b_contig_size = (
N if op.b_layout == "Row" else K if op.b_layout == "Col" else None
)
if (
is_static_int(b_contig_size)
and b_contig_size % op.b_block_transfer_src_scalar_per_vector != 0
):
return None
c_contig_size = (
N if op.c_layout == "Row" else M if op.c_layout == "Col" else None
)
c_shuffle_block_transfer_scalar_per_vector_n_per_block = (
op.c_shuffle_block_transfer_scalar_per_vector_n_per_block[0]
if isinstance(
op.c_shuffle_block_transfer_scalar_per_vector_n_per_block, tuple
)
else op.c_shuffle_block_transfer_scalar_per_vector_n_per_block
)
if (
is_static_int(c_contig_size)
and c_contig_size % c_shuffle_block_transfer_scalar_per_vector_n_per_block
!= 0
):
return None
if not self._check_num_k_loops(op, kBatch):
return None
# TBD disable instances with invalid number of pipeline prefetch stages
# It will avoid compiling a small percentage of unrunnable instances which fail the gemm argument check
return op
def _check_num_k_loops(self, op, kBatch):
# Additional splitK scenario check
metas = [T.get_layout() for T in [*self.input_nodes]]
X_meta = metas[0]
W_meta = metas[1]
K = X_meta.size[-1]
if kBatch > 1:
if op.block_gemm_pipeline_version != "BlockGemmPipelineVersion::v1":
try:
prefetch_stages = self._prefetch_stages(
op,
torch.empty((), dtype=X_meta.dtype).element_size(),
torch.empty((), dtype=W_meta.dtype).element_size(),
torch.cuda.get_device_properties(X_meta.device).warp_size,
)
except Exception as e:
log.debug( # noqa: G200
"Failed to prefetch_stages for %s with exception %s", op.name, e
)
# be conservative here and disable the op
return False
K_t = op.k_per_block * kBatch
ak0 = (K + K_t - 1) // K_t * (op.k_per_block // op.a_k1)
num_k_loop = ak0 // (op.k_per_block // op.a_k1)
if num_k_loop <= prefetch_stages:
log.debug(
"Op %s is not compatible due to invalid number of pipeline prefetch stages. "
"Parameters: kBatch=%s, block_gemm_pipeline_version=%s, prefetch_stages=%s, num_k_loop=%s",
op.name(),
kBatch,
op.block_gemm_pipeline_version,
prefetch_stages,
num_k_loop,
)
return False
return True
# small helper to figure out the prefetch stages on AMD
def _prefetch_stages(self, op, a_dtype_size, b_dtype_size, warp_size: int = 64):
version_str = op.block_gemm_pipeline_version.split("::")[-1]
try:
version = int(version_str[1:]) # Assuming the format is always 'vX'
except ValueError as e:
raise ValueError(f"Invalid version string: {version_str}") from e
if version not in [1, 2, 3, 4, 5]:
raise ValueError(
f"unknown prefetch stages for {op.block_gemm_pipeline_version}"
)
# Define the mapping of versions to stages
version_to_stages = {1: 1, 3: 2, 4: 4, 5: 3}
# Get the stages for the given version
stages = version_to_stages.get(version)
if stages is None:
# This means we're at stage 2, and this requires computation
# See github.com/ROCm/composable_kernel/blob/d6a4605/include/ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_v2.hpp#L143 # noqa: B950
wgp_per_cu = max(4 * warp_size // op.block_size, 1)
full_mem_band_prefetch_stages = math.ceil(
32768
/ wgp_per_cu
/ (
(op.m_per_block * a_dtype_size + op.n_per_block * b_dtype_size)
* op.k_per_block
)
)
stages = min(max(full_mem_band_prefetch_stages, 2), 8)
return stages
def emit_ck_instance(self, op: "CKGemmOperation"):
# The Jinja template for generating a C++ type alias *definition* for a Universal GEMM instance
struct_name = (
"DeviceBatchedGemmMultiD_Xdl_CShuffle_V3"
if self.is_batched
else "DeviceGemmMultiD_Xdl_CShuffle_V3"
)
template_definition = r"""
// Gemm operator {{operation_name}}
using Operation_{{operation_name}} =
ck::tensor_operation::device::{{struct_name}}<
{{template_params}}>;
"""
# The Jinja template for generating a C++ type alias *usage* for a Universal GEMM instance
template_type = r"""
Operation_{{operation_name}}
"""
template_params = []
for field_name, field_value in op.dict_items():
if isinstance(field_value, tuple):
tuple_elements = ", ".join(map(str, iter(field_value)))
if "ds" in field_name: # element type and layout for bias
arg = f"/* {field_name} */ Tuple<{tuple_elements}>"
else: # tile shape
arg = f"/* {field_name} */ S<{tuple_elements}>"
# pyrefly: ignore [bad-argument-type]
template_params.append(arg)
else:
if field_value is not None:
# pyrefly: ignore [bad-argument-type]
template_params.append(f"/* {field_name} */ {field_value}")
operation_name = op.name().replace("(", "").replace(",", "").replace(")", "")
return self._template_from_string(template_definition).render(
operation_name=operation_name,
template_params=(",\n" + 12 * " ").join(template_params),
struct_name=struct_name,
), self._template_from_string(template_type).render(
operation_name=operation_name
)
def render( # type: ignore[override]
self,
kernel: ROCmTemplateKernel,
op: "CKGemmOperation",
**kwargs,
) -> str:
"""
The primary entry point for the code rendering process used in this template.
"""
epilogue_nodes = kwargs.get("epilogue_nodes")
assert epilogue_nodes is None or 0 == len(epilogue_nodes)
template_buffer_node = kwargs.get("template_buffer_node")
if template_buffer_node is not None:
self.output_node = template_buffer_node
# input nodes:
# * X, W for matmul
# * X, W, Bias for addmm
# * X, W, inv_scale_x, inv_scale_w for scaled_mm
# * X, W, inv_scale_x, inv_scale_w, Bias for scaled_mm with bias
X, W = self.input_nodes[0], self.input_nodes[1]
Y = self.output_node
Bias = (
self.input_nodes[2]
if 3 == len(self.input_nodes)
else self.input_nodes[4]
if 5 == len(self.input_nodes)
else None
)
has_bias = Bias is not None
has_scale = len(self.input_nodes) in (4, 5)
op = copy.deepcopy(op)
# This parameter is converted into tuple because of change
# from DeviceGemm_Xdl_CShuffleV3 to DeviceGemmMultiD_Xdl_CShuffle_V3.
# The first tuple element corresponds to matmul result...
if not isinstance(
op.c_shuffle_block_transfer_scalar_per_vector_n_per_block, tuple
):
op.c_shuffle_block_transfer_scalar_per_vector_n_per_block = (
op.c_shuffle_block_transfer_scalar_per_vector_n_per_block,
)
if has_scale:
scale_x = self.input_nodes[2]
scale_w = self.input_nodes[3]
if 1 == scale_x.get_numel() and 1 == scale_w.get_numel():
# tensorwise scale for both X, W
if has_bias:
op.c_elementwise_op = "ScaleAdd"
else:
op.c_elementwise_op = "Scale"
else:
# rowwise scale for both X, W
if has_bias:
op.c_elementwise_op = "MultiplyMultiplyAdd"
else:
op.c_elementwise_op = "MultiplyMultiply"
op.c_shuffle_dtype = "F32"
op.ds_layouts = (
torch_layout_to_ck_layout(scale_x.get_layout()),
torch_layout_to_ck_layout(scale_w.get_layout()),
)
op.ds_element_dtypes = (
self._TORCH_DTYPE_TO_CK[scale_x.get_layout().dtype],
self._TORCH_DTYPE_TO_CK[scale_w.get_layout().dtype],
)
op.c_shuffle_block_transfer_scalar_per_vector_n_per_block += (1, 1)
else:
scale_x = None
scale_w = None
bias_dtype = ""
if Bias is not None:
bias_layout = torch_layout_to_ck_layout(Bias.get_layout())
bias_dtype = self._TORCH_DTYPE_TO_CK[Bias.get_layout().dtype]
op.ds_layouts += (bias_layout,)
op.ds_element_dtypes += (bias_dtype,)
if not has_scale:
op.c_elementwise_op = "Bilinear"
# c_shuffle_dtype is also used for adding bias to matmul result
# before converting down to the result dtype
op.c_shuffle_dtype = op.acc_dtype
# this parameter needs to be set accordingly to bias stride for correct accumulation
if bias_layout == "Row":
# bias has (N, ) shape
bias_shuffle_block_transfer_scalar_per_vector_n_per_block = (
op.c_shuffle_block_transfer_scalar_per_vector_n_per_block
)
elif bias_layout == "Col":
# bias has (M, 1) shape
bias_shuffle_block_transfer_scalar_per_vector_n_per_block = (1,)
else:
raise AssertionError(
"Bias layout is neither row-major nor column-major"
)
# ...and the second tuple element corresponds to the bias
op.c_shuffle_block_transfer_scalar_per_vector_n_per_block += (
bias_shuffle_block_transfer_scalar_per_vector_n_per_block
)
instance_definition, instance_type = self.emit_ck_instance(op)
version_comment = rf"""/**
* Generated code for CK inductor backend
* See {type(self).__module__}.{type(self).__qualname__}
*
* Template instance {op}
*
* {torch.__version__=}
* torch.version.git_version={getattr(torch.version, "git_version", "None")}
*/
"""
epilogue = None
if op.c_elementwise_op == "Bilinear" and scale_w is None:
epilogue = f"Bilinear {{ {self.alpha}, {self.beta} }}"
elif op.c_elementwise_op == "Scale":
epilogue = "Scale { (inv_scale_w && inv_scale_x) ? (*inv_scale_w * *inv_scale_x) : 1.0f }"
elif op.c_elementwise_op == "ScaleAdd":
epilogue = "ScaleAdd { (inv_scale_w && inv_scale_x) ? (*inv_scale_w * *inv_scale_x) : 1.0f }"
elif op.c_elementwise_op == "MultiplyMultiply":
epilogue = "MultiplyMultiply {}"
elif op.c_elementwise_op == "MultiplyMultiplyAdd":
epilogue = "MultiplyMultiplyAdd {}"
elif op.c_elementwise_op == "PassThrough":
epilogue = "PassThrough {}"
assert epilogue is not None, "CK GEMM epilogue is not set"
size_arg_strs = ["M", "N", "K", "LDA", "LDB", "LDC", "LDD"]
if self.is_batched:
size_arg_strs.insert(0, "B")
res = self._template_from_string(self.gemm_template).render(
inline_utils=self.inline_utils(),
headers=self.header().getvalue(),
globals=self.globals().getvalue(),
instance_definition=instance_definition,
kernel_definition=kernel.def_kernel(
inputs=[X, W, scale_x, scale_w, Bias], # type: ignore[list-item]
outputs=[Y],
names_str="X, W, inv_scale_x, inv_scale_w, Bias, Y",
input_reorder=self.input_reorder,
size_args=[f"int32_t {arg}" for arg in size_arg_strs],
),
instance_type=instance_type,
a_element_dtype=op.a_element_dtype,
b_element_dtype=op.b_element_dtype,
c_element_dtype=op.c_element_dtype,
bias_element_dtype=bias_dtype,
alpha=self.alpha,
beta=self.beta,
a_elementwise_op="PassThrough {}",
b_elementwise_op="PassThrough {}",
epilogue=epilogue,
has_bias=has_bias,
ds_size=1
if op.c_elementwise_op in ("Bilinear", "ScaleAdd")
else 2
if op.c_elementwise_op == "MultiplyMultiply"
else 3
if op.c_elementwise_op == "MultiplyMultiplyAdd"
else 0,
ds_names=", ".join(
["Bias"]
if op.c_elementwise_op in ("Bilinear", "ScaleAdd")
else ["inv_scale_x", "inv_scale_w"]
if op.c_elementwise_op == "MultiplyMultiply"
else ["inv_scale_x", "inv_scale_w", "Bias"]
if op.c_elementwise_op == "MultiplyMultiplyAdd"
else []
),
ds_strides=", ".join(
["LDD"]
if op.c_elementwise_op in ("Bilinear", "ScaleAdd")
else ["0", "0"]
if op.c_elementwise_op == "MultiplyMultiply"
else ["0", "0", "LDD"]
if op.c_elementwise_op == "MultiplyMultiplyAdd"
else []
),
version_comment=version_comment,
is_batched=self.is_batched,
ds_batch_strides=", ".join([]), # FIXME when supporting baddbmm
)
if config.rocm.generate_test_runner:
is_static_problem = all(is_static_int(arg) for arg in self.size_args())
# NOTE: size_arg_strs is defined above
size_arg_vals = (
self.size_args()
if is_static_problem
else (
f"std::stoi(argv[{k}])" for k, _ in enumerate(self.size_args(), 1)
)
)
size_args = dict(zip(size_arg_strs, size_arg_vals, strict=True))
runtime_args = dict(
zip(
[a.name for a in self.get_runtime_arg_info()],
self.get_runtime_arg_values(),
)
)
runner_code = self._template_from_string(
self.standalone_runner_template
).render(
inline_utils=self.inline_utils().getvalue(),
kernel_name=kernel.kernel_name,
has_bias=has_bias,
has_scale=has_scale,
is_batched=self.is_batched,
a_ck_dtype=op.a_element_dtype,
b_ck_dtype=op.b_element_dtype,
c_ck_dtype=op.c_element_dtype,
bias_ck_dtype=op.ds_element_dtypes[0] if has_bias else "",
scale_a_ck_dtype=op.ds_element_dtypes[0]
if has_scale and 2 == len(op.ds_element_dtypes)
else "BF16",
scale_b_ck_dtype=op.ds_element_dtypes[1]
if has_scale and 2 == len(op.ds_element_dtypes)
else "BF16",
a_torch_dtype=DTYPE_TO_CPP[X.get_layout().dtype],
b_torch_dtype=DTYPE_TO_CPP[W.get_layout().dtype],
c_torch_dtype=DTYPE_TO_CPP[Y.get_layout().dtype],
bias_torch_dtype=DTYPE_TO_CPP[Bias.get_layout().dtype]
if Bias is not None
else "",
scale_a_torch_dtype=DTYPE_TO_CPP[scale_x.get_layout().dtype]
if scale_x is not None
else "",
scale_b_torch_dtype=DTYPE_TO_CPP[scale_w.get_layout().dtype]
if scale_w is not None
else "",
a_layout=torch_layout_to_ck_layout(X.get_layout()),
b_layout=torch_layout_to_ck_layout(W.get_layout()),
c_layout=torch_layout_to_ck_layout(Y.get_layout()),
bias_layout=torch_layout_to_ck_layout(Bias.get_layout())
if Bias is not None
else "",
compile_cmd=rocm_compile_command(
["<source_file_name>"], "<executable_name>", "exe"
),
**size_args,
**runtime_args,
)
res += runner_code
return res
def _is_rcr_f16(self):
X_meta, W_meta, Y_meta = (
T.get_layout() for T in [*self.input_nodes, self.output_node]
)
X_dtype, W_dtype, Y_dtype = (
self._TORCH_DTYPE_TO_CK[m.dtype] for m in (X_meta, W_meta, Y_meta)
)
X_layout, W_layout, Y_layout = (
torch_layout_to_ck_layout(m) for m in (X_meta, W_meta, Y_meta)
)
return (
X_dtype == "F16"
and W_dtype == "F16"
and Y_dtype == "F16"
and X_layout == "Row"
and W_layout == "Col"
and Y_layout == "Row"
)
# helper to calculate a potentially optimal kBatch(es) for a problem
def _get_kBatch(self, op):
# we only set a higher kBatch if K > 16 * the larger of M and N
# this is a hand-tuned heuristic to start
metas = [T.get_layout() for T in [*self.input_nodes]]
X_meta = metas[0]
W_meta = metas[1]
M = X_meta.size[-2]
K = X_meta.size[-1]
N = W_meta.size[-1]
if is_dynamic(*self.input_nodes):
return [1]
if K // max(M, N) < config.rocm.split_k_threshold:
return [1]
# if the user is telling us which kBatches to sweep, just use those
if config.rocm.kBatch_sweep is not None:
return config.rocm.kBatch_sweep
# Calculate the number of blocks needed for each dimension
total_k_blocks = math.ceil(K / op.k_per_block)
# we want to calculate how many blocks we need to fit per CU
cus = torch.cuda.get_device_properties(X_meta.device).multi_processor_count
# again, manual heuristics as much larger kBatch are significantly worse in
# initial testing
kBatch = min(max(next_power_of_2(total_k_blocks // cus), 1), 128)
return [kBatch]
def gen_ops(self) -> list[InductorROCmOp]:
"""
Creates a list of `CKGemmOperation` instances that match the GEMM operation this template represents.
The instances are guaranteed to have the correct layout, dtype and dimension padding for the GEMM input arguments.
An instance may invalidate the GEMM configuration at runtime.
Such instances will be assigned +inf runtime by the autotune process.
"""
try:
from ck4inductor.batched_universal_gemm.gen_instances import ( # type: ignore[import]
gen_ops_library as gen_batched_gemm_ops_library,
)
from ck4inductor.universal_gemm.gen_instances import ( # type: ignore[import]
gen_ops_library as gen_gemm_ops_library,
gen_ops_preselected as gen_gemm_ops_preselected,
)
except ImportError:
return []
generator = None
if self.is_batched:
generator = gen_batched_gemm_ops_library
else:
generator = gen_gemm_ops_library
if config.rocm.use_preselected_instances and self._is_rcr_f16():
generator = gen_gemm_ops_preselected
assert generator is not None
rops = generator()
ops = []
for o in rops:
kBatches = self._get_kBatch(o)
for kBatch in kBatches:
# pyrefly: ignore [bad-argument-type]
ops.append(InductorROCmOp(op=o, kBatch=kBatch))
filtered_instances = list(filter(lambda op: self.filter_op(op), ops))
# NB: when using a fixed list order, most likely we will pick the subset of instances
# which are very similar to each other. Randomizing the choice seems to solve this.
random.seed(-11)
chosen_instances = (
random.sample(
filtered_instances,
min(len(filtered_instances), config.rocm.ck_max_profiling_configs),
)
if config.rocm.ck_max_profiling_configs
else filtered_instances
)
log.debug(
"generated %d ck instances after filter: %s",
len(chosen_instances),
chosen_instances,
)
return chosen_instances
@staticmethod
def add_ck_gemm_choices(
choices,
layout,
input_nodes,
alpha=1,
beta=0,
input_reorder=None,
):
"""
Add Composable Kernel Universal GEMM instance choices to the auto-tuning list.
"""
template = CKGemmTemplate(
input_nodes,
layout,
alpha=alpha,
beta=beta,
input_reorder=input_reorder,
)
ops = template.gen_ops()
for op in ops:
template.maybe_append_choice(
choices,
op=op.op,
kBatch=op.kBatch,
)
def size_args(self):
X = self.input_nodes[0]
W = self.input_nodes[1]
Bias = (
self.input_nodes[2]
if len(self.input_nodes) == 3
else self.input_nodes[4]
if len(self.input_nodes) == 5
else None
)
Y = self.output_node
M = X.get_size()[-2]
K = X.get_size()[-1]
N = W.get_size()[-1]
LDA = X.get_stride()[-2 if X.get_stride()[-1] == 1 else -1]
LDB = W.get_stride()[-2 if W.get_stride()[-1] == 1 else -1]
LDC = Y.get_stride()[-2 if Y.get_stride()[-1] == 1 else -1]
LDD = (
0
if (Bias is None or len(Bias.get_size()) == 1)
else Bias.get_stride()[-2 if Bias.get_stride()[-1] == 1 else -1]
)
if self.is_batched:
B = X.get_size()[0]
return B, M, N, K, LDA, LDB, LDC, LDD
else:
return M, N, K, LDA, LDB, LDC, LDD
| CKGemmTemplate |
python | django__django | tests/auth_tests/test_auth_backends.py | {
"start": 20639,
"end": 22915
} | class ____(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the default User model.
"""
UserModel = User
user_credentials = {"username": "test", "password": "test"}
def create_users(self):
self.user = User.objects.create_user(
email="test@example.com", **self.user_credentials
)
self.superuser = User.objects.create_superuser(
username="test2",
email="test2@example.com",
password="test",
)
def test_authenticate_inactive(self):
"""
An inactive user can't authenticate.
"""
self.assertEqual(authenticate(**self.user_credentials), self.user)
self.user.is_active = False
self.user.save()
self.assertIsNone(authenticate(**self.user_credentials))
async def test_aauthenticate_inactive(self):
"""
An inactive user can't authenticate.
"""
self.assertEqual(await aauthenticate(**self.user_credentials), self.user)
self.user.is_active = False
await self.user.asave()
self.assertIsNone(await aauthenticate(**self.user_credentials))
@override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithoutIsActiveField")
def test_authenticate_user_without_is_active_field(self):
"""
A custom user without an `is_active` field is allowed to authenticate.
"""
user = CustomUserWithoutIsActiveField.objects._create_user(
username="test",
email="test@example.com",
password="test",
)
self.assertEqual(authenticate(username="test", password="test"), user)
@override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithoutIsActiveField")
async def test_aauthenticate_user_without_is_active_field(self):
"""
A custom user without an `is_active` field is allowed to authenticate.
"""
user = await CustomUserWithoutIsActiveField.objects._acreate_user(
username="test",
email="test@example.com",
password="test",
)
self.assertEqual(await aauthenticate(username="test", password="test"), user)
@override_settings(AUTH_USER_MODEL="auth_tests.ExtensionUser")
| ModelBackendTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 53925,
"end": 54977
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"path",
"body",
"pull_request_id",
"pull_request_review_id",
"line",
"side",
"start_line",
"start_side",
"client_mutation_id",
)
path = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="path")
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
pull_request_id = sgqlc.types.Field(ID, graphql_name="pullRequestId")
pull_request_review_id = sgqlc.types.Field(ID, graphql_name="pullRequestReviewId")
line = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="line")
side = sgqlc.types.Field(DiffSide, graphql_name="side")
start_line = sgqlc.types.Field(Int, graphql_name="startLine")
start_side = sgqlc.types.Field(DiffSide, graphql_name="startSide")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| AddPullRequestReviewThreadInput |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 97233,
"end": 98599
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("endpoint_service.EndpointServiceHook"))
def test_execute(self, mock_hook):
page_token = "page_token"
page_size = 42
filter = "filter"
read_mask = "read_mask"
order_by = "order_by"
op = ListEndpointsOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
filter=filter,
read_mask=read_mask,
order_by=order_by,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.list_endpoints.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
filter=filter,
read_mask=read_mask,
order_by=order_by,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestVertexAIListEndpointsOperator |
python | tiangolo__fastapi | fastapi/exceptions.py | {
"start": 4334,
"end": 4527
} | class ____(Exception):
def __init__(self, errors: Sequence[Any]) -> None:
self._errors = errors
def errors(self) -> Sequence[Any]:
return self._errors
| ValidationException |
python | python-pillow__Pillow | src/PIL/BufrStubImagePlugin.py | {
"start": 691,
"end": 1730
} | class ____(ImageFile.StubImageFile):
format = "BUFR"
format_description = "BUFR"
def _open(self) -> None:
if not _accept(self.fp.read(4)):
msg = "Not a BUFR file"
raise SyntaxError(msg)
self.fp.seek(-4, os.SEEK_CUR)
# make something up
self._mode = "F"
self._size = 1, 1
loader = self._load()
if loader:
loader.open(self)
def _load(self) -> ImageFile.StubHandler | None:
return _handler
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
if _handler is None or not hasattr(_handler, "save"):
msg = "BUFR save handler not installed"
raise OSError(msg)
_handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry
Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
Image.register_save(BufrStubImageFile.format, _save)
Image.register_extension(BufrStubImageFile.format, ".bufr")
| BufrStubImageFile |
python | django__django | tests/view_tests/tests/test_debug.py | {
"start": 1800,
"end": 1887
} | class ____:
urlpatterns = [path("url/", index_page, name="url")]
| WithoutEmptyPathUrls |
python | pytorch__pytorch | torch/_inductor/codegen/multi_kernel.py | {
"start": 19961,
"end": 23403
} | class ____(MultiKernelCall):
"""
Runtime class for size-hint multi-kernels.
Instead of having a plain list of kernels to benchmark over, keys them by input & output shapes,
and optionally perform shape-based selection. The pre-generated kernel is chosen based on the shape keys,
with the heuristic being log2 l1 distance between the pre-generated / runtime input & output shapes.
"""
def __init__(self, multi_kernel_name, kernels, arg_index):
super().__init__(multi_kernel_name, list(kernels.values()), arg_index)
self._kernel_hints = list(kernels.keys())
# Caches results for unique shapes.
self._shape_cache = {}
def _get_shape_cache_key(self, *args, **kwargs):
"""
Generate a cache key based on tensor shapes for shape-specialized dispatch.
"""
shapes = []
for arg in args:
if hasattr(arg, "shape"):
shapes.append(tuple(arg.shape))
return tuple(shapes)
def _get_cached_shape_choice(self, cache_key):
"""
Get cached kernel choice for a specific shape.
"""
return self._shape_cache.get(cache_key)
def _cache_shape_choice(self, cache_key, kernel_idx):
"""
Cache kernel choice for a specific shape.
"""
self._shape_cache[cache_key] = kernel_idx
def _dist_heuristic(self, k1, k2):
"""
log2 L1 distance heuristic for kernel selection.
"""
def dist(x, y):
lx = math.log2(x) if x > 0 else -1
ly = math.log2(y) if y > 0 else -1
return abs(lx - ly)
out = 0
for s1, s2 in zip(k1, k2):
out += sum(dist(x, y) for x, y in zip(s1, s2))
return out
def run(self, *args, **kwargs):
cache_key = self._get_shape_cache_key(*args, **kwargs)
cached_choice = self._get_cached_shape_choice(cache_key)
if cached_choice is not None:
self.picked_kernel = cached_choice
log.debug(
"using cached shape-specialized choice %dth sub-kernel in %s. Cache key: %s",
self.picked_kernel,
[k.inductor_meta.get("kernel_name") for k in self.kernels],
cache_key,
)
else:
self._select_kernel_by_shape(*args, **kwargs)
if not self._recorded:
self._recorded = True
picked_kernel_name = self.kernels[self.picked_kernel].inductor_meta.get(
"kernel_name"
)
assert picked_kernel_name is not None
self.record_choice(self.multi_kernel_name, picked_kernel_name)
run = self.kernels[self.picked_kernel].run # type: ignore[method-assign]
filtered_args = self._get_filtered_args(args, self.picked_kernel)
run(*filtered_args, **kwargs)
def _select_kernel_by_shape(self, *args, **kwargs):
"""
Benchmark kernels for a particular shape and return the
best kernel for this shape.
"""
shape_key = self._get_shape_cache_key(*args, **kwargs)
dists = [
self._dist_heuristic(shape_key, key) if key is not None else 2**62
for key in self._kernel_hints
]
# pyrefly: ignore [bad-assignment]
self.picked_kernel = dists.index(min(dists))
self._cache_shape_choice(shape_key, self.picked_kernel)
| SizeHintMultiKernelCall |
python | getsentry__sentry | tests/sentry/utils/test_ratelimits.py | {
"start": 698,
"end": 2673
} | class ____(TestCase):
def test_by_email(self) -> None:
organization = Organization(id=1)
email = "foo@example.com"
for n in range(2):
assert not ratelimits.for_organization_member_invite(
organization, email, config=RELAXED_CONFIG
)
assert ratelimits.for_organization_member_invite(organization, email, config=RELAXED_CONFIG)
def test_by_organization(self) -> None:
organization = Organization(id=1)
for n in range(5):
assert not ratelimits.for_organization_member_invite(
organization, f"{randint(0, 1000000)}@example.com", config=RELAXED_CONFIG
)
assert ratelimits.for_organization_member_invite(
organization, "anything@example.com", config=RELAXED_CONFIG
)
def test_by_api_token(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
token = AuthenticatedToken.from_token(ApiToken(id=1))
for n in range(5):
assert not ratelimits.for_organization_member_invite(
Organization(id=randint(0, 100000)),
f"{randint(0, 1000000)}@example.com",
auth=token,
config=RELAXED_CONFIG,
)
assert ratelimits.for_organization_member_invite(
Organization(id=1), "anything@example.com", auth=token, config=RELAXED_CONFIG
)
def test_by_user(self) -> None:
user = User(email="biz@example.com")
for n in range(5):
assert not ratelimits.for_organization_member_invite(
Organization(id=randint(0, 100000)),
f"{randint(0, 1000000)}@example.com",
user=user,
config=RELAXED_CONFIG,
)
assert ratelimits.for_organization_member_invite(
Organization(id=1), "anything@example.com", user=user, config=RELAXED_CONFIG
)
| ForOrganizationMemberTestCase |
python | pytest-dev__pytest | doc/en/example/nonpython/conftest.py | {
"start": 257,
"end": 555
} | class ____(pytest.File):
def collect(self):
# We need a yaml parser, e.g. PyYAML.
import yaml
raw = yaml.safe_load(self.path.open(encoding="utf-8"))
for name, spec in sorted(raw.items()):
yield YamlItem.from_parent(self, name=name, spec=spec)
| YamlFile |
python | google__pytype | pytype_extensions/instrumentation_for_testing_test.py | {
"start": 685,
"end": 1045
} | class ____(i4t.ProductionType[NoCtor]):
def __init__(self, state):
self.state = state
def Mul100(self, i):
return self.state * i * 103
# When access to instrumented_type is needed and the fake __init__ signature is
# IDENTICAL to that of production_type (or __init__ has no arguments if
# production_type has no __init__).
| FakeNoCtorInitArgUnsealed |
python | plotly__plotly.py | plotly/graph_objs/histogram2d/_legendgrouptitle.py | {
"start": 233,
"end": 2967
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2d"
_path_str = "histogram2d.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2d.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.histogram2d.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2d.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2d.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | huggingface__transformers | src/transformers/models/groupvit/modeling_groupvit.py | {
"start": 17597,
"end": 19186
} | class ____(nn.Module):
def __init__(self, config: GroupViTTextConfig):
super().__init__()
embed_dim = config.hidden_size
self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
) -> torch.Tensor:
seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
max_position_embedding = self.position_embedding.weight.shape[0]
if seq_length > max_position_embedding:
raise ValueError(
f"Sequence length must be less than max_position_embeddings (got `sequence length`: "
f"{seq_length} and max_position_embeddings: {max_position_embedding}"
)
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if inputs_embeds is None:
inputs_embeds = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = inputs_embeds + position_embeddings
return embeddings
| GroupViTTextEmbeddings |
python | scikit-learn__scikit-learn | sklearn/model_selection/_classification_threshold.py | {
"start": 6914,
"end": 17363
} | class ____(BaseThresholdClassifier):
"""Binary classifier that manually sets the decision threshold.
This classifier allows to change the default decision threshold used for
converting posterior probability estimates (i.e. output of `predict_proba`) or
decision scores (i.e. output of `decision_function`) into a class label.
Here, the threshold is not optimized and is set to a constant value.
Read more in the :ref:`User Guide <FixedThresholdClassifier>`.
.. versionadded:: 1.5
Parameters
----------
estimator : estimator instance
The binary classifier, fitted or not, for which we want to optimize
the decision threshold used during `predict`.
threshold : {"auto"} or float, default="auto"
The decision threshold to use when converting posterior probability estimates
(i.e. output of `predict_proba`) or decision scores (i.e. output of
`decision_function`) into a class label. When `"auto"`, the threshold is set
to 0.5 if `predict_proba` is used as `response_method`, otherwise it is set to
0 (i.e. the default threshold for `decision_function`).
pos_label : int, float, bool or str, default=None
The label of the positive class. Used to process the output of the
`response_method` method. When `pos_label=None`, if `y_true` is in `{-1, 1}` or
`{0, 1}`, `pos_label` is set to 1, otherwise an error will be raised.
response_method : {"auto", "decision_function", "predict_proba"}, default="auto"
Methods by the classifier `estimator` corresponding to the
decision function for which we want to find a threshold. It can be:
* if `"auto"`, it will try to invoke `"predict_proba"` or `"decision_function"`
in that order.
* otherwise, one of `"predict_proba"` or `"decision_function"`.
If the method is not implemented by the classifier, it will raise an
error.
Attributes
----------
estimator_ : estimator instance
The fitted classifier used when predicting.
classes_ : ndarray of shape (n_classes,)
The class labels.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
See Also
--------
sklearn.model_selection.TunedThresholdClassifierCV : Classifier that post-tunes
the decision threshold based on some metrics and using cross-validation.
sklearn.calibration.CalibratedClassifierCV : Estimator that calibrates
probabilities.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.metrics import confusion_matrix
>>> from sklearn.model_selection import FixedThresholdClassifier, train_test_split
>>> X, y = make_classification(
... n_samples=1_000, weights=[0.9, 0.1], class_sep=0.8, random_state=42
... )
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, stratify=y, random_state=42
... )
>>> classifier = LogisticRegression(random_state=0).fit(X_train, y_train)
>>> print(confusion_matrix(y_test, classifier.predict(X_test)))
[[217 7]
[ 19 7]]
>>> classifier_other_threshold = FixedThresholdClassifier(
... classifier, threshold=0.1, response_method="predict_proba"
... ).fit(X_train, y_train)
>>> print(confusion_matrix(y_test, classifier_other_threshold.predict(X_test)))
[[184 40]
[ 6 20]]
"""
_parameter_constraints: dict = {
**BaseThresholdClassifier._parameter_constraints,
"threshold": [StrOptions({"auto"}), Real],
"pos_label": [Real, str, "boolean", None],
}
def __init__(
self,
estimator,
*,
threshold="auto",
pos_label=None,
response_method="auto",
):
super().__init__(estimator=estimator, response_method=response_method)
self.pos_label = pos_label
self.threshold = threshold
@property
def classes_(self):
if estimator := getattr(self, "estimator_", None):
return estimator.classes_
try:
check_is_fitted(self.estimator)
return self.estimator.classes_
except NotFittedError:
raise AttributeError(
"The underlying estimator is not fitted yet."
) from NotFittedError
def _fit(self, X, y, **params):
"""Fit the classifier.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
**params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
"""
routed_params = process_routing(self, "fit", **params)
self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit)
return self
def predict(self, X):
"""Predict the target of new samples.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
class_labels : ndarray of shape (n_samples,)
The predicted class.
"""
_check_is_fitted(self)
estimator = getattr(self, "estimator_", self.estimator)
y_score, _, response_method_used = _get_response_values_binary(
estimator,
X,
self._get_response_method(),
pos_label=self.pos_label,
return_response_method_used=True,
)
if self.threshold == "auto":
decision_threshold = 0.5 if response_method_used == "predict_proba" else 0.0
else:
decision_threshold = self.threshold
return _threshold_scores_to_class_labels(
y_score, decision_threshold, self.classes_, self.pos_label
)
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self).add(
estimator=self.estimator,
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
return router
def _fit_and_score_over_thresholds(
classifier,
X,
y,
*,
fit_params,
train_idx,
val_idx,
curve_scorer,
score_params,
):
"""Fit a classifier and compute the scores for different decision thresholds.
Parameters
----------
classifier : estimator instance
The classifier to fit and use for scoring. If `classifier` is already fitted,
it will be used as is.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The entire dataset.
y : array-like of shape (n_samples,)
The entire target vector.
fit_params : dict
Parameters to pass to the `fit` method of the underlying classifier.
train_idx : ndarray of shape (n_train_samples,) or None
The indices of the training set. If `None`, `classifier` is expected to be
already fitted.
val_idx : ndarray of shape (n_val_samples,)
The indices of the validation set used to score `classifier`. If `train_idx`,
the entire set will be used.
curve_scorer : scorer instance
The scorer taking `classifier` and the validation set as input and outputting
decision thresholds and scores as a curve. Note that this is different from
the usual scorer that outputs a single score value as `curve_scorer`
outputs a single score value for each threshold.
score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
Returns
-------
scores : ndarray of shape (thresholds,) or tuple of such arrays
The scores computed for each decision threshold. When TPR/TNR or precision/
recall are computed, `scores` is a tuple of two arrays.
potential_thresholds : ndarray of shape (thresholds,)
The decision thresholds used to compute the scores. They are returned in
ascending order.
"""
if train_idx is not None:
X_train, X_val = _safe_indexing(X, train_idx), _safe_indexing(X, val_idx)
y_train, y_val = _safe_indexing(y, train_idx), _safe_indexing(y, val_idx)
fit_params_train = _check_method_params(X, fit_params, indices=train_idx)
score_params_val = _check_method_params(X, score_params, indices=val_idx)
classifier.fit(X_train, y_train, **fit_params_train)
else: # prefit estimator, only a validation set is provided
X_val, y_val, score_params_val = X, y, score_params
return curve_scorer(classifier, X_val, y_val, **score_params_val)
def _mean_interpolated_score(target_thresholds, cv_thresholds, cv_scores):
"""Compute the mean interpolated score across folds by defining common thresholds.
Parameters
----------
target_thresholds : ndarray of shape (thresholds,)
The thresholds to use to compute the mean score.
cv_thresholds : ndarray of shape (n_folds, thresholds_fold)
The thresholds used to compute the scores for each fold.
cv_scores : ndarray of shape (n_folds, thresholds_fold)
The scores computed for each threshold for each fold.
Returns
-------
mean_score : ndarray of shape (thresholds,)
The mean score across all folds for each target threshold.
"""
return np.mean(
[
np.interp(target_thresholds, split_thresholds, split_score)
for split_thresholds, split_score in zip(cv_thresholds, cv_scores)
],
axis=0,
)
| FixedThresholdClassifier |
python | walkccc__LeetCode | solutions/2283. Check if Number Has Equal Digit Count and Digit Value/2283.py | {
"start": 0,
"end": 185
} | class ____:
def digitCount(self, num: str) -> bool:
count = collections.Counter(num)
return all(count[str(i)] == int(digit)
for i, digit in enumerate(num))
| Solution |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 9592,
"end": 9741
} | class ____:
"""Table 8.2 of the PDF 1.7 reference."""
LEFT = "/Left"
RIGHT = "/Right"
BOTTOM = "/Bottom"
TOP = "/Top"
| TypArguments |
python | keras-team__keras | keras/src/ops/core_test.py | {
"start": 5474,
"end": 12237
} | class ____(testing.TestCase):
def test_associative_scan(self):
xs = (KerasTensor((5, 10)), KerasTensor((5, 10)))
ys = core.associative_scan(
f=lambda x, y: (x[0] + y[0], x[1] + y[1]), elems=xs, axis=0
)
self.assertEqual(ys[0].shape, (5, 10))
# sum two tuples of unknown (but same) length at axis
def _fn(x, y):
return tuple([x[i] + y[i] for i in range(len(x))])
ys = core.associative_scan(f=_fn, elems=xs, axis=1)
self.assertEqual(ys[0].shape, (5, 10))
def test_cast(self):
x = KerasTensor((3, 5, 7), dtype="float32")
self.assertEqual(core.cast(x, "float16").shape, (3, 5, 7))
def test_cond(self):
pred = KerasTensor((), dtype="bool")
self.assertEqual(
ops.cond(
pred, lambda: ops.ones((1, 3)), lambda: ops.zeros((1, 3))
).shape,
(1, 3),
)
def test_convert_to_tensor(self):
x = KerasTensor((2, 3))
out = core.convert_to_tensor(x)
self.assertEqual(out.shape, x.shape)
self.assertFalse(out.sparse)
out = core.convert_to_tensor(x, sparse=True)
self.assertFalse(out.sparse)
x = KerasTensor((2, 3), sparse=True)
out = core.convert_to_tensor(x)
self.assertTrue(out.sparse)
out = core.convert_to_tensor(x, sparse=True)
self.assertTrue(out.sparse)
out = core.convert_to_tensor(x, sparse=False)
self.assertFalse(out.sparse)
def test_fori_loop(self):
def body_fun(i, x):
return x + i
initial_value = KerasTensor((3, 5, 7))
result = core.fori_loop(0, 10, body_fun, initial_value)
self.assertEqual(result.shape, (3, 5, 7))
def test_map(self):
def f(x):
return x**2
xs = KerasTensor((6, 5))
ys = core.map(f, xs)
self.assertEqual(ys.shape, (6, 5))
# Test nested output
def f2(x):
return {"a": x**2, "b": x * 10}
xs = KerasTensor((6, 5))
ys = core.map(f2, xs)
self.assertEqual(ys["a"].shape, (6, 5))
self.assertEqual(ys["b"].shape, (6, 5))
# Test nested input
def f3(x):
return x[0] + x[1]
xs = (KerasTensor((6, 5)), KerasTensor((6, 5)))
self.assertEqual(core.map(f3, xs).shape, (6, 5))
def test_saturate_cast(self):
x = KerasTensor((3, 5, 7), dtype="float32")
self.assertEqual(core.saturate_cast(x, "float16").shape, (3, 5, 7))
def test_scan(self):
def f(carry, xs):
xs = xs + carry
return carry, carry
init = KerasTensor(())
xs = KerasTensor((6,))
carry, result = core.scan(f, init, xs)
self.assertEqual(carry.shape, ())
self.assertEqual(result.shape, (6,))
def f2(carry, _):
return carry, carry
carry, result = core.scan(f2, init, xs=None, length=3)
self.assertEqual(carry.shape, ())
self.assertEqual(result.shape, (3,))
def test_scatter(self):
indices = KerasTensor((5, 2))
values = KerasTensor((5,))
shape = (4, 4)
self.assertEqual(core.scatter(indices, values, shape).shape, (4, 4))
def test_scatter_update(self):
inputs = KerasTensor((4, 4))
indices = KerasTensor((5, 2))
updates = KerasTensor((5,))
self.assertEqual(
core.scatter_update(inputs, indices, updates).shape, (4, 4)
)
inputs = KerasTensor((4, 4, 4))
indices = KerasTensor((5, 2))
updates = KerasTensor((5, 4))
self.assertEqual(
core.scatter_update(inputs, indices, updates).shape, (4, 4, 4)
)
def test_slice(self):
inputs = KerasTensor(shape=(3, 3), dtype="float32")
start_indices = KerasTensor(shape=(2,), dtype="int32")
shape = (2, 2)
self.assertEqual(core.slice(inputs, start_indices, shape).shape, (2, 2))
def test_slice_negative_one_shape(self):
inputs = KerasTensor(shape=(3, 3), dtype="float32")
start_indices = (1, 1)
shape = (-1, -1)
self.assertEqual(core.slice(inputs, start_indices, shape).shape, (2, 2))
def test_slice_negative_one_shape_raises(self):
inputs = KerasTensor(shape=(3, 3), dtype="float32")
start_indices = KerasTensor(shape=(2,), dtype="int32")
shape = (-1, -1)
with self.assertRaises(ValueError):
core.slice(inputs, start_indices, shape)
def test_slice_update(self):
inputs = KerasTensor((4, 4))
start_indices = KerasTensor((2,))
updates = KerasTensor((2, 2))
self.assertEqual(
core.slice_update(inputs, start_indices, updates).shape, (4, 4)
)
inputs = KerasTensor((4, 4, 4))
start_indices = KerasTensor((3,))
updates = KerasTensor((2, 2, 2))
self.assertEqual(
core.slice_update(inputs, start_indices, updates).shape, (4, 4, 4)
)
def test_stop_gradient(self):
variable = KerasTensor(shape=(3, 3), dtype="float32")
self.assertEqual(core.stop_gradient(variable).shape, (3, 3))
def test_switch(self):
def fn(x, y):
return x[:, 0], y[0, :]
index = KerasTensor(())
x = KerasTensor((5, 2))
y = KerasTensor((5, 2))
self.assertEqual(core.switch(index, [fn], x, y)[0].shape, (5,))
self.assertEqual(core.switch(index, [fn], x, y)[1].shape, (2,))
def test_vectorized_map(self):
def f(x):
return x**2
xs = KerasTensor((6, 5))
ys = core.vectorized_map(f, xs)
self.assertEqual(ys.shape, (6, 5))
# Test nested output
def f2(x):
return {"a": x**2, "b": x * 10}
xs = KerasTensor((6, 5))
ys = core.vectorized_map(f2, xs)
self.assertEqual(ys["a"].shape, (6, 5))
self.assertEqual(ys["b"].shape, (6, 5))
# Test nested input
def f3(x):
return x[0] + x[1]
xs = (KerasTensor((6, 5)), KerasTensor((6, 5)))
self.assertEqual(core.vectorized_map(f3, xs).shape, (6, 5))
def test_while_loop(self):
def cond(args):
return tree.flatten(args)[0] < 10
def body(args):
return tree.map_structure(lambda x: x + 1, args)
loop_vars = KerasTensor((10,))
self.assertEqual(core.while_loop(cond, body, loop_vars).shape, (10,))
def test_unstack(self):
x = KerasTensor((2, 3, 4))
axis = 1
out = core.unstack(x, axis=axis)
self.assertEqual(len(out), 3)
for o in out:
self.assertEqual(o.shape, (2, 4))
| CoreOpsStaticShapeTest |
python | protocolbuffers__protobuf | python/google/protobuf/internal/thread_safe_test.py | {
"start": 1637,
"end": 3125
} | class ____(unittest.TestCase):
def RunThreads(self, thread_size, func):
threads = []
for i in range(0, thread_size):
threads.append(threading.Thread(target=func))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def testDoNothing(self):
thread_size = 10
def DoNothing():
return
self.RunThreads(thread_size, DoNothing)
@unittest.skipIf(
api_implementation.Type() != 'cpp',
'Only cpp supports free threading for now',
)
def testDescriptorPoolMap(self):
thread_size = 20
self.success_count = 0
lock = threading.Lock()
def CreatePool():
def DoCreate():
pool = descriptor_pool.DescriptorPool()
file_proto = descriptor_pb2.FileDescriptorProto(name='foo')
message_proto = file_proto.message_type.add(name='SomeMessage')
message_proto.field.add(
name='int_field',
number=1,
type=descriptor_pb2.FieldDescriptorProto.TYPE_INT32,
label=descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL,
)
pool.Add(file_proto)
desc = pool.FindMessageTypeByName('SomeMessage')
msg = message_factory.GetMessageClass(desc)()
msg.int_field = 1
DoCreate()
with lock:
self.success_count += 1
self.RunThreads(thread_size, CreatePool)
self.assertEqual(thread_size, self.success_count)
if __name__ == '__main__':
unittest.main()
| FreeThreadingTest |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 91680,
"end": 91753
} | class ____(Binop):
operation = operator.le
_operator_repr = "<="
| LE |
python | spyder-ide__spyder | spyder/plugins/layout/layouts.py | {
"start": 343,
"end": 566
} | class ____:
SpyderLayout = "Spyder Default Layout"
HorizontalSplitLayout = "Horizontal split"
VerticalSplitLayout = "Vertical split"
RLayout = "Rstudio layout"
MatlabLayout = "Matlab layout"
| DefaultLayouts |
python | lxml__lxml | src/lxml/html/tests/test_html5parser.py | {
"start": 13577,
"end": 13707
} | class ____:
def __init__(self, root):
self.root = root
def getroot(self):
return self.root
| DummyElementTree |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_combined09.py | {
"start": 315,
"end": 1633
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_combined09.xlsx")
self.ignore_elements = {
"xl/charts/chart1.xml": ["<c:dispBlanksAs", "<c:tickLblPos"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart1 = workbook.add_chart({"type": "column"})
chart2 = workbook.add_chart({"type": "line"})
chart1.axis_ids = [114984064, 114985600]
chart2.axis2_ids = [114988928, 114987392]
data = [
[2, 7, 3, 6, 2],
[20, 25, 10, 10, 20],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart1.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart2.add_series({"values": "=Sheet1!$B$1:$B$5", "y2_axis": 1})
chart1.set_y_axis({"num_font": {"bold": 1, "baseline": -1}})
chart2.set_y2_axis({"num_font": {"bold": 1, "baseline": -1}})
chart1.combine(chart2)
worksheet.insert_chart("E9", chart1)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | coleifer__peewee | tests/fields.py | {
"start": 1083,
"end": 1208
} | class ____(TestModel):
data = IntegerField(default=17)
data_callable = IntegerField(default=lambda: 1337)
| DefaultValues |
python | vyperlang__vyper | vyper/semantics/analysis/base.py | {
"start": 9311,
"end": 11172
} | class ____:
"""
Class which represents the analysis associated with an expression
"""
typ: VyperType
var_info: Optional[VarInfo] = None
module_info: Optional[ModuleInfo] = None
location: DataLocation = DataLocation.UNSET
modifiability: Modifiability = Modifiability.MODIFIABLE
attr: Optional[str] = None
def __post_init__(self):
should_match = ("typ", "location", "modifiability")
if self.var_info is not None:
for attr in should_match:
if getattr(self.var_info, attr) != getattr(self, attr):
raise CompilerPanic(f"Bad analysis: non-matching {attr}: {self}")
self._writes: OrderedSet[VarAccess] = OrderedSet()
self._reads: OrderedSet[VarAccess] = OrderedSet()
@classmethod
def from_varinfo(cls, var_info: VarInfo, **kwargs) -> "ExprInfo":
return cls(
var_info.typ,
var_info=var_info,
location=var_info.location,
modifiability=var_info.modifiability,
**kwargs,
)
@classmethod
def from_moduleinfo(cls, module_info: ModuleInfo, **kwargs) -> "ExprInfo":
modifiability = Modifiability.RUNTIME_CONSTANT
if module_info.ownership >= ModuleOwnership.USES:
modifiability = Modifiability.MODIFIABLE
return cls(
module_info.module_t, module_info=module_info, modifiability=modifiability, **kwargs
)
def copy_with_type(self, typ: VyperType, **kwargs) -> "ExprInfo":
"""
Return a copy of the ExprInfo but with the type set to something else
"""
to_copy = ("location", "modifiability")
fields = {k: getattr(self, k) for k in to_copy}
for t in to_copy:
assert t not in kwargs
return self.__class__(typ=typ, **fields, **kwargs)
| ExprInfo |
python | django-guardian__django-guardian | guardian/migrations/0003_remove_groupobjectpermission_guardian_gr_content_ae6aec_idx_and_more.py | {
"start": 125,
"end": 1649
} | class ____(migrations.Migration):
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
("contenttypes", "0002_remove_content_type_name"),
("guardian", "0002_generic_permissions_index"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.RemoveIndex(
model_name="groupobjectpermission",
name="guardian_gr_content_ae6aec_idx",
),
migrations.RemoveIndex(
model_name="userobjectpermission",
name="guardian_us_content_179ed2_idx",
),
migrations.AddIndex(
model_name="groupobjectpermission",
index=models.Index(
fields=["permission", "group", "content_type", "object_pk"], name="guardian_gr_permiss_83545c_idx"
),
),
migrations.AddIndex(
model_name="groupobjectpermission",
index=models.Index(fields=["group", "content_type", "object_pk"], name="guardian_gr_group_i_9e7d12_idx"),
),
migrations.AddIndex(
model_name="userobjectpermission",
index=models.Index(
fields=["permission", "user", "content_type", "object_pk"], name="guardian_us_permiss_e5749c_idx"
),
),
migrations.AddIndex(
model_name="userobjectpermission",
index=models.Index(fields=["user", "content_type", "object_pk"], name="guardian_us_user_id_8eae14_idx"),
),
]
| Migration |
python | pytorch__pytorch | test/higher_order_ops/test_local_map.py | {
"start": 6613,
"end": 8993
} | class ____(TestCase):
def setUp(self):
torch._dynamo.reset()
self.exit_stack = ExitStack()
self.exit_stack.enter_context(sdpa_kernel(backends=[SDPBackend.MATH]))
if torch.distributed.is_available():
from torch.testing._internal.distributed.fake_pg import FakeStore
self.fake_store = FakeStore()
self.world_size = 2048
torch.distributed.init_process_group(
"fake", store=self.fake_store, rank=0, world_size=self.world_size
)
self.mesh = torch.distributed.device_mesh.init_device_mesh(
"cpu",
(8, 8, 4, 8),
mesh_dim_names=(
"dp",
"tp",
"cp",
"ep",
),
)
def tearDown(self):
self.exit_stack.close()
if torch.distributed.is_available():
torch.distributed.destroy_process_group()
@unittest.skipIf(*get_skip_reasons())
def test_simple(self):
cp_decorated, cp_function = get_local_mapped_functions(self.mesh)
bs = 8 * 1
dim1 = 96
dim2 = dim1 * 4
nheads = 16
seq_len = 16
from torch._dynamo.testing import EagerAndRecordGraphs, normalize_gm
backend = EagerAndRecordGraphs()
model = create_model(cp_decorated, nheads, dim1, dim2)
inputs = (torch.randn(bs, seq_len, dim1, requires_grad=True),)
with LocalMapWrappedHigherOrderVariable.enable():
out = torch.compile(model, backend=backend)(*inputs)
out.sum().backward()
model = create_model(cp_function, nheads, dim1, dim2)
inputs = (torch.randn(bs, seq_len, dim1, requires_grad=True),)
with LocalMapWrappedHigherOrderVariable.enable():
out = torch.compile(model, backend=backend)(*inputs)
out.sum().backward()
if not TEST_WITH_CROSSREF:
self.assertEqual(len(backend.graphs), 2)
self.assertEqual(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
normalize_gm(backend.graphs[1].print_readable(print_output=False)),
)
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
| TestLocalMap |
python | getsentry__sentry | src/sentry/status_checks/base.py | {
"start": 389,
"end": 1831
} | class ____:
# Used for issues that may render the system inoperable or have effects on
# data integrity (e.g. issues in the processing pipeline.)
SEVERITY_CRITICAL: Final = "critical"
# Used for issues that may cause the system to operate in a degraded (but
# still operational) state, as well as configuration options that are set
# in unexpected ways or deprecated in future versions.
SEVERITY_WARNING: Final = "warning"
# Mapping of severity level to a priority score, where the greater the
# score, the more critical the issue. (The numeric values should only be
# used for comparison purposes, and are subject to change as levels are
# modified.)
SEVERITY_LEVELS = {SEVERITY_CRITICAL: 2, SEVERITY_WARNING: 1}
def __init__(
self,
message: str,
severity: Literal["critical", "warning"] = SEVERITY_CRITICAL,
url: str | None = None,
):
assert severity in self.SEVERITY_LEVELS
self.message = str(message)
self.severity = severity
self.url = url
def __str__(self) -> str:
return self.message
@classmethod
def threshold(cls, severity: Literal["critical", "warning"]) -> Callable[[Problem], bool]:
threshold = cls.SEVERITY_LEVELS[severity]
def predicate(problem: Problem) -> bool:
return cls.SEVERITY_LEVELS[problem.severity] >= threshold
return predicate
| Problem |
python | graphql-python__graphene | graphene/relay/tests/test_connection_async.py | {
"start": 448,
"end": 2754
} | class ____(ObjectType):
letters = ConnectionField(LetterConnection)
connection_letters = ConnectionField(LetterConnection)
async_letters = ConnectionField(LetterConnection)
node = Node.Field()
def resolve_letters(self, info, **args):
return list(letters.values())
async def resolve_async_letters(self, info, **args):
return list(letters.values())
def resolve_connection_letters(self, info, **args):
return LetterConnection(
page_info=PageInfo(has_next_page=True, has_previous_page=False),
edges=[
LetterConnection.Edge(node=Letter(id=0, letter="A"), cursor="a-cursor")
],
)
schema = Schema(Query)
letters = {letter: Letter(id=i, letter=letter) for i, letter in enumerate(letter_chars)}
def edges(selected_letters):
return [
{
"node": {"id": base64("Letter:%s" % letter.id), "letter": letter.letter},
"cursor": base64("arrayconnection:%s" % letter.id),
}
for letter in [letters[i] for i in selected_letters]
]
def cursor_for(ltr):
letter = letters[ltr]
return base64("arrayconnection:%s" % letter.id)
def execute(args=""):
if args:
args = "(" + args + ")"
return schema.execute(
"""
{
letters%s {
edges {
node {
id
letter
}
cursor
}
pageInfo {
hasPreviousPage
hasNextPage
startCursor
endCursor
}
}
}
"""
% args
)
@mark.asyncio
async def test_connection_async():
result = await schema.execute_async(
"""
{
asyncLetters(first:1) {
edges {
node {
id
letter
}
}
pageInfo {
hasPreviousPage
hasNextPage
}
}
}
"""
)
assert not result.errors
assert result.data == {
"asyncLetters": {
"edges": [{"node": {"id": "TGV0dGVyOjA=", "letter": "A"}}],
"pageInfo": {"hasPreviousPage": False, "hasNextPage": True},
}
}
| Query |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 10046,
"end": 10207
} | class ____:
class __init__:
def __init__(self, a):
self.a = a
def y(self):
return self.a
#?
WithWeirdInit(1).y()
| WithWeirdInit |
python | getsentry__sentry | src/sentry/models/grouprulestatus.py | {
"start": 219,
"end": 846
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
ACTIVE = 0
INACTIVE = 1
project = FlexibleForeignKey("sentry.Project")
rule = FlexibleForeignKey("sentry.Rule")
group = FlexibleForeignKey("sentry.Group")
status = models.PositiveSmallIntegerField(default=ACTIVE)
date_added = models.DateTimeField(default=timezone.now)
last_active = models.DateTimeField(null=True)
class Meta:
db_table = "sentry_grouprulestatus"
app_label = "sentry"
unique_together = (("rule", "group"),)
__repr__ = sane_repr("rule_id", "group_id", "status")
| GroupRuleStatus |
python | kamyu104__LeetCode-Solutions | Python/longest-common-prefix-of-k-strings-after-removal.py | {
"start": 2826,
"end": 4563
} | class ____(object):
def longestCommonPrefix(self, words, k):
"""
:type words: List[str]
:type k: int
:rtype: List[int]
"""
class Trie(object):
def __init__(self):
self.__nodes = []
self.__cnt = []
self.__mx = []
self.__new_node()
def __new_node(self):
self.__nodes.append([-1]*26)
self.__cnt.append(0)
self.__mx.append(0)
return len(self.__nodes)-1
def update(self, w, d, k):
path = [-1]*(len(w)+1)
path[0] = curr = 0
for i, c in enumerate(w, 1):
x = ord(c)-ord('a')
if self.__nodes[curr][x] == -1:
self.__nodes[curr][x] = self.__new_node()
path[i] = curr = self.__nodes[curr][x]
for i in reversed(xrange(len(path))):
curr = path[i]
self.__cnt[curr] += d
self.__mx[curr] = i if self.__cnt[curr] >= k else 0
for x in xrange(len(self.__nodes[curr])):
if self.__nodes[curr][x] != -1:
self.__mx[curr]= max(self.__mx[curr], self.__mx[self.__nodes[curr][x]])
def query(self):
return self.__mx[0]
result = [0]*len(words)
trie = Trie()
for w in words:
trie.update(w, +1, k)
for i in xrange(len(words)):
trie.update(words[i], -1, k)
result[i] = trie.query()
trie.update(words[i], +1, k)
return result
| Solution_TLE |
python | pypa__pip | src/pip/_vendor/rich/prompt.py | {
"start": 10146,
"end": 10414
} | class ____(PromptBase[float]):
"""A prompt that returns a float.
Example:
>>> temperature = FloatPrompt.ask("Enter desired temperature")
"""
response_type = float
validate_error_message = "[prompt.invalid]Please enter a number"
| FloatPrompt |
python | wandb__wandb | tools/cloud_tool.py | {
"start": 3978,
"end": 10260
} | class ____:
def __init__(
self,
config: GCEConfig,
verbose: bool = False,
log_level: int = logging.INFO,
) -> None:
self.config = config
self.logger = Logger(__name__.lower(), verbose, log_level)
self.logger.print(f"Initialized {__name__} CLI")
self.logger.print(self.config)
self.update_components()
@staticmethod
def update_components() -> None:
subprocess.run(["gcloud", "--quiet", "components", "update"])
def create_vm(self) -> int:
"""Create the VM.
- The first command creates a VM similar to the one
the user can get from the GCP marketplace.
- There is apparently no way to "interact" with the
GCP marketplace directly.
- The VMI explicitly asks to install GPU drivers on the first boot,
so the second command does it.
:return:
"""
cmd = [
"gcloud",
"compute",
"instances",
"create",
self.config.instance_name,
"--machine-type",
self.config.machine_type,
"--maintenance-policy",
self.config.maintenance_policy,
"--image",
f"projects/{self.config.project}/global/images/{self.config.vm_image_name}",
"--boot-disk-size",
self.config.disk_size,
"--boot-disk-type",
self.config.disk_type,
# "--accelerator",
# f"type={self.config.accelerator_type},"
# f"count={self.config.accelerator_count}",
]
self.logger.print(" ".join(cmd))
p = subprocess.run(cmd)
return p.returncode
# # Agree to NVIDIA's prompt and install the GPU driver.
# # This monster below is here bc the yes command
# # and a gazillion alternatives do not work on circleci.
# # reverse-engineered from /usr/bin/gcp-ngc-login.sh
# cmd = [
# "gcloud",
# "compute",
# "ssh",
# self.config.instance_name,
# "--command",
# "source /etc/nvidia-vmi-version.txt; "
# 'REGISTRY="nvcr.io"; NVIDIA_DIR="/var/tmp/nvidia"; '
# "sudo gsutil cp "
# "gs://nvidia-ngc-drivers-us-public/TESLA/shim/NVIDIA-Linux-x86_64-"
# "${NVIDIA_DRIVER_VERSION}-${NVIDIA_GCP_VERSION}-shim.run "
# "${NVIDIA_DIR}; "
# "sudo chmod u+x ${NVIDIA_DIR}/NVIDIA-Linux-x86_64-"
# "${NVIDIA_DRIVER_VERSION}-${NVIDIA_GCP_VERSION}-shim.run; "
# "sudo ${NVIDIA_DIR}/NVIDIA-Linux-x86_64-${NVIDIA_DRIVER_VERSION}-"
# "${NVIDIA_GCP_VERSION}-shim.run --no-cc-version-check "
# "--kernel-module-only --silent --dkms; "
# "sudo dkms add nvidia/${NVIDIA_DRIVER_VERSION} || true; "
# "cd /usr/share/doc/NVIDIA_GLX-1.0/samples/; "
# "sudo tar xvjf nvidia-persistenced-init.tar.bz2; "
# "sudo nvidia-persistenced-init/install.sh && "
# "sudo rm -rf nvidia-persistenced-init; ",
# ]
# self.logger.print(cmd)
# for _ in range(6):
# p = subprocess.run(cmd)
# if p.returncode == 0:
# self.logger.print("GPU driver installed")
# break
# else:
# # allow some time for the VM to boot
# self.logger.print("Waiting for VM to boot...")
# time.sleep(10)
#
# return p.returncode
def run(self) -> int:
"""Run the VM.
:return:
"""
cmd = [
"gcloud",
"compute",
"ssh",
self.config.instance_name,
"--command",
"sudo apt update; "
"sudo apt install -y python3-pip; "
"pip3 install --upgrade pip; "
"pip3 install --upgrade wheel; "
"pip3 install --upgrade wandb distributed; ",
# "wandb login; ",
]
self.logger.print(" ".join(cmd))
p = subprocess.run(cmd)
return p.returncode
def delete_vm(self) -> int:
"""Delete the VM.
:return:
"""
p = subprocess.run(
[
"gcloud",
"compute",
"instances",
"delete",
self.config.instance_name,
"--quiet",
]
)
return p.returncode
if __name__ == "__main__":
commands: List[Command] = ["gke", "gce"]
parser = argparse.ArgumentParser()
# add verbose option
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="print verbose output",
)
subparsers = parser.add_subparsers(
dest="target", title="target", description="target platform"
)
subparsers_store = {command: subparsers.add_parser(command) for command in commands}
for command, subparser in subparsers_store.items():
try:
cli = getattr(sys.modules[__name__], command.upper())
except AttributeError:
continue
actions = [
func
for func in dir(cli)
if callable(getattr(cli, func)) and not func.startswith("__")
]
subparser.add_argument("command", choices=actions, help="command to run")
target_config = getattr(sys.modules[__name__], f"{command.upper()}Config")
for field in fields(target_config):
subparser.add_argument(
f"--{field.name}",
type=field.type,
default=field.default,
help=f"type: {field.type.__name__}; default: {field.default}",
)
parser_arguments = vars(parser.parse_args())
print(parser_arguments)
target = parser_arguments.pop("target")
v = parser_arguments.pop("verbose")
command = parser_arguments.pop("command")
cli_class = getattr(sys.modules[__name__], target.upper())
config_class = getattr(sys.modules[__name__], f"{target.upper()}Config")
cli = cli_class(config=config_class(**parser_arguments), verbose=v)
exit_code = getattr(cli, command)()
sys.exit(exit_code)
| GCE |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/clients/base.py | {
"start": 802,
"end": 26368
} | class ____:
"""Base OAuth2 client responsible for access token management.
This class also acts as a generic interface providing methods common to all
client types such as ``prepare_authorization_request`` and
``prepare_token_revocation_request``. The ``prepare_x_request`` methods are
the recommended way of interacting with clients (as opposed to the abstract
prepare uri/body/etc methods). They are recommended over the older set
because they are easier to use (more consistent) and add a few additional
security checks, such as HTTPS and state checking.
Some of these methods require further implementation only provided by the
specific purpose clients such as
:py:class:`oauthlib.oauth2.MobileApplicationClient` and thus you should always
seek to use the client class matching the OAuth workflow you need. For
Python, this is usually :py:class:`oauthlib.oauth2.WebApplicationClient`.
"""
refresh_token_key = 'refresh_token'
def __init__(self, client_id,
default_token_placement=AUTH_HEADER,
token_type='Bearer',
access_token=None,
refresh_token=None,
mac_key=None,
mac_algorithm=None,
token=None,
scope=None,
state=None,
redirect_url=None,
state_generator=generate_token,
code_verifier=None,
code_challenge=None,
code_challenge_method=None,
**kwargs):
"""Initialize a client with commonly used attributes.
:param client_id: Client identifier given by the OAuth provider upon
registration.
:param default_token_placement: Tokens can be supplied in the Authorization
header (default), the URL query component (``query``) or the request
body (``body``).
:param token_type: OAuth 2 token type. Defaults to Bearer. Change this
if you specify the ``access_token`` parameter and know it is of a
different token type, such as a MAC, JWT or SAML token. Can
also be supplied as ``token_type`` inside the ``token`` dict parameter.
:param access_token: An access token (string) used to authenticate
requests to protected resources. Can also be supplied inside the
``token`` dict parameter.
:param refresh_token: A refresh token (string) used to refresh expired
tokens. Can also be supplied inside the ``token`` dict parameter.
:param mac_key: Encryption key used with MAC tokens.
:param mac_algorithm: Hashing algorithm for MAC tokens.
:param token: A dict of token attributes such as ``access_token``,
``token_type`` and ``expires_at``.
:param scope: A list of default scopes to request authorization for.
:param state: A CSRF protection string used during authorization.
:param redirect_url: The redirection endpoint on the client side to which
the user returns after authorization.
:param state_generator: A no argument state generation callable. Defaults
to :py:meth:`oauthlib.common.generate_token`.
:param code_verifier: PKCE parameter. A cryptographically random string that is used to correlate the
authorization request to the token request.
:param code_challenge: PKCE parameter. A challenge derived from the code verifier that is sent in the
authorization request, to be verified against later.
:param code_challenge_method: PKCE parameter. A method that was used to derive code challenge.
Defaults to "plain" if not present in the request.
"""
self.client_id = client_id
self.default_token_placement = default_token_placement
self.token_type = token_type
self.access_token = access_token
self.refresh_token = refresh_token
self.mac_key = mac_key
self.mac_algorithm = mac_algorithm
self.token = token or {}
self.scope = scope
self.state_generator = state_generator
self.state = state
self.redirect_url = redirect_url
self.code_verifier = code_verifier
self.code_challenge = code_challenge
self.code_challenge_method = code_challenge_method
self.code = None
self.expires_in = None
self._expires_at = None
self.populate_token_attributes(self.token)
@property
def token_types(self):
"""Supported token types and their respective methods
Additional tokens can be supported by extending this dictionary.
The Bearer token spec is stable and safe to use.
The MAC token spec is not yet stable and support for MAC tokens
is experimental and currently matching version 00 of the spec.
"""
return {
'Bearer': self._add_bearer_token,
'MAC': self._add_mac_token
}
def prepare_request_uri(self, *args, **kwargs):
"""Abstract method used to create request URIs."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def prepare_request_body(self, *args, **kwargs):
"""Abstract method used to create request bodies."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def parse_request_uri_response(self, *args, **kwargs):
"""Abstract method used to parse redirection responses."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def add_token(self, uri, http_method='GET', body=None, headers=None,
token_placement=None, **kwargs):
"""Add token to the request uri, body or authorization header.
The access token type provides the client with the information
required to successfully utilize the access token to make a protected
resource request (along with type-specific attributes). The client
MUST NOT use an access token if it does not understand the token
type.
For example, the "bearer" token type defined in
[`I-D.ietf-oauth-v2-bearer`_] is utilized by simply including the access
token string in the request:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: Bearer mF_9.B5f-4.1JqM
while the "mac" token type defined in [`I-D.ietf-oauth-v2-http-mac`_] is
utilized by issuing a MAC key together with the access token which is
used to sign certain components of the HTTP requests:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: MAC id="h480djs93hd8",
nonce="274312:dj83hs9s",
mac="kDZvddkndxvhGRXZhvuDjEWhGeE="
.. _`I-D.ietf-oauth-v2-bearer`: https://tools.ietf.org/html/rfc6749#section-12.2
.. _`I-D.ietf-oauth-v2-http-mac`: https://tools.ietf.org/html/rfc6749#section-12.2
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
token_placement = token_placement or self.default_token_placement
case_insensitive_token_types = {
k.lower(): v for k, v in self.token_types.items()}
if self.token_type.lower() not in case_insensitive_token_types:
raise ValueError("Unsupported token type: %s" % self.token_type)
if not (self.access_token or self.token.get('access_token')):
raise ValueError("Missing access token.")
if self._expires_at and self._expires_at < time.time():
raise TokenExpiredError()
return case_insensitive_token_types[self.token_type.lower()](uri, http_method, body,
headers, token_placement, **kwargs)
def prepare_authorization_request(self, authorization_url, state=None,
redirect_url=None, scope=None, **kwargs):
"""Prepare the authorization request.
This is the first step in many OAuth flows in which the user is
redirected to a certain authorization URL. This method adds
required parameters to the authorization URL.
:param authorization_url: Provider authorization endpoint URL.
:param state: CSRF protection string. Will be automatically created if
not provided. The generated state is available via the ``state``
attribute. Clients should verify that the state is unchanged and
present in the authorization response. This verification is done
automatically if using the ``authorization_response`` parameter
with ``prepare_token_request``.
:param redirect_url: Redirect URL to which the user will be returned
after authorization. Must be provided unless previously setup with
the provider. If provided then it must also be provided in the
token request.
:param scope: List of scopes to request. Must be equal to
or a subset of the scopes granted when obtaining the refresh
token. If none is provided, the ones provided in the constructor are
used.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(authorization_url):
raise InsecureTransportError()
self.state = state or self.state_generator()
self.redirect_url = redirect_url or self.redirect_url
# do not assign scope to self automatically anymore
scope = self.scope if scope is None else scope
auth_url = self.prepare_request_uri(
authorization_url, redirect_uri=self.redirect_url,
scope=scope, state=self.state, **kwargs)
return auth_url, FORM_ENC_HEADERS, ''
def prepare_token_request(self, token_url, authorization_response=None,
redirect_url=None, state=None, body='', **kwargs):
"""Prepare a token creation request.
Note that these requests usually require client authentication, either
by including client_id or a set of provider specific authentication
credentials.
:param token_url: Provider token creation endpoint URL.
:param authorization_response: The full redirection URL string, i.e.
the location to which the user was redirected after successful
authorization. Used to mine credentials needed to obtain a token
in this step, such as authorization code.
:param redirect_url: The redirect_url supplied with the authorization
request (if there was one).
:param state:
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
state = state or self.state
if authorization_response:
self.parse_request_uri_response(
authorization_response, state=state)
self.redirect_url = redirect_url or self.redirect_url
body = self.prepare_request_body(body=body,
redirect_uri=self.redirect_url, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_refresh_token_request(self, token_url, refresh_token=None,
body='', scope=None, **kwargs):
"""Prepare an access token refresh request.
Expired access tokens can be replaced by new access tokens without
going through the OAuth dance if the client obtained a refresh token.
This refresh token and authentication credentials can be used to
obtain a new access token, and possibly a new refresh token.
:param token_url: Provider token refresh endpoint URL.
:param refresh_token: Refresh token string.
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param scope: List of scopes to request. Must be equal to
or a subset of the scopes granted when obtaining the refresh
token. If none is provided, the ones provided in the constructor are
used.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
# do not assign scope to self automatically anymore
scope = self.scope if scope is None else scope
body = self.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=scope, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_token_revocation_request(self, revocation_url, token,
token_type_hint="access_token", body='', callback=None, **kwargs):
"""Prepare a token revocation request.
:param revocation_url: Provider token revocation endpoint URL.
:param token: The access or refresh token to be revoked (string).
:param token_type_hint: ``"access_token"`` (default) or
``"refresh_token"``. This is optional and if you wish to not pass it you
must provide ``token_type_hint=None``.
:param body:
:param callback: A jsonp callback such as ``package.callback`` to be invoked
upon receiving the response. Not that it should not include a () suffix.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
Note that JSONP request may use GET requests as the parameters will
be added to the request URL query as opposed to the request body.
An example of a revocation request
.. code-block:: http
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
An example of a jsonp revocation request
.. code-block:: http
GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
and an error response
.. code-block:: javascript
package.myCallback({"error":"unsupported_token_type"});
Note that these requests usually require client credentials, client_id in
the case for public clients and provider specific authentication
credentials for confidential clients.
"""
if not is_secure_transport(revocation_url):
raise InsecureTransportError()
return prepare_token_revocation_request(revocation_url, token,
token_type_hint=token_type_hint, body=body, callback=callback,
**kwargs)
def parse_request_body_response(self, body, scope=None, **kwargs):
"""Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
:param body: The response body from the token request.
:param scope: Scopes originally requested. If none is provided, the ones
provided in the constructor are used.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. :py:class:`oauthlib.oauth2.errors.OAuth2Error`
if response is invalid.
These response are json encoded and could easily be parsed without
the assistance of OAuthLib. However, there are a few subtle issues
to be aware of regarding the response which are helpfully addressed
through the raising of various errors.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
While it is not mandated it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are required to only
if it has changed since the authorization request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
scope = self.scope if scope is None else scope
self.token = parse_token_response(body, scope=scope)
self.populate_token_attributes(self.token)
return self.token
def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
"""Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the `application/x-www-form-urlencoded`
format in the HTTP request entity-body:
:param refresh_token: REQUIRED. The refresh token issued to the client.
:param scope: OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner. Note that if none is provided, the ones provided
in the constructor are used if any.
"""
refresh_token = refresh_token or self.refresh_token
scope = self.scope if scope is None else scope
return prepare_token_request(self.refresh_token_key, body=body, scope=scope,
refresh_token=refresh_token, **kwargs)
def _add_bearer_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=None):
"""Add a bearer token to the request uri, body or authorization header."""
if token_placement == AUTH_HEADER:
headers = tokens.prepare_bearer_headers(self.access_token, headers)
elif token_placement == URI_QUERY:
uri = tokens.prepare_bearer_uri(self.access_token, uri)
elif token_placement == BODY:
body = tokens.prepare_bearer_body(self.access_token, body)
else:
raise ValueError("Invalid token placement.")
return uri, headers, body
def create_code_verifier(self, length):
"""Create PKCE **code_verifier** used in computing **code_challenge**.
See `RFC7636 Section 4.1`_
:param length: REQUIRED. The length of the code_verifier.
The client first creates a code verifier, "code_verifier", for each
OAuth 2.0 [RFC6749] Authorization Request, in the following manner:
.. code-block:: text
code_verifier = high-entropy cryptographic random STRING using the
unreserved characters [A-Z] / [a-z] / [0-9] / "-" / "." / "_" / "~"
from Section 2.3 of [RFC3986], with a minimum length of 43 characters
and a maximum length of 128 characters.
.. _`RFC7636 Section 4.1`: https://tools.ietf.org/html/rfc7636#section-4.1
"""
code_verifier = None
if not length >= 43:
raise ValueError("Length must be greater than or equal to 43")
if not length <= 128:
raise ValueError("Length must be less than or equal to 128")
code_verifier = generate_token(length, UNICODE_ASCII_CHARACTER_SET + "-._~")
self.code_verifier = code_verifier
return code_verifier
def create_code_challenge(self, code_verifier, code_challenge_method=None):
"""Create PKCE **code_challenge** derived from the **code_verifier**.
See `RFC7636 Section 4.2`_
:param code_verifier: REQUIRED. The **code_verifier** generated from `create_code_verifier()`.
:param code_challenge_method: OPTIONAL. The method used to derive the **code_challenge**. Acceptable values include `S256`. DEFAULT is `plain`.
The client then creates a code challenge derived from the code
verifier by using one of the following transformations on the code
verifier::
plain
code_challenge = code_verifier
S256
code_challenge = BASE64URL-ENCODE(SHA256(ASCII(code_verifier)))
If the client is capable of using `S256`, it MUST use `S256`, as
`S256` is Mandatory To Implement (MTI) on the server. Clients are
permitted to use `plain` only if they cannot support `S256` for some
technical reason and know via out-of-band configuration that the
server supports `plain`.
The plain transformation is for compatibility with existing
deployments and for constrained environments that can't use the S256 transformation.
.. _`RFC7636 Section 4.2`: https://tools.ietf.org/html/rfc7636#section-4.2
"""
code_challenge = None
if code_verifier is None:
raise ValueError("Invalid code_verifier")
if code_challenge_method is None:
code_challenge_method = "plain"
self.code_challenge_method = code_challenge_method
code_challenge = code_verifier
self.code_challenge = code_challenge
if code_challenge_method == "S256":
h = hashlib.sha256()
h.update(code_verifier.encode(encoding='ascii'))
sha256_val = h.digest()
code_challenge = bytes.decode(base64.urlsafe_b64encode(sha256_val))
# replace '+' with '-', '/' with '_', and remove trailing '='
code_challenge = code_challenge.replace("+", "-").replace("/", "_").replace("=", "")
self.code_challenge = code_challenge
return code_challenge
def _add_mac_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
"""Add a MAC token to the request authorization header.
Warning: MAC token support is experimental as the spec is not yet stable.
"""
if token_placement != AUTH_HEADER:
raise ValueError("Invalid token placement.")
headers = tokens.prepare_mac_header(self.access_token, uri,
self.mac_key, http_method, headers=headers, body=body, ext=ext,
hash_algorithm=self.mac_algorithm, **kwargs)
return uri, headers, body
def _populate_attributes(self, response):
warnings.warn("Please switch to the public method "
"populate_token_attributes.", DeprecationWarning)
return self.populate_token_attributes(response)
def populate_code_attributes(self, response):
"""Add attributes from an auth code response to self."""
if 'code' in response:
self.code = response.get('code')
def populate_token_attributes(self, response):
"""Add attributes from a token exchange response to self."""
if 'access_token' in response:
self.access_token = response.get('access_token')
if 'refresh_token' in response:
self.refresh_token = response.get('refresh_token')
if 'token_type' in response:
self.token_type = response.get('token_type')
vin, vat, v_at = parse_expires(response)
if vin:
self.expires_in = vin
if vat:
self.expires_at = vat
if v_at:
self._expires_at = v_at
if 'mac_key' in response:
self.mac_key = response.get('mac_key')
if 'mac_algorithm' in response:
self.mac_algorithm = response.get('mac_algorithm')
| Client |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/processing_phi4_multimodal.py | {
"start": 1036,
"end": 1196
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"audio_kwargs": {
"device": "cpu",
},
}
| Phi4MultimodalProcessorKwargs |
python | tensorflow__tensorflow | tensorflow/core/function/runtime_client/runtime_client_test.py | {
"start": 7367,
"end": 9379
} | class ____(test.TestCase):
@test_util.run_v2_only
def setUp(self):
super().setUp()
workers, _ = test_util.create_local_cluster(2, 0)
remote.connect_to_remote_host(
[workers[0].target, workers[1].target])
self.device0 = "/job:worker/replica:0/task:0/device:CPU:0"
self.device1 = "/job:worker/replica:0/task:1/device:CPU:0"
@test_util.run_v2_only
def tearDown(self):
super().tearDown()
# Clear the current device scope to avoid polluting other test cases.
ops.device(None).__enter__()
# Reset the context to avoid polluting other test cases.
context._reset_context()
@test_util.run_v2_only
def test_transform_function_in_remote_contexts(self):
"""Tests if function_defs in remote contexts could be transformed."""
@def_function.function
def add(x, y):
return math_ops.add(x, y, name="x_plus_y")
inputs = [1.0, 2.0]
with ops.device(self.device0):
result = add(*inputs)
self.assertAllEqual(result, 3.0)
self.assertEqual(result.device, self.device0)
with ops.device(self.device1):
result = add(*inputs)
self.assertAllEqual(result, 3.0)
self.assertEqual(result.device, self.device1)
cf = add.get_concrete_function(*inputs)
function_name = cf.function_def.signature.name
ctx = runtime_client.GlobalPythonEagerContext()
rt = runtime_client.Runtime(ctx)
# "test-pass" converts add to multiply.
rt.TransformFunction(function_name, "test-pass")
fndef = rt.GetFunctionProto(function_name)
rt.CreateFunction(fndef)
with ops.device(self.device0):
result = add(*inputs)
self.assertAllEqual(result, 2.0)
self.assertEqual(result.device, self.device0)
with ops.device(self.device1):
result = add(*inputs)
self.assertAllEqual(result, 2.0)
self.assertEqual(result.device, self.device1)
if __name__ == "__main__":
context.set_soft_device_placement(False)
test_pass.RegisterTestPass()
test.main()
| RuntimeClientMultiWorkersTest |
python | keras-team__keras | keras/src/utils/audio_dataset_utils_test.py | {
"start": 169,
"end": 16560
} | class ____(testing.TestCase):
def _get_audio_samples(self, count=16, different_sequence_lengths=False):
sequence_length = 30
num_channels = 1
audio_samples = []
for _ in range(count):
if different_sequence_lengths:
random_sequence_length = np.random.randint(
10, sequence_length + 1
)
audio = np.random.random((random_sequence_length, num_channels))
else:
audio = np.random.random((sequence_length, num_channels))
audio_samples.append(tf.audio.encode_wav(audio, 1000))
return audio_samples
def _prepare_directory(
self,
num_classes=2,
nested_dirs=False,
count=16,
different_sequence_lengths=False,
):
# Get a unique temp directory
temp_dir = self.get_temp_dir()
# Generate paths to class subdirectories
paths = []
for class_index in range(num_classes):
class_directory = f"class_{class_index}"
if nested_dirs:
class_paths = [
class_directory,
os.path.join(class_directory, "subfolder_1"),
os.path.join(class_directory, "subfolder_2"),
os.path.join(
class_directory, "subfolder_1", "sub-subfolder"
),
]
else:
class_paths = [class_directory]
for path in class_paths:
os.mkdir(os.path.join(temp_dir, path))
paths += class_paths
# Save audio samples to the paths
i = 0
for audio in self._get_audio_samples(
count=count, different_sequence_lengths=different_sequence_lengths
):
path = paths[i % len(paths)]
ext = "wav"
filename = os.path.join(path, f"audio_{i}.{ext}")
with open(os.path.join(temp_dir, filename), "wb") as f:
f.write(audio.numpy())
i += 1
return temp_dir
def test_audio_dataset_from_directory_standalone(self):
# Test retrieving audio samples without labels from a directory and its
# subdirs.
# Save a few extra audio in the parent directory.
directory = self._prepare_directory(count=7, num_classes=2)
for i, audio in enumerate(self._get_audio_samples(3)):
filename = f"audio_{i}.wav"
with open(os.path.join(directory, filename), "wb") as f:
f.write(audio.numpy())
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=5, output_sequence_length=30, labels=None
)
batch = next(iter(dataset))
# We return plain audio
self.assertEqual(batch.shape, (5, 30, 1))
self.assertEqual(batch.dtype.name, "float32")
# Count samples
batch_count = 0
sample_count = 0
for batch in dataset:
batch_count += 1
sample_count += batch.shape[0]
self.assertEqual(batch_count, 2)
self.assertEqual(sample_count, 10)
def test_audio_dataset_from_directory_binary(self):
directory = self._prepare_directory(num_classes=2)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode="int"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=8,
output_sequence_length=30,
label_mode="binary",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 1))
self.assertEqual(batch[1].dtype.name, "float32")
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=8,
output_sequence_length=30,
label_mode="categorical",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 2))
self.assertEqual(batch[1].dtype.name, "float32")
def test_static_shape_in_graph(self):
directory = self._prepare_directory(num_classes=2)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode="int"
)
test_case = self
@tf.function
def symbolic_fn(ds):
for x, _ in ds.take(1):
test_case.assertListEqual(x.shape.as_list(), [None, 30, None])
symbolic_fn(dataset)
def test_sample_count(self):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode=None
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 15)
def test_audio_dataset_from_directory_multiclass(self):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode=None
)
batch = next(iter(dataset))
self.assertEqual(batch.shape, (8, 30, 1))
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode=None
)
sample_count = 0
iterator = iter(dataset)
for batch in dataset:
sample_count += next(iterator).shape[0]
self.assertEqual(sample_count, 15)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode="int"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=8,
output_sequence_length=30,
label_mode="categorical",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 4))
self.assertEqual(batch[1].dtype.name, "float32")
    def test_audio_dataset_from_directory_validation_split(self):
        """validation_split=0.2 over 10 files gives an 8/2 training/validation
        split; the fixed seed makes the split deterministic."""
        directory = self._prepare_directory(num_classes=2, count=10)
        # Training subset: 80% of 10 files -> a single batch of 8.
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=10,
            output_sequence_length=30,
            validation_split=0.2,
            subset="training",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8, 30, 1))
        # Validation subset: remaining 20% -> a single batch of 2.
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=10,
            output_sequence_length=30,
            validation_split=0.2,
            subset="validation",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (2, 30, 1))
    def test_audio_dataset_from_directory_manual_labels(self):
        """Explicit `labels=[...]` are matched to files in traversal order
        (presumably alphanumeric — shuffle=False keeps it deterministic)."""
        directory = self._prepare_directory(num_classes=2, count=2)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=8,
            output_sequence_length=30,
            labels=[0, 1],
            shuffle=False,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertAllClose(batch[1], [0, 1])
def test_audio_dataset_from_directory_follow_links(self):
directory = self._prepare_directory(
num_classes=2, count=25, nested_dirs=True
)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=8,
output_sequence_length=30,
label_mode=None,
follow_links=True,
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 25)
    def test_audio_dataset_from_directory_no_audio(self):
        """An empty directory raises a clear ValueError instead of silently
        yielding an empty dataset."""
        directory = self._prepare_directory(num_classes=2, count=0)
        with self.assertRaisesRegex(
            ValueError, "No audio files found in directory"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(directory)
    def test_audio_dataset_from_directory_ragged(self):
        """ragged=True keeps per-sample lengths: time and channel dimensions
        are dynamic (None) in the batched shape."""
        directory = self._prepare_directory(
            num_classes=2, count=16, different_sequence_lengths=True
        )
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory, ragged=True, batch_size=8
        )
        batch = next(iter(dataset))
        self.assertEqual(batch[0].shape.as_list(), [8, None, None])
def test_audio_dataset_from_directory_no_output_sequence_length_no_ragged(
self,
):
# This test case tests `audio_dataset_from_directory` when `ragged` and
# `output_sequence_length` are not passed while the input sequence
# lengths are different.
directory = self._prepare_directory(
num_classes=2, count=16, different_sequence_lengths=True
)
# The tensor shapes are different and output_sequence_length is None
# should work fine and pad each sequence to the length of the longest
# sequence in it's batch
min_sequence_length, max_sequence_length = 10, 30
possible_sequence_lengths = [
i for i in range(min_sequence_length, max_sequence_length + 1)
]
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=2
)
sequence_lengths = list(set([b.shape[1] for b, _ in dataset]))
for seq_len in sequence_lengths:
self.assertIn(seq_len, possible_sequence_lengths)
def test_audio_dataset_from_directory_no_output_sequence_length_same_lengths( # noqa: E501
self,
):
# This test case tests `audio_dataset_from_directory` when `ragged` and
# `output_sequence_length` are not passed while the input sequence
# lengths are the same
directory = self._prepare_directory(
num_classes=2, count=16, different_sequence_lengths=False
)
# The tensor shapes are different and output_sequence_length is None
# should work fine and pad each sequence to the length of the longest
# sequence in it's batch
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=2
)
sequence_lengths = list(set([batch[0].shape[1] for batch in dataset]))
self.assertEqual(len(sequence_lengths), 1)
    def test_audio_dataset_from_directory_errors(self):
        """Each invalid argument combination raises a targeted error."""
        directory = self._prepare_directory(num_classes=3, count=5)
        # --- sampling_rate must be a positive integer ---
        with self.assertRaisesRegex(
            ValueError, "`sampling_rate` should be higher than 0. Received:"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory,
                ragged=False,
                output_sequence_length=10,
                sampling_rate=-1,
            )
        with self.assertRaisesRegex(
            ValueError,
            "`sampling_rate` should have an integer value. Received:",
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory,
                ragged=False,
                output_sequence_length=10,
                sampling_rate=1.2,
            )
        # Only run this test case when we don't have tensorflow_io
        # (resampling requires it, so a valid sampling_rate must fail then).
        try:
            import tensorflow_io  # noqa: F401
        except ImportError:
            with self.assertRaisesRegex(
                ImportError,
                "To use the argument `sampling_rate`.*tensorflow_io.*",
            ):
                _ = audio_dataset_utils.audio_dataset_from_directory(
                    directory,
                    ragged=False,
                    output_sequence_length=10,
                    sampling_rate=44100,
                )
        # --- mutually exclusive / malformed arguments ---
        with self.assertRaisesRegex(
            ValueError, "Cannot set both `ragged` and `output_sequence_length`"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, ragged=True, output_sequence_length=30
            )
        with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, labels="other"
            )
        with self.assertRaisesRegex(
            ValueError, "`label_mode` argument must be"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, label_mode="other"
            )
        # --- labels / class_names consistency checks ---
        with self.assertRaisesRegex(
            ValueError, 'only pass `class_names` if `labels="inferred"`'
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory,
                labels=[0, 0, 1, 1, 1],
                class_names=["class_0", "class_1", "class_2"],
            )
        with self.assertRaisesRegex(
            ValueError,
            "Expected the lengths of `labels` to match the number of files",
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, labels=[0, 0, 1, 1]
            )
        with self.assertRaisesRegex(
            ValueError, "`class_names` passed did not match"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, class_names=["class_0", "wrong_class"]
            )
        # binary label_mode requires exactly two classes; directory has three.
        with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, label_mode="binary"
            )
        # --- validation_split / subset checks ---
        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be between 0 and 1"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, validation_split=2
            )
        with self.assertRaisesRegex(
            ValueError, '`subset` must be either "training",'
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, validation_split=0.2, subset="other"
            )
        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be set"
        ):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, validation_split=0.0, subset="training"
            )
        with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
            _ = audio_dataset_utils.audio_dataset_from_directory(
                directory, validation_split=0.2, subset="training"
            )
    def test_audio_dataset_from_directory_not_batched(self):
        """batch_size=None yields individual rank-2 samples (time, channels)
        instead of rank-3 batches."""
        directory = self._prepare_directory(num_classes=2, count=2)
        dataset = audio_dataset_utils.audio_dataset_from_directory(
            directory,
            batch_size=None,
            output_sequence_length=30,
            label_mode=None,
            shuffle=False,
        )
        sample = next(iter(dataset))
        self.assertEqual(len(sample.shape), 2)
| AudioDatasetFromDirectoryTest |
python | crytic__slither | slither/slithir/tmp_operations/tmp_call.py | {
"start": 750,
"end": 3455
} | class ____(OperationWithLValue): # pylint: disable=too-many-instance-attributes
    def __init__(
        self,
        called: SourceMapping,
        nbr_arguments: int,
        result: Union[TupleVariable, TemporaryVariable],
        type_call: str,
        names: Optional[List[str]] = None,
    ) -> None:
        # pylint: disable=too-many-arguments
        """
        #### Parameters
        names -
            For calls of the form f({argName1 : arg1, ...}), the names of parameters listed in call order.
            Otherwise, None.
        """
        # The callee may be any callable-ish entity (contract creation,
        # variable of function type, builtin, struct/event/custom error, ...).
        assert isinstance(
            called,
            (
                Contract,
                Variable,
                SolidityVariableComposed,
                SolidityFunction,
                Structure,
                Event,
                CustomError,
            ),
        )
        super().__init__()
        self._called = called
        self._nbr_arguments = nbr_arguments
        self._type_call = type_call
        self._names = names
        self._lvalue = result
        # Originating expression; populated later via set_ori().
        self._ori = None
        # Optional low-level call metadata ({value: ...}, {gas: ...}, salt,
        # call id); set through the property setters when present.
        self._callid = None
        self._gas = None
        self._value = None
        self._salt = None
    @property
    def names(self) -> Optional[List[str]]:
        """
        For calls of the form f({argName1 : arg1, ...}), the names of parameters listed in call order.
        Otherwise, None.
        """
        return self._names

    # --- Optional low-level call options; None when absent. ---

    @property
    def call_value(self):
        return self._value

    @call_value.setter
    def call_value(self, v):
        self._value = v

    @property
    def call_gas(self):
        return self._gas

    @call_gas.setter
    def call_gas(self, gas):
        self._gas = gas

    @property
    def call_salt(self):
        return self._salt

    @call_salt.setter
    def call_salt(self, salt):
        self._salt = salt

    @property
    def call_id(self):
        return self._callid

    @call_id.setter
    def call_id(self, c):
        self._callid = c

    @property
    def read(self):
        # Only the callee expression is read by this IR operation.
        return [self.called]

    @property
    def called(self):
        return self._called

    @called.setter
    def called(self, c):
        self._called = c

    @property
    def nbr_arguments(self) -> int:
        return self._nbr_arguments

    @property
    def type_call(self) -> str:
        return self._type_call

    @property
    def ori(self) -> Optional[Union[TmpNewContract, TmpNewArray, "TmpCall", Member]]:
        # The expression this temporary call was derived from, if recorded.
        return self._ori

    def set_ori(self, ori: Union["TmpCall", TmpNewContract, TmpNewArray, Member]) -> None:
        self._ori = ori

    def __str__(self):
        return str(self.lvalue) + f" = TMPCALL{self.nbr_arguments} " + str(self._called)
| TmpCall |
python | google__python-fire | fire/test_components.py | {
"start": 7530,
"end": 8404
} | class ____:
"""Test class for testing help text output with multiline docstring.
This is a test class that has a long docstring description that spans across
multiple lines for testing line breaking in help text.
"""
  @staticmethod
  # NOTE(review): this docstring is itself the help-text fixture — Fire's
  # tests assert on its exact content, so do not rewrite or reflow it.
  def example_generator(n):
    """Generators have a ``Yields`` section instead of a ``Returns`` section.

    Args:
      n (int): The upper limit of the range to generate, from 0 to `n` - 1.

    Yields:
      int: The next number in the range of 0 to `n` - 1.

    Examples:
      Examples should be written in doctest format, and should illustrate how
      to use the function.

      >>> print([i for i in example_generator(4)])
      [0, 1, 2, 3]
    """
    yield from range(n)
def simple_set():
  """Return a small heterogeneous set."""
  return set((1, 2, 'three'))
def simple_frozenset():
  """Return a small heterogeneous frozenset."""
  return frozenset((1, 2, 'three'))
| ClassWithMultilineDocstring |
python | kamyu104__LeetCode-Solutions | Python/count-good-triplets-in-an-array.py | {
"start": 540,
"end": 1093
} | class ____(object):
    def goodTriplets(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: int
        """
        # lookup[v] = position of value v in nums1 (both arrays are
        # permutations of 0..n-1).
        lookup = [0]*len(nums1)
        for i, x in enumerate(nums1):
            lookup[x] = i
        result = 0
        # BIT (Fenwick tree, defined elsewhere in this file) over nums1
        # positions, marking values already seen while scanning nums2.
        bit = BIT(len(nums1))
        for i, x in enumerate(nums2):
            # Values seen so far in nums2 that also precede x in nums1:
            # candidates for the first element of a good triplet.
            smaller = bit.query(lookup[x]-1)
            # Values after x in nums1 that have not yet appeared in nums2:
            # candidates for the third element.
            larger = (len(nums1)-(lookup[x]+1))-(i-smaller)
            result += smaller*larger
            bit.add(lookup[x], 1)
        return result
| Solution |
python | kamyu104__LeetCode-Solutions | Python/find-duplicate-file-in-system.py | {
"start": 99,
"end": 751
} | class ____(object):
    def findDuplicate(self, paths):
        """
        :type paths: List[str]
        :rtype: List[List[str]]
        """
        # NOTE: Python 2 code (xrange / iteritems).
        # Map file content -> list of full paths sharing that content.
        files = collections.defaultdict(list)
        for path in paths:
            # Input format: "<dir> <name1>(<content1>) <name2>(<content2>) ..."
            s = path.split(" ")
            for i in xrange(1,len(s)):
                file_name = s[0] + "/" + s[i][0:s[i].find("(")]
                file_content = s[i][s[i].find("(")+1:s[i].find(")")]
                files[file_content].append(file_name)
        result = []
        for file_content, file_names in files.iteritems():
            # Only contents shared by two or more files are duplicates.
            if len(file_names)>1:
                result.append(file_names)
        return result
| Solution |
python | ZoranPandovski__al-go-rithms | math/RunningMedian/python/runningMedian.py | {
"start": 0,
"end": 3582
} | class ____:
    def __init__(self,key):
        """AVL tree node holding `key`, with order-statistic bookkeeping."""
        self.left=None    # left child
        self.right=None   # right child
        self.data=key     # stored value
        self.bf=0         # balance factor: left height minus right height
        self.count=1      # multiplicity of data (duplicates share one node)
        self.parent=None  # parent pointer (None for the root)
        self.lkids=0      # elements (incl. duplicates) in the left subtree
        self.rkids=0      # elements (incl. duplicates) in the right subtree
def rotateNBalance(node):
    """Rebalance `node` whose balance factor has left [-1, 1].

    bf < 0: right-heavy — single left rotation (RR) or right-then-left (RL).
    bf > 0: left-heavy  — single right rotation (LL) or left-then-right (LR).
    """
    if node.bf < 0:
        if node.right.bf <= 0:
            rotateLeft(node)  # RR case
        else:
            rotateRight(node.right)  # RL case: straighten, then rotate
            rotateLeft(node)
    elif node.bf > 0:
        if node.left.bf >=0:
            rotateRight(node)  # LL case
        else:
            rotateLeft(node.left)  # LR case: straighten, then rotate
            rotateRight(node)
def rotateLeft(node):
    """Left-rotate around `node`: its right child becomes the subtree root.

    Updates parent links, the global `rootNode` when needed, the element
    counts (lkids/rkids) and the balance factors of the two nodes involved.
    """
    newNode=node.right
    node.right=newNode.left
    if newNode.left:
        newNode.left.parent=node
    if node.parent:
        newNode.parent=node.parent
    if node.parent is None: # means node is root
        global rootNode
        newNode.parent=None
        rootNode=newNode
    else:
        # Re-point the parent's child link to the new subtree root.
        if node.parent.left==node:
            node.parent.left=newNode
        else:
            node.parent.right=newNode
    newNode.left=node
    node.parent=newNode
    # lkids/rkids track element counts (duplicates included): transfer
    # newNode's former left-subtree size onto node's right side.
    node.rkids=newNode.lkids
    newNode.lkids+=node.lkids+node.count
    # Standard AVL balance-factor update after a left rotation.
    node.bf=node.bf+1-min(0,newNode.bf)
    newNode.bf=newNode.bf+1+max(0,node.bf)
def rotateRight(node):
    """Right-rotate around `node`: its left child becomes the subtree root.

    Mirror image of rotateLeft(); updates parent links, the global
    `rootNode`, element counts (lkids/rkids) and balance factors.
    """
    newNode=node.left
    node.left=newNode.right
    if newNode.right:
        newNode.right.parent=node
    if node.parent:
        newNode.parent=node.parent
    if node.parent is None:
        global rootNode
        newNode.parent=None
        rootNode=newNode
    else:
        # Re-point the parent's child link to the new subtree root.
        if node.parent.left==node:
            node.parent.left=newNode
        else:
            node.parent.right=newNode
    newNode.right=node
    node.parent=newNode
    # Transfer newNode's former right-subtree size onto node's left side.
    node.lkids=newNode.rkids
    newNode.rkids+=node.rkids+node.count
    # Standard AVL balance-factor update after a right rotation.
    node.bf=node.bf-1-max(0,newNode.bf)
    newNode.bf=newNode.bf-1+min(0,node.bf)
def updateBF(node):
    """Retrace from a newly inserted node upward, adjusting balance factors
    and rotating when a factor leaves [-1, 1]."""
    if(node.bf > 1 or node.bf < -1):
        rotateNBalance(node)
        return
    if node.parent:
        if node==node.parent.left:
            node.parent.bf+=1
        elif node==node.parent.right:
            node.parent.bf-=1
        # bf == 0 means the parent's height is unchanged; stop retracing.
        if node.parent.bf!=0:
            updateBF(node.parent)
def insert(root,node):
    """Insert `node` into the AVL tree rooted at `root`.

    Duplicates bump `count` on the existing node instead of adding a node;
    subtree element counts (lkids/rkids) are incremented along the descent
    path, and rebalancing starts from the new leaf via updateBF().
    """
    if root is None:
        global rootNode
        rootNode = node  # first element becomes the tree root
    else:
        if(node.data==root.data):
            root.count+=1
            return
        if(node.data < root.data):
            root.lkids+=1
            if root.left is None:
                node.parent=root
                root.left=node
                updateBF(node)
            else:
                insert(root.left,node)
        else:
            root.rkids+=1
            if root.right is None:
                node.parent=root
                root.right=node
                updateBF(node)
            else:
                insert(root.right,node)
def kth(root,x):
    """Return the node holding the x-th smallest element (1-based), counting
    duplicates via node.count."""
    if(x == root.lkids+root.count):
        return root
    elif(x<root.lkids+root.count):
        # Target is left of this node unless it falls inside this node's
        # duplicate run (lkids < x). NOTE(review): the `lkids == 0` guard
        # looks like a defensive clamp when the left subtree is empty —
        # confirm against insert()'s size invariants.
        if(root.lkids==0 or root.lkids<x):
            return root
        return kth(root.left,x)
    else:
        # Target is in the right subtree; same defensive clamp when empty.
        if(root.rkids==0):
            return root
        return kth(root.right,x-root.lkids-root.count)
# Driver: running median — after each insertion print the median of all
# values seen so far (middle element for odd counts, mean of the two middle
# elements for even counts).
N=int(input()) # number of elements to be inserted
rootNode=None  # global AVL root; rebound by insert() and the rotations
for i in range(1,N+1):
    x=int(input())
    insert(rootNode,Node(x)) # insert an element in AVL tree
    m=kth(rootNode,(i//2)+1) #find k+1th minimum
    if(i&1):
        print(float(m.data)) # print if total nodes are odd
    else:
        m2=kth(rootNode,(i//2)) # find kth minimum
        print(float((m.data+m2.data)/2)) # print avg of 2 median if total nodes are even
python | pallets__werkzeug | tests/test_wrappers.py | {
"start": 41216,
"end": 43916
} | class ____:
def test_secure(self):
response = wrappers.Response()
response.set_cookie(
"foo",
value="bar",
max_age=60,
expires=0,
path="/blub",
domain="example.org",
secure=True,
samesite=None,
)
assert response.headers.to_wsgi_list() == [
("Content-Type", "text/plain; charset=utf-8"),
(
"Set-Cookie",
"foo=bar; Domain=example.org;"
" Expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=60;"
" Secure; Path=/blub",
),
]
def test_httponly(self):
response = wrappers.Response()
response.set_cookie(
"foo",
value="bar",
max_age=60,
expires=0,
path="/blub",
domain="example.org",
secure=False,
httponly=True,
samesite=None,
)
assert response.headers.to_wsgi_list() == [
("Content-Type", "text/plain; charset=utf-8"),
(
"Set-Cookie",
"foo=bar; Domain=example.org;"
" Expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=60;"
" HttpOnly; Path=/blub",
),
]
def test_secure_and_httponly(self):
response = wrappers.Response()
response.set_cookie(
"foo",
value="bar",
max_age=60,
expires=0,
path="/blub",
domain="example.org",
secure=True,
httponly=True,
samesite=None,
)
assert response.headers.to_wsgi_list() == [
("Content-Type", "text/plain; charset=utf-8"),
(
"Set-Cookie",
"foo=bar; Domain=example.org;"
" Expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=60;"
" Secure; HttpOnly; Path=/blub",
),
]
def test_samesite(self):
response = wrappers.Response()
response.set_cookie(
"foo",
value="bar",
max_age=60,
expires=0,
path="/blub",
domain="example.org",
secure=False,
samesite="strict",
)
assert response.headers.to_wsgi_list() == [
("Content-Type", "text/plain; charset=utf-8"),
(
"Set-Cookie",
"foo=bar; Domain=example.org;"
" Expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=60;"
" Path=/blub; SameSite=Strict",
),
]
| TestSetCookie |
python | ansible__ansible | lib/ansible/galaxy/api.py | {
"start": 7598,
"end": 9295
} | class ____(AnsibleError):
""" Error for bad Galaxy server responses. """
    def __init__(self, http_error, message):
        """Build a rich error message from a Galaxy HTTP error response.

        :param http_error: The HTTPError raised by the failed request.
        :param message: Human readable context to prefix the server detail.
        """
        super(GalaxyError, self).__init__(message)
        self.http_code = http_error.code
        self.url = http_error.geturl()

        try:
            http_msg = to_text(http_error.read())
            err_info = json.loads(http_msg)
        except (AttributeError, ValueError):
            # Body missing or not JSON — fall back to defaults below.
            err_info = {}

        url_split = self.url.split('/')
        if 'v3' in url_split:
            # v3 API: {"errors": [{"detail": ..., "title": ..., "code": ...}]}
            errors = err_info.get('errors', [])
            if not errors:
                errors = [{}]  # Defaults are set below, we just need to make sure 1 error is present.

            message_lines = []
            for error in errors:
                error_msg = error.get('detail') or error.get('title') or http_error.reason
                error_code = error.get('code') or 'Unknown'
                message_line = u"(HTTP Code: %d, Message: %s Code: %s)" % (self.http_code, error_msg, error_code)
                message_lines.append(message_line)

            full_error_msg = "%s %s" % (message, ', '.join(message_lines))
        else:
            # v1 and unknown API endpoints
            galaxy_msg = err_info.get('default', http_error.reason)
            full_error_msg = u"%s (HTTP Code: %d, Message: %s)" % (message, self.http_code, galaxy_msg)

        self.message = to_native(full_error_msg)
# Keep the raw string results for the date. It's too complex to parse as a datetime object and the various APIs return
# them in different formats.
CollectionMetadata = collections.namedtuple('CollectionMetadata', ['namespace', 'name', 'created_str', 'modified_str'])
| GalaxyError |
python | sympy__sympy | sympy/series/sequences.py | {
"start": 11521,
"end": 12320
} | class ____(SeqBase, metaclass=Singleton):
"""Represents an empty sequence.
The empty sequence is also available as a singleton as
``S.EmptySequence``.
Examples
========
>>> from sympy import EmptySequence, SeqPer
>>> from sympy.abc import x
>>> EmptySequence
EmptySequence
>>> SeqPer((1, 2), (x, 0, 10)) + EmptySequence
SeqPer((1, 2), (x, 0, 10))
>>> SeqPer((1, 2)) * EmptySequence
EmptySequence
>>> EmptySequence.coeff_mul(-1)
EmptySequence
"""
    @property
    def interval(self):
        # An empty sequence spans no points.
        return S.EmptySet

    @property
    def length(self):
        return S.Zero

    def coeff_mul(self, coeff):
        """See docstring of SeqBase.coeff_mul"""
        # Scaling an empty sequence is a no-op.
        return self

    def __iter__(self):
        return iter([])
| EmptySequence |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 13659,
"end": 13805
} | class ____(OpcodeWithArg): # Arg: Number of raise args (1, 2, or 3)
_FLAGS = HAS_ARGUMENT | HAS_JUNKNOWN | NO_NEXT
__slots__ = ()
| RAISE_VARARGS |
python | langchain-ai__langchain | libs/partners/exa/langchain_exa/retrievers.py | {
"start": 1205,
"end": 4341
} | class ____(BaseRetriever):
"""Exa Search retriever."""
k: int = 10 # num_results
"""The number of search results to return (1 to 100)."""
include_domains: list[str] | None = None
"""A list of domains to include in the search."""
exclude_domains: list[str] | None = None
"""A list of domains to exclude from the search."""
start_crawl_date: str | None = None
"""The start date for the crawl (in YYYY-MM-DD format)."""
end_crawl_date: str | None = None
"""The end date for the crawl (in YYYY-MM-DD format)."""
start_published_date: str | None = None
"""The start date for when the document was published (in YYYY-MM-DD format)."""
end_published_date: str | None = None
"""The end date for when the document was published (in YYYY-MM-DD format)."""
use_autoprompt: bool | None = None
"""Whether to use autoprompt for the search."""
type: str = "neural"
"""The type of search, 'keyword', 'neural', or 'auto'. Default: neural"""
highlights: HighlightsContentsOptions | bool | None = None
"""Whether to set the page content to the highlights of the results."""
text_contents_options: TextContentsOptions | dict[str, Any] | Literal[True] = True
"""How to set the page content of the results. Can be True or a dict with options
like max_characters."""
livecrawl: Literal["always", "fallback", "never"] | None = None
"""Option to crawl live webpages if content is not in the index. Options: "always",
"fallback", "never"."""
summary: bool | dict[str, str] | None = None
"""Whether to include a summary of the content. Can be a boolean or a dict with a
custom query."""
client: Exa = Field(default=None) # type: ignore[assignment]
exa_api_key: SecretStr = Field(default=SecretStr(""))
exa_base_url: str | None = None
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: dict) -> Any:
        """Validate the environment."""
        # Delegates to the shared helper which wires up `client` from the
        # key/base-url fields (presumably reading env vars — see
        # initialize_client for the exact resolution order).
        return initialize_client(values)

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        """Run an Exa search with every configured filter and map each result
        to a langchain Document (text as page_content, rest as metadata)."""
        response = self.client.search_and_contents(  # type: ignore[call-overload]
            query,
            num_results=self.k,
            text=self.text_contents_options,
            highlights=self.highlights,
            include_domains=self.include_domains,
            exclude_domains=self.exclude_domains,
            start_crawl_date=self.start_crawl_date,
            end_crawl_date=self.end_crawl_date,
            start_published_date=self.start_published_date,
            end_published_date=self.end_published_date,
            use_autoprompt=self.use_autoprompt,
            livecrawl=self.livecrawl,
            summary=self.summary,
            type=self.type,
        )  # type: ignore[call-overload, misc]

        results = response.results

        return [
            Document(
                page_content=(result.text),
                metadata=_get_metadata(result),
            )
            for result in results
        ]
| ExaSearchRetriever |
python | django__django | tests/distinct_on_fields/models.py | {
"start": 31,
"end": 346
} | class ____(models.Model):
    name = models.CharField(max_length=10)
    # Optional self-reference: a tag may have a parent tag. Deleting the
    # parent nulls this link (SET_NULL) rather than cascading.
    parent = models.ForeignKey(
        "self",
        models.SET_NULL,
        blank=True,
        null=True,
        related_name="children",
    )

    class Meta:
        # Default queryset ordering; also what distinct-on tests rely on.
        ordering = ["name"]

    def __str__(self):
        return self.name
| Tag |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/cli/worker.py | {
"start": 2593,
"end": 17898
} | class ____:
"""Runner instance which executes the Edge Worker."""
    # ---- Class-level singleton state (shared with the signal handlers) ----
    jobs: list[Job] = []
    """List of jobs that the worker is running currently."""
    last_hb: datetime | None = None
    """Timestamp of last heart beat sent to server."""
    drain: bool = False
    """Flag if job processing should be completed and no new jobs fetched for a graceful stop/shutdown."""
    maintenance_mode: bool = False
    """Flag if job processing should be completed and no new jobs fetched for maintenance mode. """
    maintenance_comments: str | None = None
    """Comments for maintenance mode."""
    edge_instance: EdgeWorker | None = None
    """Singleton instance of the worker."""

    def __init__(
        self,
        pid_file_path: str,
        hostname: str,
        queues: list[str] | None,
        concurrency: int,
        job_poll_interval: int,
        heartbeat_interval: int,
        daemon: bool = False,
    ):
        """Store the worker configuration and register self as the singleton."""
        self.pid_file_path = pid_file_path
        self.job_poll_interval = job_poll_interval
        self.hb_interval = heartbeat_interval
        self.hostname = hostname
        self.queues = queues
        self.concurrency = concurrency
        self.free_concurrency = concurrency  # slots not used by running jobs
        self.daemon = daemon
        EdgeWorker.edge_instance = self
    @staticmethod
    def signal_handler(sig: signal.Signals, frame):
        """Handle SIG_STATUS (maintenance toggle / status dump); any other
        handled signal starts a graceful drain."""
        if sig == SIG_STATUS:
            # A maintenance marker file, if present, carries an on/off request.
            marker_path = Path(maintenance_marker_file_path(None))
            if marker_path.exists():
                request = MaintenanceMarker.from_json(marker_path.read_text())
                logger.info("Requested to set maintenance mode to %s", request.maintenance)
                EdgeWorker.maintenance_mode = request.maintenance == "on"
                if EdgeWorker.maintenance_mode and request.comments:
                    logger.info("Comments: %s", request.comments)
                    EdgeWorker.maintenance_comments = request.comments
                marker_path.unlink()
                # send heartbeat immediately to update state
                if EdgeWorker.edge_instance:
                    EdgeWorker.edge_instance.heartbeat(EdgeWorker.maintenance_comments)
            else:
                # No marker file: the signal is a status query; write snapshot.
                logger.info("Request to get status of Edge Worker received.")
                status_path = Path(status_file_path(None))
                status_path.write_text(
                    WorkerStatus(
                        job_count=len(EdgeWorker.jobs),
                        jobs=[job.edge_job.key for job in EdgeWorker.jobs],
                        state=EdgeWorker._get_state(),
                        maintenance=EdgeWorker.maintenance_mode,
                        maintenance_comments=EdgeWorker.maintenance_comments,
                        drain=EdgeWorker.drain,
                    ).json
                )
        else:
            logger.info("Request to shut down Edge Worker received, waiting for jobs to complete.")
            EdgeWorker.drain = True

    def shutdown_handler(self, sig, frame):
        """SIGTERM: terminate the running jobs' process groups, then drain."""
        logger.info("SIGTERM received. Terminating all jobs and quit")
        for job in EdgeWorker.jobs:
            os.killpg(job.process.pid, signal.SIGTERM)
        EdgeWorker.drain = True

    def _get_sysinfo(self) -> dict:
        """Produce the sysinfo from worker to post to central site."""
        return {
            "airflow_version": airflow_version,
            "edge_provider_version": edge_provider_version,
            "concurrency": self.concurrency,
            "free_concurrency": self.free_concurrency,
        }
@staticmethod
def _get_state() -> EdgeWorkerState:
"""State of the Edge Worker."""
if EdgeWorker.jobs:
if EdgeWorker.drain:
return EdgeWorkerState.TERMINATING
if EdgeWorker.maintenance_mode:
return EdgeWorkerState.MAINTENANCE_PENDING
return EdgeWorkerState.RUNNING
if EdgeWorker.drain:
if EdgeWorker.maintenance_mode:
return EdgeWorkerState.OFFLINE_MAINTENANCE
return EdgeWorkerState.OFFLINE
if EdgeWorker.maintenance_mode:
return EdgeWorkerState.MAINTENANCE_MODE
return EdgeWorkerState.IDLE
    @staticmethod
    @cache
    def _execution_api_server_url() -> str:
        """Get the execution api server url from config or environment."""
        api_url = conf.get("edge", "api_url")
        execution_api_server_url = conf.get("core", "execution_api_server_url", fallback="")
        if not execution_api_server_url and api_url:
            # Derive execution api url from edge api url as fallback
            execution_api_server_url = api_url.replace("edge_worker/v1/rpcapi", "execution")
        logger.info("Using execution api server url: %s", execution_api_server_url)
        return execution_api_server_url

    @staticmethod
    def _run_job_via_supervisor(workload, execution_api_server_url) -> int:
        """Child-process entry point: run one task under the SDK supervisor.

        Returns 0 on success and 1 on failure (used as process exit code).
        """
        from airflow.sdk.execution_time.supervisor import supervise

        # Ignore ctrl-c in this process -- we don't want to kill _this_ one. we let tasks run to completion
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        logger.info("Worker starting up pid=%d", os.getpid())
        setproctitle(f"airflow edge worker: {workload.ti.key}")

        try:
            supervise(
                # This is the "wrong" ti type, but it duck types the same. TODO: Create a protocol for this.
                # Same like in airflow/executors/local_executor.py:_execute_work()
                ti=workload.ti,  # type: ignore[arg-type]
                dag_rel_path=workload.dag_rel_path,
                bundle_info=workload.bundle_info,
                token=workload.token,
                server=execution_api_server_url,
                log_path=workload.log_path,
            )
            return 0
        except Exception as e:
            logger.exception("Task execution failed: %s", e)
            return 1

    @staticmethod
    def _launch_job_af3(edge_job: EdgeJobFetched) -> tuple[Process, Path]:
        """Airflow 3.x: run the workload in a multiprocessing.Process."""
        if TYPE_CHECKING:
            from airflow.executors.workloads import ExecuteTask

        workload: ExecuteTask = edge_job.command
        process = Process(
            target=EdgeWorker._run_job_via_supervisor,
            kwargs={"workload": workload, "execution_api_server_url": EdgeWorker._execution_api_server_url()},
        )
        process.start()
        base_log_folder = conf.get("logging", "base_log_folder", fallback="NOT AVAILABLE")
        if TYPE_CHECKING:
            assert workload.log_path  # We need to assume this is defined in here
        logfile = Path(base_log_folder, workload.log_path)
        return process, logfile

    @staticmethod
    def _launch_job_af2_10(edge_job: EdgeJobFetched) -> tuple[Popen, Path]:
        """Compatibility for Airflow 2.10 Launch."""
        # Run the CLI command in a subprocess with internal-API isolation on.
        env = os.environ.copy()
        env["AIRFLOW__CORE__DATABASE_ACCESS_ISOLATION"] = "True"
        env["AIRFLOW__CORE__INTERNAL_API_URL"] = conf.get("edge", "api_url")
        env["_AIRFLOW__SKIP_DATABASE_EXECUTOR_COMPATIBILITY_CHECK"] = "1"
        command: list[str] = edge_job.command  # type: ignore[assignment]
        process = Popen(command, close_fds=True, env=env, start_new_session=True)
        logfile = logs_logfile_path(edge_job.key)
        return process, logfile
    @staticmethod
    def _launch_job(edge_job: EdgeJobFetched):
        """Get the received job executed."""
        process: Popen | Process
        if AIRFLOW_V_3_0_PLUS:
            process, logfile = EdgeWorker._launch_job_af3(edge_job)
        else:
            # Airflow 2.10
            process, logfile = EdgeWorker._launch_job_af2_10(edge_job)
        # Track the job with a zero log offset; logs are pushed incrementally.
        EdgeWorker.jobs.append(Job(edge_job, process, logfile, 0))
    def start(self):
        """Start the execution in a loop until terminated.

        Registers with the central site, installs signal handlers, runs
        loop() until drained, then reports the worker offline.
        """
        try:
            self.last_hb = worker_register(
                self.hostname, EdgeWorkerState.STARTING, self.queues, self._get_sysinfo()
            ).last_update
        except EdgeWorkerVersionException as e:
            logger.info("Version mismatch of Edge worker and Core. Shutting down worker.")
            raise SystemExit(str(e))
        except EdgeWorkerDuplicateException as e:
            logger.error(str(e))
            raise SystemExit(str(e))
        except HTTPError as e:
            if e.response.status_code == HTTPStatus.NOT_FOUND:
                raise SystemExit("Error: API endpoint is not ready, please set [edge] api_enabled=True.")
            raise SystemExit(str(e))
        if not self.daemon:
            write_pid_to_pidfile(self.pid_file_path)
        signal.signal(signal.SIGINT, EdgeWorker.signal_handler)
        signal.signal(SIG_STATUS, EdgeWorker.signal_handler)
        signal.signal(signal.SIGTERM, self.shutdown_handler)
        # Make task logs/hostname resolution report this worker's hostname.
        os.environ["HOSTNAME"] = self.hostname
        os.environ["AIRFLOW__CORE__HOSTNAME_CALLABLE"] = f"{_edge_hostname.__module__}._edge_hostname"
        try:
            self.worker_state_changed = self.heartbeat()
            self.last_hb = datetime.now()
            # Keep looping while not draining, or while jobs still finish.
            while not EdgeWorker.drain or EdgeWorker.jobs:
                self.loop()

            logger.info("Quitting worker, signal being offline.")
            try:
                worker_set_state(
                    self.hostname,
                    EdgeWorkerState.OFFLINE_MAINTENANCE
                    if EdgeWorker.maintenance_mode
                    else EdgeWorkerState.OFFLINE,
                    0,
                    self.queues,
                    self._get_sysinfo(),
                )
            except EdgeWorkerVersionException:
                logger.info("Version mismatch of Edge worker and Core. Quitting worker anyway.")
        finally:
            if not self.daemon:
                remove_existing_pidfile(self.pid_file_path)
def loop(self):
"""Run a loop of scheduling and monitoring tasks."""
new_job = False
previous_jobs = EdgeWorker.jobs
if not any((EdgeWorker.drain, EdgeWorker.maintenance_mode)) and self.free_concurrency > 0:
new_job = self.fetch_job()
self.check_running_jobs()
if (
EdgeWorker.drain
or datetime.now().timestamp() - self.last_hb.timestamp() > self.hb_interval
or self.worker_state_changed # send heartbeat immediately if the state is different in db
or bool(previous_jobs) != bool(EdgeWorker.jobs) # when number of jobs changes from/to 0
):
self.worker_state_changed = self.heartbeat()
self.last_hb = datetime.now()
if not new_job:
self.interruptible_sleep()
    def fetch_job(self) -> bool:
        """Fetch and start a new job from central site."""
        logger.debug("Attempting to fetch a new job...")
        edge_job = jobs_fetch(self.hostname, self.queues, self.free_concurrency)
        if edge_job:
            logger.info("Received job: %s", edge_job)
            EdgeWorker._launch_job(edge_job)
            jobs_set_state(edge_job.key, TaskInstanceState.RUNNING)
            return True

        logger.info(
            "No new job to process%s",
            f", {len(EdgeWorker.jobs)} still running" if EdgeWorker.jobs else "",
        )
        return False

    def check_running_jobs(self) -> None:
        """Check which of the running tasks/jobs are completed and report back."""
        used_concurrency = 0
        # Iterate backwards so finished jobs can be removed while iterating.
        for i in range(len(EdgeWorker.jobs) - 1, -1, -1):
            job = EdgeWorker.jobs[i]
            if not job.is_running:
                EdgeWorker.jobs.remove(job)
                if job.is_success:
                    logger.info("Job completed: %s", job.edge_job)
                    jobs_set_state(job.edge_job.key, TaskInstanceState.SUCCESS)
                else:
                    logger.error("Job failed: %s", job.edge_job)
                    jobs_set_state(job.edge_job.key, TaskInstanceState.FAILED)
            else:
                used_concurrency += job.edge_job.concurrency_slots

            # Push any new log output to the central site in bounded chunks;
            # job.logsize tracks the byte offset already pushed.
            if (
                conf.getboolean("edge", "push_logs")
                and job.logfile.exists()
                and job.logfile.stat().st_size > job.logsize
            ):
                with job.logfile.open("rb") as logfile:
                    push_log_chunk_size = conf.getint("edge", "push_log_chunk_size")
                    logfile.seek(job.logsize, os.SEEK_SET)
                    read_data = logfile.read()
                    job.logsize += len(read_data)
                    # backslashreplace to keep not decoded characters and not raising exception
                    # replace null with question mark to fix issue during DB push
                    log_data = read_data.decode(errors="backslashreplace").replace("\x00", "\ufffd")
                    while True:
                        chunk_data = log_data[:push_log_chunk_size]
                        log_data = log_data[push_log_chunk_size:]
                        if not chunk_data:
                            break

                        logs_push(
                            task=job.edge_job.key,
                            log_chunk_time=timezone.utcnow(),
                            log_chunk_data=chunk_data,
                        )

        self.free_concurrency = self.concurrency - used_concurrency
def heartbeat(self, new_maintenance_comments: str | None = None) -> bool:
"""Report liveness state of worker to central site with stats."""
state = EdgeWorker._get_state()
sysinfo = self._get_sysinfo()
worker_state_changed: bool = False
try:
worker_info = worker_set_state(
self.hostname,
state,
len(EdgeWorker.jobs),
self.queues,
sysinfo,
new_maintenance_comments,
)
self.queues = worker_info.queues
if worker_info.state == EdgeWorkerState.MAINTENANCE_REQUEST:
logger.info("Maintenance mode requested!")
EdgeWorker.maintenance_mode = True
elif (
worker_info.state in [EdgeWorkerState.IDLE, EdgeWorkerState.RUNNING]
and EdgeWorker.maintenance_mode
):
logger.info("Exit Maintenance mode requested!")
EdgeWorker.maintenance_mode = False
if EdgeWorker.maintenance_mode:
EdgeWorker.maintenance_comments = worker_info.maintenance_comments
else:
EdgeWorker.maintenance_comments = None
if worker_info.state == EdgeWorkerState.SHUTDOWN_REQUEST:
logger.info("Shutdown requested!")
EdgeWorker.drain = True
worker_state_changed = worker_info.state != state
except EdgeWorkerVersionException:
logger.info("Version mismatch of Edge worker and Core. Shutting down worker.")
EdgeWorker.drain = True
return worker_state_changed
def interruptible_sleep(self):
"""Sleeps but stops sleeping if drain is made."""
drain_before_sleep = EdgeWorker.drain
for _ in range(0, self.job_poll_interval * 10):
sleep(0.1)
if drain_before_sleep != EdgeWorker.drain:
return
| EdgeWorker |
python | ansible__ansible | test/units/parsing/test_ajson.py | {
"start": 2915,
"end": 6946
} | class ____:
"""
Namespace for testing AnsibleJSONEncoder.
"""
@pytest.fixture(scope='class')
def mapping(self, request):
"""
Returns object of Mapping mock class.
The object is used for testing handling of Mapping objects
in AnsibleJSONEncoder.default().
Using a plain dictionary instead is not suitable because
it is handled by default encoder of the superclass (json.JSONEncoder).
"""
class M(Mapping):
"""Mock mapping class."""
def __init__(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
def __getitem__(self, key):
return self.__dict__[key]
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
mapping = M(request.param)
assert isinstance(len(mapping), int) # ensure coverage of __len__
return mapping
@pytest.fixture
def ansible_json_encoder(self):
"""Return AnsibleJSONEncoder object."""
return LegacyControllerJSONEncoder()
###############
# Test methods:
@pytest.mark.parametrize(
'test_input,expected',
[
(datetime(2019, 5, 14, 13, 39, 38, 569047), '2019-05-14T13:39:38.569047'),
(datetime(2019, 5, 14, 13, 47, 16, 923866), '2019-05-14T13:47:16.923866'),
(date(2019, 5, 14), '2019-05-14'),
(date(2020, 5, 14), '2020-05-14'),
(datetime(2019, 6, 15, 14, 45, tzinfo=timezone.utc), '2019-06-15T14:45:00+00:00'),
(datetime(2019, 6, 15, 14, 45, tzinfo=timezone(timedelta(hours=1, minutes=40))), '2019-06-15T14:45:00+01:40'),
]
)
def test_date_datetime(self, ansible_json_encoder, test_input, expected):
"""
Test for passing datetime.date or datetime.datetime objects to AnsibleJSONEncoder.default().
"""
assert ansible_json_encoder.default(test_input) == expected
@pytest.mark.parametrize(
'mapping,expected',
[
({'1': 1}, {'1': 1}),
({'2': 2}, {'2': 2}),
({'1': 2}, {'1': 2}),
({'2': 1}, {'2': 1}),
], indirect=['mapping'],
)
def test_mapping(self, ansible_json_encoder, mapping, expected):
"""
Test for passing Mapping object to AnsibleJSONEncoder.default().
"""
assert ansible_json_encoder.default(mapping) == expected
@pytest.mark.parametrize('test_input,expected', vault_data())
def test_ansible_json_encoder_vault(self, test_input, expected):
"""
Test for passing vaulted values to AnsibleJSONEncoder.default().
"""
profile = _legacy
assert json.dumps(test_input, cls=get_encoder(profile)) == '{"__ansible_vault": "%s"}' % expected.replace('\n', '\\n')
@pytest.mark.parametrize(
'test_input,expected',
[
({'1': 'first'}, {'1': 'first'}),
({'2': 'second'}, {'2': 'second'}),
]
)
def test_default_encoder(self, ansible_json_encoder, test_input, expected):
"""
Test for the default encoder of AnsibleJSONEncoder.default().
If objects of different classes that are not tested above were passed,
AnsibleJSONEncoder.default() invokes 'default()' method of json.JSONEncoder superclass.
"""
assert ansible_json_encoder.default(test_input) == expected
@pytest.mark.parametrize("trust_input_str", (
True,
False
))
def test_string_trust_propagation(trust_input_str: bool) -> None:
"""Verify that input trust propagation behaves as expected. The presence of trust on the input string determines if trust is applied to outputs."""
data = '{"foo": "bar"}'
if trust_input_str:
data = TrustedAsTemplate().tag(data)
res = json.loads(data, cls=_legacy.Decoder)
assert trust_input_str == TrustedAsTemplate.is_tagged_on(res['foo'])
| TestAnsibleJSONEncoder |
python | facelessuser__pymdown-extensions | pymdownx/_bypassnorm.py | {
"start": 1018,
"end": 1368
} | class ____(Preprocessor):
"""Preprocessor to clean up normalization bypass hack."""
def run(self, lines):
"""Convert alternate placeholder symbols to actual placeholder symbols."""
source = '\n'.join(lines)
source = source.replace(SOH, STX).replace(EOT, ETX)
return source.split('\n')
| PostNormalizePreprocessor |
python | ZoranPandovski__al-go-rithms | data_structures/trie/Python/trie_word_search.py | {
"start": 0,
"end": 79
} | class ____:
def __init__(self):
self.ch=[None]*26
self.endofword=None
| Trienode |
python | scipy__scipy | benchmarks/benchmarks/fft_basic.py | {
"start": 651,
"end": 1682
} | class ____:
"""Backend for pyfftw"""
__ua_domain__ = 'numpy.scipy.fft'
@staticmethod
def __ua_function__(method, args, kwargs):
kwargs.pop('overwrite_x', None)
fn = getattr(pyfftw_fft, method.__name__, None)
return (NotImplemented if fn is None
else fn(*args, **kwargs))
def random(size):
return rand(*size)
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)
return y
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)/n
return y
def get_module(mod_name):
module_map = {
'scipy.fftpack': scipy.fftpack,
'scipy.fft': scipy_fft,
'numpy.fft': numpy.fft
}
if not has_scipy_fft and mod_name == 'scipy.fft':
raise NotImplementedError
return module_map[mod_name]
| PyfftwBackend |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/retriever.py | {
"start": 622,
"end": 7088
} | class ____(BaseRetriever):
def __init__(
self, table: Union[AsyncTable, Table], multimodal: bool, **kwargs: Any
):
self.table = table
self.multimodal = multimodal
callback_manager = kwargs.get("callback_manager")
verbose = kwargs.get("verbose", False)
super().__init__(callback_manager, verbose)
def _retrieve(self, query_bundle: ExtendedQueryBundle) -> List[NodeWithScore]:
if not self.multimodal:
return query_text(table=self.table, query=query_bundle.query_str)
else:
if not query_bundle.image and not query_bundle.image_path:
raise ValueError(
"No image or image_path has been provided, but retrieval is set to multi-modal."
)
elif query_bundle.image:
return query_multimodal(table=self.table, query=query_bundle.image)
elif query_bundle.image_path:
img = ImageBlock(path=query_bundle.image_path)
return query_multimodal(table=self.table, query=img)
else:
return []
async def _aretrieve(
self, query_bundle: ExtendedQueryBundle
) -> List[NodeWithScore]:
if not self.multimodal:
return await aquery_text(table=self.table, query=query_bundle.query_str)
else:
if not query_bundle.image and not query_bundle.image_path:
raise ValueError(
"No image or image_path has been provided, but retrieval is set to multi-modal."
)
elif query_bundle.image:
return await aquery_multimodal(
table=self.table, query=query_bundle.image
)
elif query_bundle.image_path:
img = ImageBlock(path=query_bundle.image_path)
return await aquery_multimodal(table=self.table, query=img)
else:
return []
@override
def retrieve(
self,
query_str: Optional[str] = None,
query_image: Optional[
Union[Image.Image, ImageBlock, ImageDocument, str]
] = None,
query_image_path: Optional[os.PathLike[str]] = None,
) -> List[NodeWithScore]:
"""
Retrieves nodes relevant to the given query.
Args:
query_str (Optional[str]): The text query string. Required if the retriever is not multimodal.
query_image (Optional[Union[Image.Image, ImageBlock, ImageDocument, str]]): The image query, which can be a PIL Image, ImageBlock, ImageDocument, or a string path/URL. Used if the retriever is multimodal.
query_image_path (Optional[os.PathLike[str]]): The file path to the image query. Used if the retriever is multimodal.
Returns:
List[NodeWithScore]: A list of nodes with associated relevance scores.
Raises:
ValueError: If none of the query parameters are provided.
ValueError: If a text query is not provided for a non-multimodal retriever.
ValueError: If neither an image nor image path is provided for a multimodal retriever.
"""
if not query_str and not query_image and not query_image_path:
raise ValueError(
"At least one among query_str, query_image and query_image_path needs to be set"
)
if not self.multimodal:
if query_str:
query_bundle = ExtendedQueryBundle(query_str=query_str)
else:
raise ValueError(
"No query_str provided, but the retriever is not multimodal"
)
else:
if query_image:
query_bundle = ExtendedQueryBundle(query_str="", image=query_image)
elif query_image_path:
query_bundle = ExtendedQueryBundle(
query_str="", image_path=query_image_path
)
else:
raise ValueError(
"No query_image or query_image_path provided, but the retriever is multimodal"
)
return self._retrieve(query_bundle=query_bundle)
@override
async def aretrieve(
self,
query_str: Optional[str] = None,
query_image: Optional[
Union[Image.Image, ImageBlock, ImageDocument, str]
] = None,
query_image_path: Optional[os.PathLike[str]] = None,
) -> List[NodeWithScore]:
"""
Asynchronously retrieves nodes relevant to the given query.
Args:
query_str (Optional[str]): The text query string. Required if the retriever is not multimodal.
query_image (Optional[Union[Image.Image, ImageBlock, ImageDocument, str]]): The image query, which can be a PIL Image, ImageBlock, ImageDocument, or a string path/URL. Used if the retriever is multimodal.
query_image_path (Optional[os.PathLike[str]]): The file path to the image query. Used if the retriever is multimodal.
Returns:
List[NodeWithScore]: A list of nodes with associated relevance scores.
Raises:
ValueError: If none of the query parameters are provided.
ValueError: If a text query is not provided for a non-multimodal retriever.
ValueError: If neither an image nor image path is provided for a multimodal retriever.
"""
if not query_str and not query_image and not query_image_path:
raise ValueError(
"At least one among query_str, query_image and query_image_path needs to be set"
)
if not self.multimodal:
if query_str:
query_bundle = ExtendedQueryBundle(query_str=query_str)
else:
raise ValueError(
"No query_str provided, but the retriever is not multimodal"
)
else:
if query_image:
query_bundle = ExtendedQueryBundle(query_str="", image=query_image)
elif query_image_path:
query_bundle = ExtendedQueryBundle(
query_str="", image_path=query_image_path
)
else:
raise ValueError(
"No query_image or query_image_path provided, but the retriever is multimodal"
)
return await self._aretrieve(query_bundle=query_bundle)
| LanceDBRetriever |
python | tensorflow__tensorflow | tensorflow/python/data/ops/multi_device_iterator_ops.py | {
"start": 1690,
"end": 7199
} | class ____(dataset_ops.DatasetV2):
"""A `dummy` generator dataset."""
def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
source_device, element_spec, iterator_is_anonymous):
self._element_spec = element_spec
self._name = f"device_generator_{shard_num}"
multi_device_iterator_string_handle = (
gen_dataset_ops.multi_device_iterator_to_string_handle(
multi_device_iterator_resource))
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@def_function.function(autograph=False) # Pure graph code.
def _init_func():
return multi_device_iterator_string_handle
init_func_concrete = _init_func.get_concrete_function()
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@def_function.function(autograph=False) # Pure graph code.
def _remote_init_func():
return functional_ops.remote_call(
target=source_device,
args=init_func_concrete.captured_inputs,
Tout=[dtypes.string],
f=init_func_concrete)
self._init_func = _remote_init_func.get_concrete_function()
self._init_captured_args = self._init_func.captured_inputs
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
autograph=False) # Pure graph code.
def _next_func(string_handle):
# pylint: disable=protected-access
multi_device_iterator = (
gen_dataset_ops.multi_device_iterator_from_string_handle(
string_handle=string_handle,
output_types=structure.get_flat_tensor_types(self._element_spec),
output_shapes=structure.get_flat_tensor_shapes(
self._element_spec)))
return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
multi_device_iterator=multi_device_iterator,
shard_num=shard_num,
incarnation_id=incarnation_id,
output_types=structure.get_flat_tensor_types(self._element_spec),
output_shapes=structure.get_flat_tensor_shapes(self._element_spec))
next_func_concrete = _next_func.get_concrete_function()
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
experimental_attributes={"experimental_ints_on_device": True},
autograph=False) # Pure graph code.
def _remote_next_func(string_handle):
return_values = functional_ops.remote_call(
target=source_device,
args=[string_handle] + next_func_concrete.captured_inputs,
Tout=structure.get_flat_tensor_types(self._element_spec),
f=next_func_concrete)
# Add full type information to the graph so that the RemoteCall op
# can determine for each of its outputs whether or not they are ragged
# tensors (or other types that use variants) that contain strings
# (or other host memory types). Then RemoteCall can
# appropriately set AllocatorAttributes to control copies so
# strings/host memory types stay on CPU.
fulltype_list = type_utils.fulltypes_for_flat_tensors(self._element_spec)
fulltype = type_utils.fulltype_list_to_product(fulltype_list)
for return_value in return_values:
return_value.op.experimental_set_type(fulltype)
return return_values
self._next_func = _remote_next_func.get_concrete_function()
self._next_captured_args = self._next_func.captured_inputs
if iterator_is_anonymous:
self._next_captured_args = self._next_captured_args + [
multi_device_iterator_resource
]
self._incarnation_id_index = -1
for i, arg in enumerate(self._next_captured_args):
if arg is incarnation_id:
self._incarnation_id_index = i
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
autograph=False) # Pure graph code.
def _finalize_func(unused_string_handle):
return array_ops.constant(0, dtypes.int64)
finalize_func_concrete = _finalize_func.get_concrete_function()
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
autograph=False) # Pure graph code.
def _remote_finalize_func(string_handle):
return functional_ops.remote_call(
target=source_device,
args=[string_handle] + finalize_func_concrete.captured_inputs,
Tout=[dtypes.int64],
f=finalize_func_concrete)
self._finalize_func = _remote_finalize_func.get_concrete_function()
self._finalize_captured_args = self._finalize_func.captured_inputs
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_captured_args,
self._next_captured_args,
self._finalize_captured_args,
init_func=self._init_func,
next_func=self._next_func,
finalize_func=self._finalize_func,
**self._flat_structure)
super(_PerDeviceGenerator, self).__init__(variant_tensor)
def _inputs(self):
# TODO(b/116506223): Determine which datasets should be used as inputs here.
return []
@property
def element_spec(self):
return self._element_spec
| _PerDeviceGenerator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.