repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
zarr-developers/zarr | zarr/hierarchy.py | Group.move | def move(self, source, dest):
"""Move contents from one path to another relative to the Group.
Parameters
----------
source : string
Name or path to a Zarr object to move.
dest : string
New name or path of the Zarr object.
"""
source = self._item_path(source)
dest = self._item_path(dest)
# Check that source exists.
if not (contains_array(self._store, source) or
contains_group(self._store, source)):
raise ValueError('The source, "%s", does not exist.' % source)
if contains_array(self._store, dest) or contains_group(self._store, dest):
raise ValueError('The dest, "%s", already exists.' % dest)
# Ensure groups needed for `dest` exist.
if "/" in dest:
self.require_group("/" + dest.rsplit("/", 1)[0])
self._write_op(self._move_nosync, source, dest) | python | def move(self, source, dest):
"""Move contents from one path to another relative to the Group.
Parameters
----------
source : string
Name or path to a Zarr object to move.
dest : string
New name or path of the Zarr object.
"""
source = self._item_path(source)
dest = self._item_path(dest)
# Check that source exists.
if not (contains_array(self._store, source) or
contains_group(self._store, source)):
raise ValueError('The source, "%s", does not exist.' % source)
if contains_array(self._store, dest) or contains_group(self._store, dest):
raise ValueError('The dest, "%s", already exists.' % dest)
# Ensure groups needed for `dest` exist.
if "/" in dest:
self.require_group("/" + dest.rsplit("/", 1)[0])
self._write_op(self._move_nosync, source, dest) | [
"def",
"move",
"(",
"self",
",",
"source",
",",
"dest",
")",
":",
"source",
"=",
"self",
".",
"_item_path",
"(",
"source",
")",
"dest",
"=",
"self",
".",
"_item_path",
"(",
"dest",
")",
"# Check that source exists.",
"if",
"not",
"(",
"contains_array",
"... | Move contents from one path to another relative to the Group.
Parameters
----------
source : string
Name or path to a Zarr object to move.
dest : string
New name or path of the Zarr object. | [
"Move",
"contents",
"from",
"one",
"path",
"to",
"another",
"relative",
"to",
"the",
"Group",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/hierarchy.py#L970-L995 | train | 215,800 |
zarr-developers/zarr | zarr/attrs.py | Attributes.asdict | def asdict(self):
"""Retrieve all attributes as a dictionary."""
if self.cache and self._cached_asdict is not None:
return self._cached_asdict
d = self._get_nosync()
if self.cache:
self._cached_asdict = d
return d | python | def asdict(self):
"""Retrieve all attributes as a dictionary."""
if self.cache and self._cached_asdict is not None:
return self._cached_asdict
d = self._get_nosync()
if self.cache:
self._cached_asdict = d
return d | [
"def",
"asdict",
"(",
"self",
")",
":",
"if",
"self",
".",
"cache",
"and",
"self",
".",
"_cached_asdict",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_cached_asdict",
"d",
"=",
"self",
".",
"_get_nosync",
"(",
")",
"if",
"self",
".",
"cache",
"... | Retrieve all attributes as a dictionary. | [
"Retrieve",
"all",
"attributes",
"as",
"a",
"dictionary",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/attrs.py#L49-L56 | train | 215,801 |
zarr-developers/zarr | zarr/attrs.py | Attributes.update | def update(self, *args, **kwargs):
"""Update the values of several attributes in a single operation."""
self._write_op(self._update_nosync, *args, **kwargs) | python | def update(self, *args, **kwargs):
"""Update the values of several attributes in a single operation."""
self._write_op(self._update_nosync, *args, **kwargs) | [
"def",
"update",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_write_op",
"(",
"self",
".",
"_update_nosync",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Update the values of several attributes in a single operation. | [
"Update",
"the",
"values",
"of",
"several",
"attributes",
"in",
"a",
"single",
"operation",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/attrs.py#L121-L123 | train | 215,802 |
zarr-developers/zarr | zarr/util.py | json_dumps | def json_dumps(o):
"""Write JSON in a consistent, human-readable way."""
return json.dumps(o, indent=4, sort_keys=True, ensure_ascii=True,
separators=(',', ': ')).encode('ascii') | python | def json_dumps(o):
"""Write JSON in a consistent, human-readable way."""
return json.dumps(o, indent=4, sort_keys=True, ensure_ascii=True,
separators=(',', ': ')).encode('ascii') | [
"def",
"json_dumps",
"(",
"o",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"o",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
",",
"ensure_ascii",
"=",
"True",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
".",
"encode",
... | Write JSON in a consistent, human-readable way. | [
"Write",
"JSON",
"in",
"a",
"consistent",
"human",
"-",
"readable",
"way",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/util.py#L36-L39 | train | 215,803 |
zarr-developers/zarr | zarr/util.py | normalize_shape | def normalize_shape(shape):
"""Convenience function to normalize the `shape` argument."""
if shape is None:
raise TypeError('shape is None')
# handle 1D convenience form
if isinstance(shape, numbers.Integral):
shape = (int(shape),)
# normalize
shape = tuple(int(s) for s in shape)
return shape | python | def normalize_shape(shape):
"""Convenience function to normalize the `shape` argument."""
if shape is None:
raise TypeError('shape is None')
# handle 1D convenience form
if isinstance(shape, numbers.Integral):
shape = (int(shape),)
# normalize
shape = tuple(int(s) for s in shape)
return shape | [
"def",
"normalize_shape",
"(",
"shape",
")",
":",
"if",
"shape",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'shape is None'",
")",
"# handle 1D convenience form",
"if",
"isinstance",
"(",
"shape",
",",
"numbers",
".",
"Integral",
")",
":",
"shape",
"=",
... | Convenience function to normalize the `shape` argument. | [
"Convenience",
"function",
"to",
"normalize",
"the",
"shape",
"argument",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/util.py#L47-L59 | train | 215,804 |
zarr-developers/zarr | zarr/util.py | guess_chunks | def guess_chunks(shape, typesize):
"""
Guess an appropriate chunk layout for a dataset, given its shape and
the size of each element in bytes. Will allocate chunks only as large
as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of
each axis, slightly favoring bigger values for the last index.
Undocumented and subject to change without warning.
"""
ndims = len(shape)
# require chunks to have non-zero length for all dimensions
chunks = np.maximum(np.array(shape, dtype='=f8'), 1)
# Determine the optimal chunk size in bytes using a PyTables expression.
# This is kept as a float.
dset_size = np.product(chunks)*typesize
target_size = CHUNK_BASE * (2**np.log10(dset_size/(1024.*1024)))
if target_size > CHUNK_MAX:
target_size = CHUNK_MAX
elif target_size < CHUNK_MIN:
target_size = CHUNK_MIN
idx = 0
while True:
# Repeatedly loop over the axes, dividing them by 2. Stop when:
# 1a. We're smaller than the target chunk size, OR
# 1b. We're within 50% of the target chunk size, AND
# 2. The chunk is smaller than the maximum chunk size
chunk_bytes = np.product(chunks)*typesize
if (chunk_bytes < target_size or
abs(chunk_bytes-target_size)/target_size < 0.5) and \
chunk_bytes < CHUNK_MAX:
break
if np.product(chunks) == 1:
break # Element size larger than CHUNK_MAX
chunks[idx % ndims] = np.ceil(chunks[idx % ndims] / 2.0)
idx += 1
return tuple(int(x) for x in chunks) | python | def guess_chunks(shape, typesize):
"""
Guess an appropriate chunk layout for a dataset, given its shape and
the size of each element in bytes. Will allocate chunks only as large
as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of
each axis, slightly favoring bigger values for the last index.
Undocumented and subject to change without warning.
"""
ndims = len(shape)
# require chunks to have non-zero length for all dimensions
chunks = np.maximum(np.array(shape, dtype='=f8'), 1)
# Determine the optimal chunk size in bytes using a PyTables expression.
# This is kept as a float.
dset_size = np.product(chunks)*typesize
target_size = CHUNK_BASE * (2**np.log10(dset_size/(1024.*1024)))
if target_size > CHUNK_MAX:
target_size = CHUNK_MAX
elif target_size < CHUNK_MIN:
target_size = CHUNK_MIN
idx = 0
while True:
# Repeatedly loop over the axes, dividing them by 2. Stop when:
# 1a. We're smaller than the target chunk size, OR
# 1b. We're within 50% of the target chunk size, AND
# 2. The chunk is smaller than the maximum chunk size
chunk_bytes = np.product(chunks)*typesize
if (chunk_bytes < target_size or
abs(chunk_bytes-target_size)/target_size < 0.5) and \
chunk_bytes < CHUNK_MAX:
break
if np.product(chunks) == 1:
break # Element size larger than CHUNK_MAX
chunks[idx % ndims] = np.ceil(chunks[idx % ndims] / 2.0)
idx += 1
return tuple(int(x) for x in chunks) | [
"def",
"guess_chunks",
"(",
"shape",
",",
"typesize",
")",
":",
"ndims",
"=",
"len",
"(",
"shape",
")",
"# require chunks to have non-zero length for all dimensions",
"chunks",
"=",
"np",
".",
"maximum",
"(",
"np",
".",
"array",
"(",
"shape",
",",
"dtype",
"="... | Guess an appropriate chunk layout for a dataset, given its shape and
the size of each element in bytes. Will allocate chunks only as large
as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of
each axis, slightly favoring bigger values for the last index.
Undocumented and subject to change without warning. | [
"Guess",
"an",
"appropriate",
"chunk",
"layout",
"for",
"a",
"dataset",
"given",
"its",
"shape",
"and",
"the",
"size",
"of",
"each",
"element",
"in",
"bytes",
".",
"Will",
"allocate",
"chunks",
"only",
"as",
"large",
"as",
"MAX_SIZE",
".",
"Chunks",
"are",... | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/util.py#L69-L112 | train | 215,805 |
zarr-developers/zarr | zarr/util.py | normalize_chunks | def normalize_chunks(chunks, shape, typesize):
"""Convenience function to normalize the `chunks` argument for an array
with the given `shape`."""
# N.B., expect shape already normalized
# handle auto-chunking
if chunks is None or chunks is True:
return guess_chunks(shape, typesize)
# handle no chunking
if chunks is False:
return shape
# handle 1D convenience form
if isinstance(chunks, numbers.Integral):
chunks = (int(chunks),)
# handle bad dimensionality
if len(chunks) > len(shape):
raise ValueError('too many dimensions in chunks')
# handle underspecified chunks
if len(chunks) < len(shape):
# assume chunks across remaining dimensions
chunks += shape[len(chunks):]
# handle None in chunks
chunks = tuple(s if c is None else int(c)
for s, c in zip(shape, chunks))
return chunks | python | def normalize_chunks(chunks, shape, typesize):
"""Convenience function to normalize the `chunks` argument for an array
with the given `shape`."""
# N.B., expect shape already normalized
# handle auto-chunking
if chunks is None or chunks is True:
return guess_chunks(shape, typesize)
# handle no chunking
if chunks is False:
return shape
# handle 1D convenience form
if isinstance(chunks, numbers.Integral):
chunks = (int(chunks),)
# handle bad dimensionality
if len(chunks) > len(shape):
raise ValueError('too many dimensions in chunks')
# handle underspecified chunks
if len(chunks) < len(shape):
# assume chunks across remaining dimensions
chunks += shape[len(chunks):]
# handle None in chunks
chunks = tuple(s if c is None else int(c)
for s, c in zip(shape, chunks))
return chunks | [
"def",
"normalize_chunks",
"(",
"chunks",
",",
"shape",
",",
"typesize",
")",
":",
"# N.B., expect shape already normalized",
"# handle auto-chunking",
"if",
"chunks",
"is",
"None",
"or",
"chunks",
"is",
"True",
":",
"return",
"guess_chunks",
"(",
"shape",
",",
"t... | Convenience function to normalize the `chunks` argument for an array
with the given `shape`. | [
"Convenience",
"function",
"to",
"normalize",
"the",
"chunks",
"argument",
"for",
"an",
"array",
"with",
"the",
"given",
"shape",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/util.py#L115-L146 | train | 215,806 |
zarr-developers/zarr | zarr/storage.py | contains_array | def contains_array(store, path=None):
"""Return True if the store contains an array at the given logical path."""
path = normalize_storage_path(path)
prefix = _path_to_prefix(path)
key = prefix + array_meta_key
return key in store | python | def contains_array(store, path=None):
"""Return True if the store contains an array at the given logical path."""
path = normalize_storage_path(path)
prefix = _path_to_prefix(path)
key = prefix + array_meta_key
return key in store | [
"def",
"contains_array",
"(",
"store",
",",
"path",
"=",
"None",
")",
":",
"path",
"=",
"normalize_storage_path",
"(",
"path",
")",
"prefix",
"=",
"_path_to_prefix",
"(",
"path",
")",
"key",
"=",
"prefix",
"+",
"array_meta_key",
"return",
"key",
"in",
"sto... | Return True if the store contains an array at the given logical path. | [
"Return",
"True",
"if",
"the",
"store",
"contains",
"an",
"array",
"at",
"the",
"given",
"logical",
"path",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L77-L82 | train | 215,807 |
zarr-developers/zarr | zarr/storage.py | contains_group | def contains_group(store, path=None):
"""Return True if the store contains a group at the given logical path."""
path = normalize_storage_path(path)
prefix = _path_to_prefix(path)
key = prefix + group_meta_key
return key in store | python | def contains_group(store, path=None):
"""Return True if the store contains a group at the given logical path."""
path = normalize_storage_path(path)
prefix = _path_to_prefix(path)
key = prefix + group_meta_key
return key in store | [
"def",
"contains_group",
"(",
"store",
",",
"path",
"=",
"None",
")",
":",
"path",
"=",
"normalize_storage_path",
"(",
"path",
")",
"prefix",
"=",
"_path_to_prefix",
"(",
"path",
")",
"key",
"=",
"prefix",
"+",
"group_meta_key",
"return",
"key",
"in",
"sto... | Return True if the store contains a group at the given logical path. | [
"Return",
"True",
"if",
"the",
"store",
"contains",
"a",
"group",
"at",
"the",
"given",
"logical",
"path",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L85-L90 | train | 215,808 |
zarr-developers/zarr | zarr/storage.py | rmdir | def rmdir(store, path=None):
"""Remove all items under the given path. If `store` provides a `rmdir` method,
this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface."""
path = normalize_storage_path(path)
if hasattr(store, 'rmdir'):
# pass through
store.rmdir(path)
else:
# slow version, delete one key at a time
_rmdir_from_keys(store, path) | python | def rmdir(store, path=None):
"""Remove all items under the given path. If `store` provides a `rmdir` method,
this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface."""
path = normalize_storage_path(path)
if hasattr(store, 'rmdir'):
# pass through
store.rmdir(path)
else:
# slow version, delete one key at a time
_rmdir_from_keys(store, path) | [
"def",
"rmdir",
"(",
"store",
",",
"path",
"=",
"None",
")",
":",
"path",
"=",
"normalize_storage_path",
"(",
"path",
")",
"if",
"hasattr",
"(",
"store",
",",
"'rmdir'",
")",
":",
"# pass through",
"store",
".",
"rmdir",
"(",
"path",
")",
"else",
":",
... | Remove all items under the given path. If `store` provides a `rmdir` method,
this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface. | [
"Remove",
"all",
"items",
"under",
"the",
"given",
"path",
".",
"If",
"store",
"provides",
"a",
"rmdir",
"method",
"this",
"will",
"be",
"called",
"otherwise",
"will",
"fall",
"back",
"to",
"implementation",
"via",
"the",
"MutableMapping",
"interface",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L101-L111 | train | 215,809 |
zarr-developers/zarr | zarr/storage.py | rename | def rename(store, src_path, dst_path):
"""Rename all items under the given path. If `store` provides a `rename` method,
this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface."""
src_path = normalize_storage_path(src_path)
dst_path = normalize_storage_path(dst_path)
if hasattr(store, 'rename'):
# pass through
store.rename(src_path, dst_path)
else:
# slow version, delete one key at a time
_rename_from_keys(store, src_path, dst_path) | python | def rename(store, src_path, dst_path):
"""Rename all items under the given path. If `store` provides a `rename` method,
this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface."""
src_path = normalize_storage_path(src_path)
dst_path = normalize_storage_path(dst_path)
if hasattr(store, 'rename'):
# pass through
store.rename(src_path, dst_path)
else:
# slow version, delete one key at a time
_rename_from_keys(store, src_path, dst_path) | [
"def",
"rename",
"(",
"store",
",",
"src_path",
",",
"dst_path",
")",
":",
"src_path",
"=",
"normalize_storage_path",
"(",
"src_path",
")",
"dst_path",
"=",
"normalize_storage_path",
"(",
"dst_path",
")",
"if",
"hasattr",
"(",
"store",
",",
"'rename'",
")",
... | Rename all items under the given path. If `store` provides a `rename` method,
this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface. | [
"Rename",
"all",
"items",
"under",
"the",
"given",
"path",
".",
"If",
"store",
"provides",
"a",
"rename",
"method",
"this",
"will",
"be",
"called",
"otherwise",
"will",
"fall",
"back",
"to",
"implementation",
"via",
"the",
"MutableMapping",
"interface",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L124-L135 | train | 215,810 |
zarr-developers/zarr | zarr/storage.py | listdir | def listdir(store, path=None):
"""Obtain a directory listing for the given path. If `store` provides a `listdir`
method, this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface."""
path = normalize_storage_path(path)
if hasattr(store, 'listdir'):
# pass through
return store.listdir(path)
else:
# slow version, iterate through all keys
return _listdir_from_keys(store, path) | python | def listdir(store, path=None):
"""Obtain a directory listing for the given path. If `store` provides a `listdir`
method, this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface."""
path = normalize_storage_path(path)
if hasattr(store, 'listdir'):
# pass through
return store.listdir(path)
else:
# slow version, iterate through all keys
return _listdir_from_keys(store, path) | [
"def",
"listdir",
"(",
"store",
",",
"path",
"=",
"None",
")",
":",
"path",
"=",
"normalize_storage_path",
"(",
"path",
")",
"if",
"hasattr",
"(",
"store",
",",
"'listdir'",
")",
":",
"# pass through",
"return",
"store",
".",
"listdir",
"(",
"path",
")",... | Obtain a directory listing for the given path. If `store` provides a `listdir`
method, this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface. | [
"Obtain",
"a",
"directory",
"listing",
"for",
"the",
"given",
"path",
".",
"If",
"store",
"provides",
"a",
"listdir",
"method",
"this",
"will",
"be",
"called",
"otherwise",
"will",
"fall",
"back",
"to",
"implementation",
"via",
"the",
"MutableMapping",
"interf... | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L150-L160 | train | 215,811 |
zarr-developers/zarr | zarr/storage.py | getsize | def getsize(store, path=None):
"""Compute size of stored items for a given path. If `store` provides a `getsize`
method, this will be called, otherwise will return -1."""
path = normalize_storage_path(path)
if hasattr(store, 'getsize'):
# pass through
return store.getsize(path)
elif isinstance(store, dict):
# compute from size of values
if path in store:
v = store[path]
size = buffer_size(v)
else:
members = listdir(store, path)
prefix = _path_to_prefix(path)
size = 0
for k in members:
try:
v = store[prefix + k]
except KeyError:
pass
else:
try:
size += buffer_size(v)
except TypeError:
return -1
return size
else:
return -1 | python | def getsize(store, path=None):
"""Compute size of stored items for a given path. If `store` provides a `getsize`
method, this will be called, otherwise will return -1."""
path = normalize_storage_path(path)
if hasattr(store, 'getsize'):
# pass through
return store.getsize(path)
elif isinstance(store, dict):
# compute from size of values
if path in store:
v = store[path]
size = buffer_size(v)
else:
members = listdir(store, path)
prefix = _path_to_prefix(path)
size = 0
for k in members:
try:
v = store[prefix + k]
except KeyError:
pass
else:
try:
size += buffer_size(v)
except TypeError:
return -1
return size
else:
return -1 | [
"def",
"getsize",
"(",
"store",
",",
"path",
"=",
"None",
")",
":",
"path",
"=",
"normalize_storage_path",
"(",
"path",
")",
"if",
"hasattr",
"(",
"store",
",",
"'getsize'",
")",
":",
"# pass through",
"return",
"store",
".",
"getsize",
"(",
"path",
")",... | Compute size of stored items for a given path. If `store` provides a `getsize`
method, this will be called, otherwise will return -1. | [
"Compute",
"size",
"of",
"stored",
"items",
"for",
"a",
"given",
"path",
".",
"If",
"store",
"provides",
"a",
"getsize",
"method",
"this",
"will",
"be",
"called",
"otherwise",
"will",
"return",
"-",
"1",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L163-L191 | train | 215,812 |
zarr-developers/zarr | zarr/storage.py | init_array | def init_array(store, shape, chunks=True, dtype=None, compressor='default',
fill_value=None, order='C', overwrite=False, path=None,
chunk_store=None, filters=None, object_codec=None):
"""Initialize an array store with the given configuration. Note that this is a low-level
function and there should be no need to call this directly from user code.
Parameters
----------
store : MutableMapping
A mapping that supports string keys and bytes-like values.
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
overwrite : bool, optional
If True, erase all data in `store` prior to initialisation.
path : string, optional
Path under which array is stored.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to compression.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
Examples
--------
Initialize an array store::
>>> from zarr.storage import init_array
>>> store = dict()
>>> init_array(store, shape=(10000, 10000), chunks=(1000, 1000))
>>> sorted(store.keys())
['.zarray']
Array metadata is stored as JSON::
>>> print(store['.zarray'].decode())
{
"chunks": [
1000,
1000
],
"compressor": {
"blocksize": 0,
"clevel": 5,
"cname": "lz4",
"id": "blosc",
"shuffle": 1
},
"dtype": "<f8",
"fill_value": null,
"filters": null,
"order": "C",
"shape": [
10000,
10000
],
"zarr_format": 2
}
Initialize an array using a storage path::
>>> store = dict()
>>> init_array(store, shape=100000000, chunks=1000000, dtype='i1', path='foo')
>>> sorted(store.keys())
['.zgroup', 'foo/.zarray']
>>> print(store['foo/.zarray'].decode())
{
"chunks": [
1000000
],
"compressor": {
"blocksize": 0,
"clevel": 5,
"cname": "lz4",
"id": "blosc",
"shuffle": 1
},
"dtype": "|i1",
"fill_value": null,
"filters": null,
"order": "C",
"shape": [
100000000
],
"zarr_format": 2
}
Notes
-----
The initialisation process involves normalising all array metadata, encoding
as JSON and storing under the '.zarray' key.
"""
# normalize path
path = normalize_storage_path(path)
# ensure parent group initialized
_require_parent_group(path, store=store, chunk_store=chunk_store, overwrite=overwrite)
_init_array_metadata(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, overwrite=overwrite, path=path,
chunk_store=chunk_store, filters=filters,
object_codec=object_codec) | python | def init_array(store, shape, chunks=True, dtype=None, compressor='default',
fill_value=None, order='C', overwrite=False, path=None,
chunk_store=None, filters=None, object_codec=None):
"""Initialize an array store with the given configuration. Note that this is a low-level
function and there should be no need to call this directly from user code.
Parameters
----------
store : MutableMapping
A mapping that supports string keys and bytes-like values.
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
overwrite : bool, optional
If True, erase all data in `store` prior to initialisation.
path : string, optional
Path under which array is stored.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to compression.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
Examples
--------
Initialize an array store::
>>> from zarr.storage import init_array
>>> store = dict()
>>> init_array(store, shape=(10000, 10000), chunks=(1000, 1000))
>>> sorted(store.keys())
['.zarray']
Array metadata is stored as JSON::
>>> print(store['.zarray'].decode())
{
"chunks": [
1000,
1000
],
"compressor": {
"blocksize": 0,
"clevel": 5,
"cname": "lz4",
"id": "blosc",
"shuffle": 1
},
"dtype": "<f8",
"fill_value": null,
"filters": null,
"order": "C",
"shape": [
10000,
10000
],
"zarr_format": 2
}
Initialize an array using a storage path::
>>> store = dict()
>>> init_array(store, shape=100000000, chunks=1000000, dtype='i1', path='foo')
>>> sorted(store.keys())
['.zgroup', 'foo/.zarray']
>>> print(store['foo/.zarray'].decode())
{
"chunks": [
1000000
],
"compressor": {
"blocksize": 0,
"clevel": 5,
"cname": "lz4",
"id": "blosc",
"shuffle": 1
},
"dtype": "|i1",
"fill_value": null,
"filters": null,
"order": "C",
"shape": [
100000000
],
"zarr_format": 2
}
Notes
-----
The initialisation process involves normalising all array metadata, encoding
as JSON and storing under the '.zarray' key.
"""
# normalize path
path = normalize_storage_path(path)
# ensure parent group initialized
_require_parent_group(path, store=store, chunk_store=chunk_store, overwrite=overwrite)
_init_array_metadata(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, overwrite=overwrite, path=path,
chunk_store=chunk_store, filters=filters,
object_codec=object_codec) | [
"def",
"init_array",
"(",
"store",
",",
"shape",
",",
"chunks",
"=",
"True",
",",
"dtype",
"=",
"None",
",",
"compressor",
"=",
"'default'",
",",
"fill_value",
"=",
"None",
",",
"order",
"=",
"'C'",
",",
"overwrite",
"=",
"False",
",",
"path",
"=",
"... | Initialize an array store with the given configuration. Note that this is a low-level
function and there should be no need to call this directly from user code.
Parameters
----------
store : MutableMapping
A mapping that supports string keys and bytes-like values.
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
overwrite : bool, optional
If True, erase all data in `store` prior to initialisation.
path : string, optional
Path under which array is stored.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to compression.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
Examples
--------
Initialize an array store::
>>> from zarr.storage import init_array
>>> store = dict()
>>> init_array(store, shape=(10000, 10000), chunks=(1000, 1000))
>>> sorted(store.keys())
['.zarray']
Array metadata is stored as JSON::
>>> print(store['.zarray'].decode())
{
"chunks": [
1000,
1000
],
"compressor": {
"blocksize": 0,
"clevel": 5,
"cname": "lz4",
"id": "blosc",
"shuffle": 1
},
"dtype": "<f8",
"fill_value": null,
"filters": null,
"order": "C",
"shape": [
10000,
10000
],
"zarr_format": 2
}
Initialize an array using a storage path::
>>> store = dict()
>>> init_array(store, shape=100000000, chunks=1000000, dtype='i1', path='foo')
>>> sorted(store.keys())
['.zgroup', 'foo/.zarray']
>>> print(store['foo/.zarray'].decode())
{
"chunks": [
1000000
],
"compressor": {
"blocksize": 0,
"clevel": 5,
"cname": "lz4",
"id": "blosc",
"shuffle": 1
},
"dtype": "|i1",
"fill_value": null,
"filters": null,
"order": "C",
"shape": [
100000000
],
"zarr_format": 2
}
Notes
-----
The initialisation process involves normalising all array metadata, encoding
as JSON and storing under the '.zarray' key. | [
"Initialize",
"an",
"array",
"store",
"with",
"the",
"given",
"configuration",
".",
"Note",
"that",
"this",
"is",
"a",
"low",
"-",
"level",
"function",
"and",
"there",
"should",
"be",
"no",
"need",
"to",
"call",
"this",
"directly",
"from",
"user",
"code",
... | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L207-L323 | train | 215,813 |
zarr-developers/zarr | zarr/storage.py | init_group | def init_group(store, overwrite=False, path=None, chunk_store=None):
"""Initialize a group store. Note that this is a low-level function and there should be no
need to call this directly from user code.
Parameters
----------
store : MutableMapping
A mapping that supports string keys and byte sequence values.
overwrite : bool, optional
If True, erase all data in `store` prior to initialisation.
path : string, optional
Path under which array is stored.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata.
"""
# normalize path
path = normalize_storage_path(path)
# ensure parent group initialized
_require_parent_group(path, store=store, chunk_store=chunk_store,
overwrite=overwrite)
# initialise metadata
_init_group_metadata(store=store, overwrite=overwrite, path=path,
chunk_store=chunk_store) | python | def init_group(store, overwrite=False, path=None, chunk_store=None):
"""Initialize a group store. Note that this is a low-level function and there should be no
need to call this directly from user code.
Parameters
----------
store : MutableMapping
A mapping that supports string keys and byte sequence values.
overwrite : bool, optional
If True, erase all data in `store` prior to initialisation.
path : string, optional
Path under which array is stored.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata.
"""
# normalize path
path = normalize_storage_path(path)
# ensure parent group initialized
_require_parent_group(path, store=store, chunk_store=chunk_store,
overwrite=overwrite)
# initialise metadata
_init_group_metadata(store=store, overwrite=overwrite, path=path,
chunk_store=chunk_store) | [
"def",
"init_group",
"(",
"store",
",",
"overwrite",
"=",
"False",
",",
"path",
"=",
"None",
",",
"chunk_store",
"=",
"None",
")",
":",
"# normalize path",
"path",
"=",
"normalize_storage_path",
"(",
"path",
")",
"# ensure parent group initialized",
"_require_pare... | Initialize a group store. Note that this is a low-level function and there should be no
need to call this directly from user code.
Parameters
----------
store : MutableMapping
A mapping that supports string keys and byte sequence values.
overwrite : bool, optional
If True, erase all data in `store` prior to initialisation.
path : string, optional
Path under which array is stored.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata. | [
"Initialize",
"a",
"group",
"store",
".",
"Note",
"that",
"this",
"is",
"a",
"low",
"-",
"level",
"function",
"and",
"there",
"should",
"be",
"no",
"need",
"to",
"call",
"this",
"directly",
"from",
"user",
"code",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L405-L432 | train | 215,814 |
zarr-developers/zarr | zarr/storage.py | atexit_rmtree | def atexit_rmtree(path,
isdir=os.path.isdir,
rmtree=shutil.rmtree): # pragma: no cover
"""Ensure directory removal at interpreter exit."""
if isdir(path):
rmtree(path) | python | def atexit_rmtree(path,
isdir=os.path.isdir,
rmtree=shutil.rmtree): # pragma: no cover
"""Ensure directory removal at interpreter exit."""
if isdir(path):
rmtree(path) | [
"def",
"atexit_rmtree",
"(",
"path",
",",
"isdir",
"=",
"os",
".",
"path",
".",
"isdir",
",",
"rmtree",
"=",
"shutil",
".",
"rmtree",
")",
":",
"# pragma: no cover",
"if",
"isdir",
"(",
"path",
")",
":",
"rmtree",
"(",
"path",
")"
] | Ensure directory removal at interpreter exit. | [
"Ensure",
"directory",
"removal",
"at",
"interpreter",
"exit",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L863-L868 | train | 215,815 |
zarr-developers/zarr | zarr/storage.py | atexit_rmglob | def atexit_rmglob(path,
glob=glob.glob,
isdir=os.path.isdir,
isfile=os.path.isfile,
remove=os.remove,
rmtree=shutil.rmtree): # pragma: no cover
"""Ensure removal of multiple files at interpreter exit."""
for p in glob(path):
if isfile(p):
remove(p)
elif isdir(p):
rmtree(p) | python | def atexit_rmglob(path,
glob=glob.glob,
isdir=os.path.isdir,
isfile=os.path.isfile,
remove=os.remove,
rmtree=shutil.rmtree): # pragma: no cover
"""Ensure removal of multiple files at interpreter exit."""
for p in glob(path):
if isfile(p):
remove(p)
elif isdir(p):
rmtree(p) | [
"def",
"atexit_rmglob",
"(",
"path",
",",
"glob",
"=",
"glob",
".",
"glob",
",",
"isdir",
"=",
"os",
".",
"path",
".",
"isdir",
",",
"isfile",
"=",
"os",
".",
"path",
".",
"isfile",
",",
"remove",
"=",
"os",
".",
"remove",
",",
"rmtree",
"=",
"sh... | Ensure removal of multiple files at interpreter exit. | [
"Ensure",
"removal",
"of",
"multiple",
"files",
"at",
"interpreter",
"exit",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L872-L883 | train | 215,816 |
zarr-developers/zarr | zarr/storage.py | migrate_1to2 | def migrate_1to2(store):
"""Migrate array metadata in `store` from Zarr format version 1 to
version 2.
Parameters
----------
store : MutableMapping
Store to be migrated.
Notes
-----
Version 1 did not support hierarchies, so this migration function will
look for a single array in `store` and migrate the array metadata to
version 2.
"""
# migrate metadata
from zarr import meta_v1
meta = meta_v1.decode_metadata(store['meta'])
del store['meta']
# add empty filters
meta['filters'] = None
# migration compression metadata
compression = meta['compression']
if compression is None or compression == 'none':
compressor_config = None
else:
compression_opts = meta['compression_opts']
codec_cls = codec_registry[compression]
if isinstance(compression_opts, dict):
compressor = codec_cls(**compression_opts)
else:
compressor = codec_cls(compression_opts)
compressor_config = compressor.get_config()
meta['compressor'] = compressor_config
del meta['compression']
del meta['compression_opts']
# store migrated metadata
store[array_meta_key] = encode_array_metadata(meta)
# migrate user attributes
store[attrs_key] = store['attrs']
del store['attrs'] | python | def migrate_1to2(store):
"""Migrate array metadata in `store` from Zarr format version 1 to
version 2.
Parameters
----------
store : MutableMapping
Store to be migrated.
Notes
-----
Version 1 did not support hierarchies, so this migration function will
look for a single array in `store` and migrate the array metadata to
version 2.
"""
# migrate metadata
from zarr import meta_v1
meta = meta_v1.decode_metadata(store['meta'])
del store['meta']
# add empty filters
meta['filters'] = None
# migration compression metadata
compression = meta['compression']
if compression is None or compression == 'none':
compressor_config = None
else:
compression_opts = meta['compression_opts']
codec_cls = codec_registry[compression]
if isinstance(compression_opts, dict):
compressor = codec_cls(**compression_opts)
else:
compressor = codec_cls(compression_opts)
compressor_config = compressor.get_config()
meta['compressor'] = compressor_config
del meta['compression']
del meta['compression_opts']
# store migrated metadata
store[array_meta_key] = encode_array_metadata(meta)
# migrate user attributes
store[attrs_key] = store['attrs']
del store['attrs'] | [
"def",
"migrate_1to2",
"(",
"store",
")",
":",
"# migrate metadata",
"from",
"zarr",
"import",
"meta_v1",
"meta",
"=",
"meta_v1",
".",
"decode_metadata",
"(",
"store",
"[",
"'meta'",
"]",
")",
"del",
"store",
"[",
"'meta'",
"]",
"# add empty filters",
"meta",
... | Migrate array metadata in `store` from Zarr format version 1 to
version 2.
Parameters
----------
store : MutableMapping
Store to be migrated.
Notes
-----
Version 1 did not support hierarchies, so this migration function will
look for a single array in `store` and migrate the array metadata to
version 2. | [
"Migrate",
"array",
"metadata",
"in",
"store",
"from",
"Zarr",
"format",
"version",
"1",
"to",
"version",
"2",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L1260-L1306 | train | 215,817 |
zarr-developers/zarr | zarr/storage.py | ZipStore.flush | def flush(self):
"""Closes the underlying zip file, ensuring all records are written,
then re-opens the file for further modifications."""
if self.mode != 'r':
with self.mutex:
self.zf.close()
# N.B., re-open with mode 'a' regardless of initial mode so we don't wipe
# what's been written
self.zf = zipfile.ZipFile(self.path, mode='a',
compression=self.compression,
allowZip64=self.allowZip64) | python | def flush(self):
"""Closes the underlying zip file, ensuring all records are written,
then re-opens the file for further modifications."""
if self.mode != 'r':
with self.mutex:
self.zf.close()
# N.B., re-open with mode 'a' regardless of initial mode so we don't wipe
# what's been written
self.zf = zipfile.ZipFile(self.path, mode='a',
compression=self.compression,
allowZip64=self.allowZip64) | [
"def",
"flush",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"!=",
"'r'",
":",
"with",
"self",
".",
"mutex",
":",
"self",
".",
"zf",
".",
"close",
"(",
")",
"# N.B., re-open with mode 'a' regardless of initial mode so we don't wipe",
"# what's been written",
... | Closes the underlying zip file, ensuring all records are written,
then re-opens the file for further modifications. | [
"Closes",
"the",
"underlying",
"zip",
"file",
"ensuring",
"all",
"records",
"are",
"written",
"then",
"re",
"-",
"opens",
"the",
"file",
"for",
"further",
"modifications",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L1154-L1164 | train | 215,818 |
zarr-developers/zarr | zarr/storage.py | DBMStore.close | def close(self):
"""Closes the underlying database file."""
if hasattr(self.db, 'close'):
with self.write_mutex:
self.db.close() | python | def close(self):
"""Closes the underlying database file."""
if hasattr(self.db, 'close'):
with self.write_mutex:
self.db.close() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"db",
",",
"'close'",
")",
":",
"with",
"self",
".",
"write_mutex",
":",
"self",
".",
"db",
".",
"close",
"(",
")"
] | Closes the underlying database file. | [
"Closes",
"the",
"underlying",
"database",
"file",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L1442-L1446 | train | 215,819 |
zarr-developers/zarr | zarr/storage.py | DBMStore.flush | def flush(self):
"""Synchronizes data to the underlying database file."""
if self.flag[0] != 'r':
with self.write_mutex:
if hasattr(self.db, 'sync'):
self.db.sync()
else:
# fall-back, close and re-open, needed for ndbm
flag = self.flag
if flag[0] == 'n':
flag = 'c' + flag[1:] # don't clobber an existing database
self.db.close()
# noinspection PyArgumentList
self.db = self.open(self.path, flag, self.mode, **self.open_kwargs) | python | def flush(self):
"""Synchronizes data to the underlying database file."""
if self.flag[0] != 'r':
with self.write_mutex:
if hasattr(self.db, 'sync'):
self.db.sync()
else:
# fall-back, close and re-open, needed for ndbm
flag = self.flag
if flag[0] == 'n':
flag = 'c' + flag[1:] # don't clobber an existing database
self.db.close()
# noinspection PyArgumentList
self.db = self.open(self.path, flag, self.mode, **self.open_kwargs) | [
"def",
"flush",
"(",
"self",
")",
":",
"if",
"self",
".",
"flag",
"[",
"0",
"]",
"!=",
"'r'",
":",
"with",
"self",
".",
"write_mutex",
":",
"if",
"hasattr",
"(",
"self",
".",
"db",
",",
"'sync'",
")",
":",
"self",
".",
"db",
".",
"sync",
"(",
... | Synchronizes data to the underlying database file. | [
"Synchronizes",
"data",
"to",
"the",
"underlying",
"database",
"file",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/storage.py#L1448-L1461 | train | 215,820 |
zarr-developers/zarr | zarr/indexing.py | oindex | def oindex(a, selection):
"""Implementation of orthogonal indexing with slices and ints."""
selection = replace_ellipsis(selection, a.shape)
drop_axes = tuple([i for i, s in enumerate(selection) if is_integer(s)])
selection = ix_(selection, a.shape)
result = a[selection]
if drop_axes:
result = result.squeeze(axis=drop_axes)
return result | python | def oindex(a, selection):
"""Implementation of orthogonal indexing with slices and ints."""
selection = replace_ellipsis(selection, a.shape)
drop_axes = tuple([i for i, s in enumerate(selection) if is_integer(s)])
selection = ix_(selection, a.shape)
result = a[selection]
if drop_axes:
result = result.squeeze(axis=drop_axes)
return result | [
"def",
"oindex",
"(",
"a",
",",
"selection",
")",
":",
"selection",
"=",
"replace_ellipsis",
"(",
"selection",
",",
"a",
".",
"shape",
")",
"drop_axes",
"=",
"tuple",
"(",
"[",
"i",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"selection",
")",
"if"... | Implementation of orthogonal indexing with slices and ints. | [
"Implementation",
"of",
"orthogonal",
"indexing",
"with",
"slices",
"and",
"ints",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/indexing.py#L496-L504 | train | 215,821 |
zarr-developers/zarr | zarr/n5.py | array_metadata_to_n5 | def array_metadata_to_n5(array_metadata):
'''Convert array metadata from zarr to N5 format.'''
for f, t in zarr_to_n5_keys:
array_metadata[t] = array_metadata[f]
del array_metadata[f]
del array_metadata['zarr_format']
try:
dtype = np.dtype(array_metadata['dataType'])
except TypeError: # pragma: no cover
raise TypeError(
"data type %s not supported by N5" % array_metadata['dataType'])
array_metadata['dataType'] = dtype.name
array_metadata['dimensions'] = array_metadata['dimensions'][::-1]
array_metadata['blockSize'] = array_metadata['blockSize'][::-1]
if 'fill_value' in array_metadata:
if array_metadata['fill_value'] != 0 and array_metadata['fill_value'] is not None:
raise ValueError("N5 only supports fill_value == 0 (for now)")
del array_metadata['fill_value']
if 'order' in array_metadata:
if array_metadata['order'] != 'C':
raise ValueError("zarr N5 storage only stores arrays in C order (for now)")
del array_metadata['order']
if 'filters' in array_metadata:
if array_metadata['filters'] != [] and array_metadata['filters'] is not None:
raise ValueError("N5 storage does not support zarr filters")
del array_metadata['filters']
assert 'compression' in array_metadata
compressor_config = array_metadata['compression']
compressor_config = compressor_config_to_n5(compressor_config)
array_metadata['compression'] = compressor_config
return array_metadata | python | def array_metadata_to_n5(array_metadata):
'''Convert array metadata from zarr to N5 format.'''
for f, t in zarr_to_n5_keys:
array_metadata[t] = array_metadata[f]
del array_metadata[f]
del array_metadata['zarr_format']
try:
dtype = np.dtype(array_metadata['dataType'])
except TypeError: # pragma: no cover
raise TypeError(
"data type %s not supported by N5" % array_metadata['dataType'])
array_metadata['dataType'] = dtype.name
array_metadata['dimensions'] = array_metadata['dimensions'][::-1]
array_metadata['blockSize'] = array_metadata['blockSize'][::-1]
if 'fill_value' in array_metadata:
if array_metadata['fill_value'] != 0 and array_metadata['fill_value'] is not None:
raise ValueError("N5 only supports fill_value == 0 (for now)")
del array_metadata['fill_value']
if 'order' in array_metadata:
if array_metadata['order'] != 'C':
raise ValueError("zarr N5 storage only stores arrays in C order (for now)")
del array_metadata['order']
if 'filters' in array_metadata:
if array_metadata['filters'] != [] and array_metadata['filters'] is not None:
raise ValueError("N5 storage does not support zarr filters")
del array_metadata['filters']
assert 'compression' in array_metadata
compressor_config = array_metadata['compression']
compressor_config = compressor_config_to_n5(compressor_config)
array_metadata['compression'] = compressor_config
return array_metadata | [
"def",
"array_metadata_to_n5",
"(",
"array_metadata",
")",
":",
"for",
"f",
",",
"t",
"in",
"zarr_to_n5_keys",
":",
"array_metadata",
"[",
"t",
"]",
"=",
"array_metadata",
"[",
"f",
"]",
"del",
"array_metadata",
"[",
"f",
"]",
"del",
"array_metadata",
"[",
... | Convert array metadata from zarr to N5 format. | [
"Convert",
"array",
"metadata",
"from",
"zarr",
"to",
"N5",
"format",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/n5.py#L316-L354 | train | 215,822 |
zarr-developers/zarr | zarr/n5.py | array_metadata_to_zarr | def array_metadata_to_zarr(array_metadata):
'''Convert array metadata from N5 to zarr format.'''
for t, f in zarr_to_n5_keys:
array_metadata[t] = array_metadata[f]
del array_metadata[f]
array_metadata['zarr_format'] = ZARR_FORMAT
array_metadata['shape'] = array_metadata['shape'][::-1]
array_metadata['chunks'] = array_metadata['chunks'][::-1]
array_metadata['fill_value'] = 0 # also if None was requested
array_metadata['order'] = 'C'
array_metadata['filters'] = []
compressor_config = array_metadata['compressor']
compressor_config = compressor_config_to_zarr(compressor_config)
array_metadata['compressor'] = {
'id': N5ChunkWrapper.codec_id,
'compressor_config': compressor_config,
'dtype': array_metadata['dtype'],
'chunk_shape': array_metadata['chunks']
}
return array_metadata | python | def array_metadata_to_zarr(array_metadata):
'''Convert array metadata from N5 to zarr format.'''
for t, f in zarr_to_n5_keys:
array_metadata[t] = array_metadata[f]
del array_metadata[f]
array_metadata['zarr_format'] = ZARR_FORMAT
array_metadata['shape'] = array_metadata['shape'][::-1]
array_metadata['chunks'] = array_metadata['chunks'][::-1]
array_metadata['fill_value'] = 0 # also if None was requested
array_metadata['order'] = 'C'
array_metadata['filters'] = []
compressor_config = array_metadata['compressor']
compressor_config = compressor_config_to_zarr(compressor_config)
array_metadata['compressor'] = {
'id': N5ChunkWrapper.codec_id,
'compressor_config': compressor_config,
'dtype': array_metadata['dtype'],
'chunk_shape': array_metadata['chunks']
}
return array_metadata | [
"def",
"array_metadata_to_zarr",
"(",
"array_metadata",
")",
":",
"for",
"t",
",",
"f",
"in",
"zarr_to_n5_keys",
":",
"array_metadata",
"[",
"t",
"]",
"=",
"array_metadata",
"[",
"f",
"]",
"del",
"array_metadata",
"[",
"f",
"]",
"array_metadata",
"[",
"'zarr... | Convert array metadata from N5 to zarr format. | [
"Convert",
"array",
"metadata",
"from",
"N5",
"to",
"zarr",
"format",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/n5.py#L357-L379 | train | 215,823 |
zarr-developers/zarr | zarr/core.py | Array.name | def name(self):
"""Array name following h5py convention."""
if self.path:
# follow h5py convention: add leading slash
name = self.path
if name[0] != '/':
name = '/' + name
return name
return None | python | def name(self):
"""Array name following h5py convention."""
if self.path:
# follow h5py convention: add leading slash
name = self.path
if name[0] != '/':
name = '/' + name
return name
return None | [
"def",
"name",
"(",
"self",
")",
":",
"if",
"self",
".",
"path",
":",
"# follow h5py convention: add leading slash",
"name",
"=",
"self",
".",
"path",
"if",
"name",
"[",
"0",
"]",
"!=",
"'/'",
":",
"name",
"=",
"'/'",
"+",
"name",
"return",
"name",
"re... | Array name following h5py convention. | [
"Array",
"name",
"following",
"h5py",
"convention",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L214-L222 | train | 215,824 |
zarr-developers/zarr | zarr/core.py | Array.nbytes_stored | def nbytes_stored(self):
"""The total number of stored bytes of data for the array. This
includes storage required for configuration metadata and user
attributes."""
m = getsize(self._store, self._path)
if self._chunk_store is None:
return m
else:
n = getsize(self._chunk_store, self._path)
if m < 0 or n < 0:
return -1
else:
return m + n | python | def nbytes_stored(self):
"""The total number of stored bytes of data for the array. This
includes storage required for configuration metadata and user
attributes."""
m = getsize(self._store, self._path)
if self._chunk_store is None:
return m
else:
n = getsize(self._chunk_store, self._path)
if m < 0 or n < 0:
return -1
else:
return m + n | [
"def",
"nbytes_stored",
"(",
"self",
")",
":",
"m",
"=",
"getsize",
"(",
"self",
".",
"_store",
",",
"self",
".",
"_path",
")",
"if",
"self",
".",
"_chunk_store",
"is",
"None",
":",
"return",
"m",
"else",
":",
"n",
"=",
"getsize",
"(",
"self",
".",... | The total number of stored bytes of data for the array. This
includes storage required for configuration metadata and user
attributes. | [
"The",
"total",
"number",
"of",
"stored",
"bytes",
"of",
"data",
"for",
"the",
"array",
".",
"This",
"includes",
"storage",
"required",
"for",
"configuration",
"metadata",
"and",
"user",
"attributes",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L340-L352 | train | 215,825 |
zarr-developers/zarr | zarr/core.py | Array.nchunks_initialized | def nchunks_initialized(self):
"""The number of chunks that have been initialized with some data."""
# key pattern for chunk keys
prog = re.compile(r'\.'.join([r'\d+'] * min(1, self.ndim)))
# count chunk keys
return sum(1 for k in listdir(self.chunk_store, self._path) if prog.match(k)) | python | def nchunks_initialized(self):
"""The number of chunks that have been initialized with some data."""
# key pattern for chunk keys
prog = re.compile(r'\.'.join([r'\d+'] * min(1, self.ndim)))
# count chunk keys
return sum(1 for k in listdir(self.chunk_store, self._path) if prog.match(k)) | [
"def",
"nchunks_initialized",
"(",
"self",
")",
":",
"# key pattern for chunk keys",
"prog",
"=",
"re",
".",
"compile",
"(",
"r'\\.'",
".",
"join",
"(",
"[",
"r'\\d+'",
"]",
"*",
"min",
"(",
"1",
",",
"self",
".",
"ndim",
")",
")",
")",
"# count chunk ke... | The number of chunks that have been initialized with some data. | [
"The",
"number",
"of",
"chunks",
"that",
"have",
"been",
"initialized",
"with",
"some",
"data",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L380-L387 | train | 215,826 |
zarr-developers/zarr | zarr/core.py | Array.get_basic_selection | def get_basic_selection(self, selection=Ellipsis, out=None, fields=None):
"""Retrieve data for an item or region of the array.
Parameters
----------
selection : tuple
A tuple specifying the requested item or region for each dimension of the
array. May be any combination of int and/or slice for multidimensional arrays.
out : ndarray, optional
If given, load the selected data directly into this array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to
extract data for.
Returns
-------
out : ndarray
A NumPy array containing the data for the requested region.
Examples
--------
Setup a 1-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.array(np.arange(100))
Retrieve a single item::
>>> z.get_basic_selection(5)
5
Retrieve a region via slicing::
>>> z.get_basic_selection(slice(5))
array([0, 1, 2, 3, 4])
>>> z.get_basic_selection(slice(-5, None))
array([95, 96, 97, 98, 99])
>>> z.get_basic_selection(slice(5, 10))
array([5, 6, 7, 8, 9])
>>> z.get_basic_selection(slice(5, 10, 2))
array([5, 7, 9])
>>> z.get_basic_selection(slice(None, None, 2))
array([ 0, 2, 4, ..., 94, 96, 98])
Setup a 2-dimensional array::
>>> z = zarr.array(np.arange(100).reshape(10, 10))
Retrieve an item::
>>> z.get_basic_selection((2, 2))
22
Retrieve a region via slicing::
>>> z.get_basic_selection((slice(1, 3), slice(1, 3)))
array([[11, 12],
[21, 22]])
>>> z.get_basic_selection((slice(1, 3), slice(None)))
array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29]])
>>> z.get_basic_selection((slice(None), slice(1, 3)))
array([[ 1, 2],
[11, 12],
[21, 22],
[31, 32],
[41, 42],
[51, 52],
[61, 62],
[71, 72],
[81, 82],
[91, 92]])
>>> z.get_basic_selection((slice(0, 5, 2), slice(0, 5, 2)))
array([[ 0, 2, 4],
[20, 22, 24],
[40, 42, 44]])
>>> z.get_basic_selection((slice(None, None, 2), slice(None, None, 2)))
array([[ 0, 2, 4, 6, 8],
[20, 22, 24, 26, 28],
[40, 42, 44, 46, 48],
[60, 62, 64, 66, 68],
[80, 82, 84, 86, 88]])
For arrays with a structured dtype, specific fields can be retrieved, e.g.::
>>> a = np.array([(b'aaa', 1, 4.2),
... (b'bbb', 2, 8.4),
... (b'ccc', 3, 12.6)],
... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])
>>> z = zarr.array(a)
>>> z.get_basic_selection(slice(2), fields='foo')
array([b'aaa', b'bbb'],
dtype='|S3')
Notes
-----
Slices with step > 1 are supported, but slices with negative step are not.
Currently this method provides the implementation for accessing data via the
square bracket notation (__getitem__). See :func:`__getitem__` for examples
using the alternative notation.
See Also
--------
set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,
set_orthogonal_selection, vindex, oindex, __getitem__, __setitem__
"""
# refresh metadata
if not self._cache_metadata:
self._load_metadata()
# check args
check_fields(fields, self._dtype)
# handle zero-dimensional arrays
if self._shape == ():
return self._get_basic_selection_zd(selection=selection, out=out,
fields=fields)
else:
return self._get_basic_selection_nd(selection=selection, out=out,
fields=fields) | python | def get_basic_selection(self, selection=Ellipsis, out=None, fields=None):
"""Retrieve data for an item or region of the array.
Parameters
----------
selection : tuple
A tuple specifying the requested item or region for each dimension of the
array. May be any combination of int and/or slice for multidimensional arrays.
out : ndarray, optional
If given, load the selected data directly into this array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to
extract data for.
Returns
-------
out : ndarray
A NumPy array containing the data for the requested region.
Examples
--------
Setup a 1-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.array(np.arange(100))
Retrieve a single item::
>>> z.get_basic_selection(5)
5
Retrieve a region via slicing::
>>> z.get_basic_selection(slice(5))
array([0, 1, 2, 3, 4])
>>> z.get_basic_selection(slice(-5, None))
array([95, 96, 97, 98, 99])
>>> z.get_basic_selection(slice(5, 10))
array([5, 6, 7, 8, 9])
>>> z.get_basic_selection(slice(5, 10, 2))
array([5, 7, 9])
>>> z.get_basic_selection(slice(None, None, 2))
array([ 0, 2, 4, ..., 94, 96, 98])
Setup a 2-dimensional array::
>>> z = zarr.array(np.arange(100).reshape(10, 10))
Retrieve an item::
>>> z.get_basic_selection((2, 2))
22
Retrieve a region via slicing::
>>> z.get_basic_selection((slice(1, 3), slice(1, 3)))
array([[11, 12],
[21, 22]])
>>> z.get_basic_selection((slice(1, 3), slice(None)))
array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29]])
>>> z.get_basic_selection((slice(None), slice(1, 3)))
array([[ 1, 2],
[11, 12],
[21, 22],
[31, 32],
[41, 42],
[51, 52],
[61, 62],
[71, 72],
[81, 82],
[91, 92]])
>>> z.get_basic_selection((slice(0, 5, 2), slice(0, 5, 2)))
array([[ 0, 2, 4],
[20, 22, 24],
[40, 42, 44]])
>>> z.get_basic_selection((slice(None, None, 2), slice(None, None, 2)))
array([[ 0, 2, 4, 6, 8],
[20, 22, 24, 26, 28],
[40, 42, 44, 46, 48],
[60, 62, 64, 66, 68],
[80, 82, 84, 86, 88]])
For arrays with a structured dtype, specific fields can be retrieved, e.g.::
>>> a = np.array([(b'aaa', 1, 4.2),
... (b'bbb', 2, 8.4),
... (b'ccc', 3, 12.6)],
... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])
>>> z = zarr.array(a)
>>> z.get_basic_selection(slice(2), fields='foo')
array([b'aaa', b'bbb'],
dtype='|S3')
Notes
-----
Slices with step > 1 are supported, but slices with negative step are not.
Currently this method provides the implementation for accessing data via the
square bracket notation (__getitem__). See :func:`__getitem__` for examples
using the alternative notation.
See Also
--------
set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,
set_orthogonal_selection, vindex, oindex, __getitem__, __setitem__
"""
# refresh metadata
if not self._cache_metadata:
self._load_metadata()
# check args
check_fields(fields, self._dtype)
# handle zero-dimensional arrays
if self._shape == ():
return self._get_basic_selection_zd(selection=selection, out=out,
fields=fields)
else:
return self._get_basic_selection_nd(selection=selection, out=out,
fields=fields) | [
"def",
"get_basic_selection",
"(",
"self",
",",
"selection",
"=",
"Ellipsis",
",",
"out",
"=",
"None",
",",
"fields",
"=",
"None",
")",
":",
"# refresh metadata",
"if",
"not",
"self",
".",
"_cache_metadata",
":",
"self",
".",
"_load_metadata",
"(",
")",
"#... | Retrieve data for an item or region of the array.
Parameters
----------
selection : tuple
A tuple specifying the requested item or region for each dimension of the
array. May be any combination of int and/or slice for multidimensional arrays.
out : ndarray, optional
If given, load the selected data directly into this array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to
extract data for.
Returns
-------
out : ndarray
A NumPy array containing the data for the requested region.
Examples
--------
Setup a 1-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.array(np.arange(100))
Retrieve a single item::
>>> z.get_basic_selection(5)
5
Retrieve a region via slicing::
>>> z.get_basic_selection(slice(5))
array([0, 1, 2, 3, 4])
>>> z.get_basic_selection(slice(-5, None))
array([95, 96, 97, 98, 99])
>>> z.get_basic_selection(slice(5, 10))
array([5, 6, 7, 8, 9])
>>> z.get_basic_selection(slice(5, 10, 2))
array([5, 7, 9])
>>> z.get_basic_selection(slice(None, None, 2))
array([ 0, 2, 4, ..., 94, 96, 98])
Setup a 2-dimensional array::
>>> z = zarr.array(np.arange(100).reshape(10, 10))
Retrieve an item::
>>> z.get_basic_selection((2, 2))
22
Retrieve a region via slicing::
>>> z.get_basic_selection((slice(1, 3), slice(1, 3)))
array([[11, 12],
[21, 22]])
>>> z.get_basic_selection((slice(1, 3), slice(None)))
array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29]])
>>> z.get_basic_selection((slice(None), slice(1, 3)))
array([[ 1, 2],
[11, 12],
[21, 22],
[31, 32],
[41, 42],
[51, 52],
[61, 62],
[71, 72],
[81, 82],
[91, 92]])
>>> z.get_basic_selection((slice(0, 5, 2), slice(0, 5, 2)))
array([[ 0, 2, 4],
[20, 22, 24],
[40, 42, 44]])
>>> z.get_basic_selection((slice(None, None, 2), slice(None, None, 2)))
array([[ 0, 2, 4, 6, 8],
[20, 22, 24, 26, 28],
[40, 42, 44, 46, 48],
[60, 62, 64, 66, 68],
[80, 82, 84, 86, 88]])
For arrays with a structured dtype, specific fields can be retrieved, e.g.::
>>> a = np.array([(b'aaa', 1, 4.2),
... (b'bbb', 2, 8.4),
... (b'ccc', 3, 12.6)],
... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])
>>> z = zarr.array(a)
>>> z.get_basic_selection(slice(2), fields='foo')
array([b'aaa', b'bbb'],
dtype='|S3')
Notes
-----
Slices with step > 1 are supported, but slices with negative step are not.
Currently this method provides the implementation for accessing data via the
square bracket notation (__getitem__). See :func:`__getitem__` for examples
using the alternative notation.
See Also
--------
set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,
set_orthogonal_selection, vindex, oindex, __getitem__, __setitem__ | [
"Retrieve",
"data",
"for",
"an",
"item",
"or",
"region",
"of",
"the",
"array",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L574-L698 | train | 215,827 |
def get_mask_selection(self, selection, out=None, fields=None):
    """Retrieve individual items selected by a Boolean mask.

    Parameters
    ----------
    selection : ndarray, bool
        A Boolean array of the same shape as the array against which the
        selection is being made; True values mark selected items.
    out : ndarray, optional
        If given, load the selected data directly into this array.
    fields : str or sequence of str, optional
        For arrays with a structured dtype, one or more fields can be
        specified to extract data for.

    Returns
    -------
    out : ndarray
        A NumPy array containing the data for the requested selection.

    Notes
    -----
    Mask indexing is a form of vectorized or inner indexing and is
    equivalent to coordinate indexing; internally the mask is converted to
    coordinate arrays via `np.nonzero`. The same functionality is also
    available through the `vindex` property, e.g. ``z.vindex[sel]``.

    See Also
    --------
    get_basic_selection, set_basic_selection, set_mask_selection,
    get_orthogonal_selection, set_orthogonal_selection,
    get_coordinate_selection, set_coordinate_selection, vindex, oindex,
    __getitem__, __setitem__
    """
    # Metadata may be stale when caching is disabled; reload it first.
    if not self._cache_metadata:
        self._load_metadata()

    # Validate any requested structured-dtype fields before indexing.
    check_fields(fields, self._dtype)

    # Delegate to the generic selection machinery via a mask indexer.
    return self._get_selection(indexer=MaskIndexer(selection, self),
                               out=out, fields=fields)
"def",
"get_mask_selection",
"(",
"self",
",",
"selection",
",",
"out",
"=",
"None",
",",
"fields",
"=",
"None",
")",
":",
"# refresh metadata",
"if",
"not",
"self",
".",
"_cache_metadata",
":",
"self",
".",
"_load_metadata",
"(",
")",
"# check args",
"check... | Retrieve a selection of individual items, by providing a Boolean array of the
same shape as the array against which the selection is being made, where True
values indicate a selected item.
Parameters
----------
selection : ndarray, bool
A Boolean array of the same shape as the array against which the selection is
being made.
out : ndarray, optional
If given, load the selected data directly into this array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to
extract data for.
Returns
-------
out : ndarray
A NumPy array containing the data for the requested selection.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.array(np.arange(100).reshape(10, 10))
    Retrieve items by specifying a mask::
>>> sel = np.zeros_like(z, dtype=bool)
>>> sel[1, 1] = True
>>> sel[4, 4] = True
>>> z.get_mask_selection(sel)
array([11, 44])
For convenience, the mask selection functionality is also available via the
`vindex` property, e.g.::
>>> z.vindex[sel]
array([11, 44])
Notes
-----
Mask indexing is a form of vectorized or inner indexing, and is equivalent to
coordinate indexing. Internally the mask array is converted to coordinate
arrays by calling `np.nonzero`.
See Also
--------
get_basic_selection, set_basic_selection, set_mask_selection,
get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection,
set_coordinate_selection, vindex, oindex, __getitem__, __setitem__ | [
"Retrieve",
"a",
"selection",
"of",
"individual",
"items",
"by",
"providing",
"a",
"Boolean",
"array",
"of",
"the",
"same",
"shape",
"as",
"the",
"array",
"against",
"which",
"the",
"selection",
"is",
"being",
"made",
"where",
"True",
"values",
"indicate",
"... | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L933-L1000 | train | 215,828 |
def set_basic_selection(self, selection, value, fields=None):
    """Modify data for an item or region of the array.

    Parameters
    ----------
    selection : tuple
        An integer index or slice, or tuple of int/slice, specifying the
        requested region for each dimension of the array.
    value : scalar or array-like
        Value to be stored into the array.
    fields : str or sequence of str, optional
        For arrays with a structured dtype, one or more fields can be
        specified to set data for.

    Notes
    -----
    This method provides the underlying implementation for modifying data
    via square bracket notation; see :func:`__setitem__` for equivalent
    examples using the alternative notation.

    See Also
    --------
    get_basic_selection, get_mask_selection, set_mask_selection,
    get_coordinate_selection, set_coordinate_selection,
    get_orthogonal_selection, set_orthogonal_selection, vindex, oindex,
    __getitem__, __setitem__
    """
    # Writes are not permitted on read-only arrays.
    if self._read_only:
        err_read_only()

    # Metadata may be stale when caching is disabled; reload it first.
    if not self._cache_metadata:
        self._load_metadata_nosync()

    # Zero-dimensional arrays need dedicated handling.
    if self._shape == ():
        return self._set_basic_selection_zd(selection, value, fields=fields)
    return self._set_basic_selection_nd(selection, value, fields=fields)
"def",
"set_basic_selection",
"(",
"self",
",",
"selection",
",",
"value",
",",
"fields",
"=",
"None",
")",
":",
"# guard conditions",
"if",
"self",
".",
"_read_only",
":",
"err_read_only",
"(",
")",
"# refresh metadata",
"if",
"not",
"self",
".",
"_cache_meta... | Modify data for an item or region of the array.
Parameters
----------
selection : tuple
An integer index or slice or tuple of int/slice specifying the requested
region for each dimension of the array.
value : scalar or array-like
Value to be stored into the array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to set
data for.
Examples
--------
Setup a 1-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.zeros(100, dtype=int)
Set all array elements to the same scalar value::
>>> z.set_basic_selection(..., 42)
>>> z[...]
array([42, 42, 42, ..., 42, 42, 42])
Set a portion of the array::
>>> z.set_basic_selection(slice(10), np.arange(10))
>>> z.set_basic_selection(slice(-10, None), np.arange(10)[::-1])
>>> z[...]
array([ 0, 1, 2, ..., 2, 1, 0])
Setup a 2-dimensional array::
>>> z = zarr.zeros((5, 5), dtype=int)
Set all array elements to the same scalar value::
>>> z.set_basic_selection(..., 42)
Set a portion of the array::
>>> z.set_basic_selection((0, slice(None)), np.arange(z.shape[1]))
>>> z.set_basic_selection((slice(None), 0), np.arange(z.shape[0]))
>>> z[...]
array([[ 0, 1, 2, 3, 4],
[ 1, 42, 42, 42, 42],
[ 2, 42, 42, 42, 42],
[ 3, 42, 42, 42, 42],
[ 4, 42, 42, 42, 42]])
For arrays with a structured dtype, the `fields` parameter can be used to set
data for a specific field, e.g.::
>>> a = np.array([(b'aaa', 1, 4.2),
... (b'bbb', 2, 8.4),
... (b'ccc', 3, 12.6)],
... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])
>>> z = zarr.array(a)
>>> z.set_basic_selection(slice(0, 2), b'zzz', fields='foo')
>>> z[:]
array([(b'zzz', 1, 4.2), (b'zzz', 2, 8.4), (b'ccc', 3, 12.6)],
dtype=[('foo', 'S3'), ('bar', '<i4'), ('baz', '<f8')])
Notes
-----
This method provides the underlying implementation for modifying data via square
bracket notation, see :func:`__setitem__` for equivalent examples using the
alternative notation.
See Also
--------
get_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,
set_orthogonal_selection, vindex, oindex, __getitem__, __setitem__ | [
"Modify",
"data",
"for",
"an",
"item",
"or",
"region",
"of",
"the",
"array",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L1117-L1210 | train | 215,829 |
def set_orthogonal_selection(self, selection, value, fields=None):
    """Modify data via a selection for each dimension of the array.

    Parameters
    ----------
    selection : tuple
        A selection for each dimension of the array. May be any
        combination of int, slice, integer array or Boolean array.
    value : scalar or array-like
        Value to be stored into the array.
    fields : str or sequence of str, optional
        For arrays with a structured dtype, one or more fields can be
        specified to set data for.

    Notes
    -----
    Orthogonal indexing is also known as outer indexing. Slices with
    step > 1 are supported, but slices with negative step are not. The
    same functionality is also available through the `oindex` property,
    e.g. ``z.oindex[[1, 4], [1, 4]] = 4``.

    See Also
    --------
    get_basic_selection, set_basic_selection, get_mask_selection,
    set_mask_selection, get_coordinate_selection,
    set_coordinate_selection, get_orthogonal_selection, vindex, oindex,
    __getitem__, __setitem__
    """
    # Writes are not permitted on read-only arrays.
    if self._read_only:
        err_read_only()

    # Metadata may be stale when caching is disabled; reload it first.
    if not self._cache_metadata:
        self._load_metadata_nosync()

    # Delegate to the generic selection machinery via an orthogonal indexer.
    self._set_selection(OrthogonalIndexer(selection, self), value,
                        fields=fields)
"def",
"set_orthogonal_selection",
"(",
"self",
",",
"selection",
",",
"value",
",",
"fields",
"=",
"None",
")",
":",
"# guard conditions",
"if",
"self",
".",
"_read_only",
":",
"err_read_only",
"(",
")",
"# refresh metadata",
"if",
"not",
"self",
".",
"_cache... | Modify data via a selection for each dimension of the array.
Parameters
----------
selection : tuple
A selection for each dimension of the array. May be any combination of int,
slice, integer array or Boolean array.
value : scalar or array-like
Value to be stored into the array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to set
data for.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.zeros((5, 5), dtype=int)
Set data for a selection of rows::
>>> z.set_orthogonal_selection(([1, 4], slice(None)), 1)
>>> z[...]
array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1]])
Set data for a selection of columns::
>>> z.set_orthogonal_selection((slice(None), [1, 4]), 2)
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 2, 1, 1, 2],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 2, 1, 1, 2]])
Set data for a selection of rows and columns::
>>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3)
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 3, 1, 1, 3],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 3, 1, 1, 3]])
For convenience, this functionality is also available via the `oindex` property.
E.g.::
>>> z.oindex[[1, 4], [1, 4]] = 4
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 4, 1, 1, 4],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 4, 1, 1, 4]])
Notes
-----
Orthogonal indexing is also known as outer indexing.
Slices with step > 1 are supported, but slices with negative step are not.
See Also
--------
get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,
vindex, oindex, __getitem__, __setitem__ | [
"Modify",
"data",
"via",
"a",
"selection",
"for",
"each",
"dimension",
"of",
"the",
"array",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L1212-L1300 | train | 215,830 |
def set_mask_selection(self, selection, value, fields=None):
    """Modify individual items selected by a Boolean mask.

    Parameters
    ----------
    selection : ndarray, bool
        A Boolean array of the same shape as the array against which the
        selection is being made; True values mark selected items.
    value : scalar or array-like
        Value to be stored into the array.
    fields : str or sequence of str, optional
        For arrays with a structured dtype, one or more fields can be
        specified to set data for.

    Notes
    -----
    Mask indexing is a form of vectorized or inner indexing and is
    equivalent to coordinate indexing; internally the mask is converted to
    coordinate arrays via `np.nonzero`. The same functionality is also
    available through the `vindex` property, e.g. ``z.vindex[sel] = 2``.

    See Also
    --------
    get_basic_selection, set_basic_selection, get_mask_selection,
    get_orthogonal_selection, set_orthogonal_selection,
    get_coordinate_selection, set_coordinate_selection, vindex, oindex,
    __getitem__, __setitem__
    """
    # Writes are not permitted on read-only arrays.
    if self._read_only:
        err_read_only()

    # Metadata may be stale when caching is disabled; reload it first.
    if not self._cache_metadata:
        self._load_metadata_nosync()

    # Delegate to the generic selection machinery via a mask indexer.
    self._set_selection(MaskIndexer(selection, self), value, fields=fields)
"def",
"set_mask_selection",
"(",
"self",
",",
"selection",
",",
"value",
",",
"fields",
"=",
"None",
")",
":",
"# guard conditions",
"if",
"self",
".",
"_read_only",
":",
"err_read_only",
"(",
")",
"# refresh metadata",
"if",
"not",
"self",
".",
"_cache_metad... | Modify a selection of individual items, by providing a Boolean array of the
same shape as the array against which the selection is being made, where True
values indicate a selected item.
Parameters
----------
selection : ndarray, bool
A Boolean array of the same shape as the array against which the selection is
being made.
value : scalar or array-like
Value to be stored into the array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to set
data for.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.zeros((5, 5), dtype=int)
Set data for a selection of items::
>>> sel = np.zeros_like(z, dtype=bool)
>>> sel[1, 1] = True
>>> sel[4, 4] = True
>>> z.set_mask_selection(sel, 1)
>>> z[...]
array([[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1]])
For convenience, this functionality is also available via the `vindex` property.
E.g.::
>>> z.vindex[sel] = 2
>>> z[...]
array([[0, 0, 0, 0, 0],
[0, 2, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 2]])
Notes
-----
Mask indexing is a form of vectorized or inner indexing, and is equivalent to
coordinate indexing. Internally the mask array is converted to coordinate
arrays by calling `np.nonzero`.
See Also
--------
get_basic_selection, set_basic_selection, get_mask_selection,
get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection,
set_coordinate_selection, vindex, oindex, __getitem__, __setitem__ | [
"Modify",
"a",
"selection",
"of",
"individual",
"items",
"by",
"providing",
"a",
"Boolean",
"array",
"of",
"the",
"same",
"shape",
"as",
"the",
"array",
"against",
"which",
"the",
"selection",
"is",
"being",
"made",
"where",
"True",
"values",
"indicate",
"a"... | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L1380-L1453 | train | 215,831 |
def _chunk_getitem(self, chunk_coords, chunk_selection, out, out_selection,
                   drop_axes=None, fields=None):
    """Obtain part or whole of a chunk.

    Parameters
    ----------
    chunk_coords : tuple of ints
        Indices of the chunk.
    chunk_selection : selection
        Location of region within the chunk to extract.
    out : ndarray
        Array to store result in.
    out_selection : selection
        Location of region within output array to store results in.
    drop_axes : tuple of ints
        Axes to squeeze out of the chunk.
    fields
        For structured dtypes, field(s) to extract from the chunk.
    """
    assert len(chunk_coords) == len(self._cdata_shape)

    # Locate the chunk in the chunk store.
    ckey = self._chunk_key(chunk_coords)
    try:
        cdata = self.chunk_store[ckey]
    except KeyError:
        # Chunk not initialized: emit the fill value, if any, and stop.
        if self._fill_value is not None:
            fill = self._fill_value[fields] if fields else self._fill_value
            out[out_selection] = fill
        return

    # Fast path: the whole chunk is wanted and the destination region is
    # contiguous, so we may be able to decompress straight into `out`
    # without an intermediate chunk array.
    whole_chunk_wanted = (
        isinstance(out, np.ndarray)
        and not fields
        and is_contiguous_selection(out_selection)
        and is_total_slice(chunk_selection, self._chunks)
        and not self._filters
        and self._dtype != object
    )
    if whole_chunk_wanted:
        dest = out[out_selection]
        layout_ok = (
            (self._order == 'C' and dest.flags.c_contiguous) or
            (self._order == 'F' and dest.flags.f_contiguous)
        )
        if dest.flags.writeable and layout_ok:
            if self._compressor:
                # Decompress directly into the destination buffer.
                self._compressor.decode(cdata, dest)
            else:
                raw = ensure_ndarray(cdata).view(self._dtype)
                np.copyto(dest, raw.reshape(self._chunks, order=self._order))
            return

    # General path: decode the chunk, then copy out the selected region.
    chunk = self._decode_chunk(cdata)
    if fields:
        chunk = chunk[fields]
    part = chunk[chunk_selection]
    if drop_axes:
        part = np.squeeze(part, axis=drop_axes)
    out[out_selection] = part
"def",
"_chunk_getitem",
"(",
"self",
",",
"chunk_coords",
",",
"chunk_selection",
",",
"out",
",",
"out_selection",
",",
"drop_axes",
"=",
"None",
",",
"fields",
"=",
"None",
")",
":",
"assert",
"len",
"(",
"chunk_coords",
")",
"==",
"len",
"(",
"self",
... | Obtain part or whole of a chunk.
Parameters
----------
chunk_coords : tuple of ints
Indices of the chunk.
chunk_selection : selection
Location of region within the chunk to extract.
out : ndarray
Array to store result in.
out_selection : selection
Location of region within output array to store results in.
drop_axes : tuple of ints
Axes to squeeze out of the chunk.
fields
TODO | [
"Obtain",
"part",
"or",
"whole",
"of",
"a",
"chunk",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L1552-L1633 | train | 215,832 |
zarr-developers/zarr | zarr/core.py | Array._chunk_setitem | def _chunk_setitem(self, chunk_coords, chunk_selection, value, fields=None):
"""Replace part or whole of a chunk.
Parameters
----------
chunk_coords : tuple of ints
Indices of the chunk.
chunk_selection : tuple of slices
Location of region within the chunk.
value : scalar or ndarray
Value to set.
"""
if self._synchronizer is None:
# no synchronization
lock = nolock
else:
# synchronize on the chunk
ckey = self._chunk_key(chunk_coords)
lock = self._synchronizer[ckey]
with lock:
self._chunk_setitem_nosync(chunk_coords, chunk_selection, value,
fields=fields) | python | def _chunk_setitem(self, chunk_coords, chunk_selection, value, fields=None):
"""Replace part or whole of a chunk.
Parameters
----------
chunk_coords : tuple of ints
Indices of the chunk.
chunk_selection : tuple of slices
Location of region within the chunk.
value : scalar or ndarray
Value to set.
"""
if self._synchronizer is None:
# no synchronization
lock = nolock
else:
# synchronize on the chunk
ckey = self._chunk_key(chunk_coords)
lock = self._synchronizer[ckey]
with lock:
self._chunk_setitem_nosync(chunk_coords, chunk_selection, value,
fields=fields) | [
"def",
"_chunk_setitem",
"(",
"self",
",",
"chunk_coords",
",",
"chunk_selection",
",",
"value",
",",
"fields",
"=",
"None",
")",
":",
"if",
"self",
".",
"_synchronizer",
"is",
"None",
":",
"# no synchronization",
"lock",
"=",
"nolock",
"else",
":",
"# synch... | Replace part or whole of a chunk.
Parameters
----------
chunk_coords : tuple of ints
Indices of the chunk.
chunk_selection : tuple of slices
Location of region within the chunk.
value : scalar or ndarray
Value to set. | [
"Replace",
"part",
"or",
"whole",
"of",
"a",
"chunk",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L1635-L1659 | train | 215,833 |
zarr-developers/zarr | zarr/core.py | Array.append | def append(self, data, axis=0):
"""Append `data` to `axis`.
Parameters
----------
data : array_like
Data to be appended.
axis : int
Axis along which to append.
Returns
-------
new_shape : tuple
Notes
-----
The size of all dimensions other than `axis` must match between this
array and `data`.
Examples
--------
>>> import numpy as np
>>> import zarr
>>> a = np.arange(10000000, dtype='i4').reshape(10000, 1000)
>>> z = zarr.array(a, chunks=(1000, 100))
>>> z.shape
(10000, 1000)
>>> z.append(a)
(20000, 1000)
>>> z.append(np.vstack([a, a]), axis=1)
(20000, 2000)
>>> z.shape
(20000, 2000)
"""
return self._write_op(self._append_nosync, data, axis=axis) | python | def append(self, data, axis=0):
"""Append `data` to `axis`.
Parameters
----------
data : array_like
Data to be appended.
axis : int
Axis along which to append.
Returns
-------
new_shape : tuple
Notes
-----
The size of all dimensions other than `axis` must match between this
array and `data`.
Examples
--------
>>> import numpy as np
>>> import zarr
>>> a = np.arange(10000000, dtype='i4').reshape(10000, 1000)
>>> z = zarr.array(a, chunks=(1000, 100))
>>> z.shape
(10000, 1000)
>>> z.append(a)
(20000, 1000)
>>> z.append(np.vstack([a, a]), axis=1)
(20000, 2000)
>>> z.shape
(20000, 2000)
"""
return self._write_op(self._append_nosync, data, axis=axis) | [
"def",
"append",
"(",
"self",
",",
"data",
",",
"axis",
"=",
"0",
")",
":",
"return",
"self",
".",
"_write_op",
"(",
"self",
".",
"_append_nosync",
",",
"data",
",",
"axis",
"=",
"axis",
")"
] | Append `data` to `axis`.
Parameters
----------
data : array_like
Data to be appended.
axis : int
Axis along which to append.
Returns
-------
new_shape : tuple
Notes
-----
The size of all dimensions other than `axis` must match between this
array and `data`.
Examples
--------
>>> import numpy as np
>>> import zarr
>>> a = np.arange(10000000, dtype='i4').reshape(10000, 1000)
>>> z = zarr.array(a, chunks=(1000, 100))
>>> z.shape
(10000, 1000)
>>> z.append(a)
(20000, 1000)
>>> z.append(np.vstack([a, a]), axis=1)
(20000, 2000)
>>> z.shape
(20000, 2000) | [
"Append",
"data",
"to",
"axis",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L2027-L2062 | train | 215,834 |
zarr-developers/zarr | zarr/core.py | Array.view | def view(self, shape=None, chunks=None, dtype=None,
fill_value=None, filters=None, read_only=None,
synchronizer=None):
"""Return an array sharing the same data.
Parameters
----------
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape.
dtype : string or dtype, optional
NumPy dtype.
fill_value : object
Default value to use for uninitialized portions of the array.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to
compression.
read_only : bool, optional
True if array should be protected against modification.
synchronizer : object, optional
Array synchronizer.
Notes
-----
WARNING: This is an experimental feature and should be used with care.
There are plenty of ways to generate errors and/or cause data
corruption.
Examples
--------
Bypass filters:
>>> import zarr
>>> import numpy as np
>>> np.random.seed(42)
>>> labels = ['female', 'male']
>>> data = np.random.choice(labels, size=10000)
>>> filters = [zarr.Categorize(labels=labels,
... dtype=data.dtype,
... astype='u1')]
>>> a = zarr.array(data, chunks=1000, filters=filters)
>>> a[:]
array(['female', 'male', 'female', ..., 'male', 'male', 'female'],
dtype='<U6')
>>> v = a.view(dtype='u1', filters=[])
>>> v.is_view
True
>>> v[:]
array([1, 2, 1, ..., 2, 2, 1], dtype=uint8)
Views can be used to modify data:
>>> x = v[:]
>>> x.sort()
>>> v[:] = x
>>> v[:]
array([1, 1, 1, ..., 2, 2, 2], dtype=uint8)
>>> a[:]
array(['female', 'female', 'female', ..., 'male', 'male', 'male'],
dtype='<U6')
View as a different dtype with the same item size:
>>> data = np.random.randint(0, 2, size=10000, dtype='u1')
>>> a = zarr.array(data, chunks=1000)
>>> a[:]
array([0, 0, 1, ..., 1, 0, 0], dtype=uint8)
>>> v = a.view(dtype=bool)
>>> v[:]
array([False, False, True, ..., True, False, False])
>>> np.all(a[:].view(dtype=bool) == v[:])
True
An array can be viewed with a dtype with a different item size, however
some care is needed to adjust the shape and chunk shape so that chunk
data is interpreted correctly:
>>> data = np.arange(10000, dtype='u2')
>>> a = zarr.array(data, chunks=1000)
>>> a[:10]
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16)
>>> v = a.view(dtype='u1', shape=20000, chunks=2000)
>>> v[:10]
array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8)
>>> np.all(a[:].view('u1') == v[:])
True
Change fill value for uninitialized chunks:
>>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1')
>>> a[:]
array([-1, -1, -1, ..., -1, -1, -1], dtype=int8)
>>> v = a.view(fill_value=42)
>>> v[:]
array([42, 42, 42, ..., 42, 42, 42], dtype=int8)
Note that resizing or appending to views is not permitted:
>>> a = zarr.empty(10000)
>>> v = a.view()
>>> try:
... v.resize(20000)
... except PermissionError as e:
... print(e)
operation not permitted for views
"""
store = self._store
chunk_store = self._chunk_store
path = self._path
if read_only is None:
read_only = self._read_only
if synchronizer is None:
synchronizer = self._synchronizer
a = Array(store=store, path=path, chunk_store=chunk_store, read_only=read_only,
synchronizer=synchronizer, cache_metadata=True)
a._is_view = True
# allow override of some properties
if dtype is None:
dtype = self._dtype
else:
dtype = np.dtype(dtype)
a._dtype = dtype
if shape is None:
shape = self._shape
else:
shape = normalize_shape(shape)
a._shape = shape
if chunks is not None:
chunks = normalize_chunks(chunks, shape, dtype.itemsize)
a._chunks = chunks
if fill_value is not None:
a._fill_value = fill_value
if filters is not None:
a._filters = filters
return a | python | def view(self, shape=None, chunks=None, dtype=None,
fill_value=None, filters=None, read_only=None,
synchronizer=None):
"""Return an array sharing the same data.
Parameters
----------
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape.
dtype : string or dtype, optional
NumPy dtype.
fill_value : object
Default value to use for uninitialized portions of the array.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to
compression.
read_only : bool, optional
True if array should be protected against modification.
synchronizer : object, optional
Array synchronizer.
Notes
-----
WARNING: This is an experimental feature and should be used with care.
There are plenty of ways to generate errors and/or cause data
corruption.
Examples
--------
Bypass filters:
>>> import zarr
>>> import numpy as np
>>> np.random.seed(42)
>>> labels = ['female', 'male']
>>> data = np.random.choice(labels, size=10000)
>>> filters = [zarr.Categorize(labels=labels,
... dtype=data.dtype,
... astype='u1')]
>>> a = zarr.array(data, chunks=1000, filters=filters)
>>> a[:]
array(['female', 'male', 'female', ..., 'male', 'male', 'female'],
dtype='<U6')
>>> v = a.view(dtype='u1', filters=[])
>>> v.is_view
True
>>> v[:]
array([1, 2, 1, ..., 2, 2, 1], dtype=uint8)
Views can be used to modify data:
>>> x = v[:]
>>> x.sort()
>>> v[:] = x
>>> v[:]
array([1, 1, 1, ..., 2, 2, 2], dtype=uint8)
>>> a[:]
array(['female', 'female', 'female', ..., 'male', 'male', 'male'],
dtype='<U6')
View as a different dtype with the same item size:
>>> data = np.random.randint(0, 2, size=10000, dtype='u1')
>>> a = zarr.array(data, chunks=1000)
>>> a[:]
array([0, 0, 1, ..., 1, 0, 0], dtype=uint8)
>>> v = a.view(dtype=bool)
>>> v[:]
array([False, False, True, ..., True, False, False])
>>> np.all(a[:].view(dtype=bool) == v[:])
True
An array can be viewed with a dtype with a different item size, however
some care is needed to adjust the shape and chunk shape so that chunk
data is interpreted correctly:
>>> data = np.arange(10000, dtype='u2')
>>> a = zarr.array(data, chunks=1000)
>>> a[:10]
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16)
>>> v = a.view(dtype='u1', shape=20000, chunks=2000)
>>> v[:10]
array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8)
>>> np.all(a[:].view('u1') == v[:])
True
Change fill value for uninitialized chunks:
>>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1')
>>> a[:]
array([-1, -1, -1, ..., -1, -1, -1], dtype=int8)
>>> v = a.view(fill_value=42)
>>> v[:]
array([42, 42, 42, ..., 42, 42, 42], dtype=int8)
Note that resizing or appending to views is not permitted:
>>> a = zarr.empty(10000)
>>> v = a.view()
>>> try:
... v.resize(20000)
... except PermissionError as e:
... print(e)
operation not permitted for views
"""
store = self._store
chunk_store = self._chunk_store
path = self._path
if read_only is None:
read_only = self._read_only
if synchronizer is None:
synchronizer = self._synchronizer
a = Array(store=store, path=path, chunk_store=chunk_store, read_only=read_only,
synchronizer=synchronizer, cache_metadata=True)
a._is_view = True
# allow override of some properties
if dtype is None:
dtype = self._dtype
else:
dtype = np.dtype(dtype)
a._dtype = dtype
if shape is None:
shape = self._shape
else:
shape = normalize_shape(shape)
a._shape = shape
if chunks is not None:
chunks = normalize_chunks(chunks, shape, dtype.itemsize)
a._chunks = chunks
if fill_value is not None:
a._fill_value = fill_value
if filters is not None:
a._filters = filters
return a | [
"def",
"view",
"(",
"self",
",",
"shape",
"=",
"None",
",",
"chunks",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"fill_value",
"=",
"None",
",",
"filters",
"=",
"None",
",",
"read_only",
"=",
"None",
",",
"synchronizer",
"=",
"None",
")",
":",
"s... | Return an array sharing the same data.
Parameters
----------
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape.
dtype : string or dtype, optional
NumPy dtype.
fill_value : object
Default value to use for uninitialized portions of the array.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to
compression.
read_only : bool, optional
True if array should be protected against modification.
synchronizer : object, optional
Array synchronizer.
Notes
-----
WARNING: This is an experimental feature and should be used with care.
There are plenty of ways to generate errors and/or cause data
corruption.
Examples
--------
Bypass filters:
>>> import zarr
>>> import numpy as np
>>> np.random.seed(42)
>>> labels = ['female', 'male']
>>> data = np.random.choice(labels, size=10000)
>>> filters = [zarr.Categorize(labels=labels,
... dtype=data.dtype,
... astype='u1')]
>>> a = zarr.array(data, chunks=1000, filters=filters)
>>> a[:]
array(['female', 'male', 'female', ..., 'male', 'male', 'female'],
dtype='<U6')
>>> v = a.view(dtype='u1', filters=[])
>>> v.is_view
True
>>> v[:]
array([1, 2, 1, ..., 2, 2, 1], dtype=uint8)
Views can be used to modify data:
>>> x = v[:]
>>> x.sort()
>>> v[:] = x
>>> v[:]
array([1, 1, 1, ..., 2, 2, 2], dtype=uint8)
>>> a[:]
array(['female', 'female', 'female', ..., 'male', 'male', 'male'],
dtype='<U6')
View as a different dtype with the same item size:
>>> data = np.random.randint(0, 2, size=10000, dtype='u1')
>>> a = zarr.array(data, chunks=1000)
>>> a[:]
array([0, 0, 1, ..., 1, 0, 0], dtype=uint8)
>>> v = a.view(dtype=bool)
>>> v[:]
array([False, False, True, ..., True, False, False])
>>> np.all(a[:].view(dtype=bool) == v[:])
True
An array can be viewed with a dtype with a different item size, however
some care is needed to adjust the shape and chunk shape so that chunk
data is interpreted correctly:
>>> data = np.arange(10000, dtype='u2')
>>> a = zarr.array(data, chunks=1000)
>>> a[:10]
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16)
>>> v = a.view(dtype='u1', shape=20000, chunks=2000)
>>> v[:10]
array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8)
>>> np.all(a[:].view('u1') == v[:])
True
Change fill value for uninitialized chunks:
>>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1')
>>> a[:]
array([-1, -1, -1, ..., -1, -1, -1], dtype=int8)
>>> v = a.view(fill_value=42)
>>> v[:]
array([42, 42, 42, ..., 42, 42, 42], dtype=int8)
Note that resizing or appending to views is not permitted:
>>> a = zarr.empty(10000)
>>> v = a.view()
>>> try:
... v.resize(20000)
... except PermissionError as e:
... print(e)
operation not permitted for views | [
"Return",
"an",
"array",
"sharing",
"the",
"same",
"data",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L2102-L2242 | train | 215,835 |
zarr-developers/zarr | zarr/core.py | Array.astype | def astype(self, dtype):
"""Returns a view that does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32)
"""
dtype = np.dtype(dtype)
filters = []
if self._filters:
filters.extend(self._filters)
filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))
return self.view(filters=filters, dtype=dtype, read_only=True) | python | def astype(self, dtype):
"""Returns a view that does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32)
"""
dtype = np.dtype(dtype)
filters = []
if self._filters:
filters.extend(self._filters)
filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))
return self.view(filters=filters, dtype=dtype, read_only=True) | [
"def",
"astype",
"(",
"self",
",",
"dtype",
")",
":",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"filters",
"=",
"[",
"]",
"if",
"self",
".",
"_filters",
":",
"filters",
".",
"extend",
"(",
"self",
".",
"_filters",
")",
"filters",
".",
... | Returns a view that does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32) | [
"Returns",
"a",
"view",
"that",
"does",
"on",
"the",
"fly",
"type",
"conversion",
"of",
"the",
"underlying",
"data",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L2244-L2302 | train | 215,836 |
zarr-developers/zarr | zarr/creation.py | full | def full(shape, fill_value, **kwargs):
"""Create an array, with `fill_value` being used as the default value for
uninitialized portions of the array.
For parameter definitions see :func:`zarr.creation.create`.
Examples
--------
>>> import zarr
>>> z = zarr.full((10000, 10000), chunks=(1000, 1000), fill_value=42)
>>> z
<zarr.core.Array (10000, 10000) float64>
>>> z[:2, :2]
array([[42., 42.],
[42., 42.]])
"""
return create(shape=shape, fill_value=fill_value, **kwargs) | python | def full(shape, fill_value, **kwargs):
"""Create an array, with `fill_value` being used as the default value for
uninitialized portions of the array.
For parameter definitions see :func:`zarr.creation.create`.
Examples
--------
>>> import zarr
>>> z = zarr.full((10000, 10000), chunks=(1000, 1000), fill_value=42)
>>> z
<zarr.core.Array (10000, 10000) float64>
>>> z[:2, :2]
array([[42., 42.],
[42., 42.]])
"""
return create(shape=shape, fill_value=fill_value, **kwargs) | [
"def",
"full",
"(",
"shape",
",",
"fill_value",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"create",
"(",
"shape",
"=",
"shape",
",",
"fill_value",
"=",
"fill_value",
",",
"*",
"*",
"kwargs",
")"
] | Create an array, with `fill_value` being used as the default value for
uninitialized portions of the array.
For parameter definitions see :func:`zarr.creation.create`.
Examples
--------
>>> import zarr
>>> z = zarr.full((10000, 10000), chunks=(1000, 1000), fill_value=42)
>>> z
<zarr.core.Array (10000, 10000) float64>
>>> z[:2, :2]
array([[42., 42.],
[42., 42.]]) | [
"Create",
"an",
"array",
"with",
"fill_value",
"being",
"used",
"as",
"the",
"default",
"value",
"for",
"uninitialized",
"portions",
"of",
"the",
"array",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/creation.py#L259-L277 | train | 215,837 |
zarr-developers/zarr | zarr/creation.py | array | def array(data, **kwargs):
"""Create an array filled with `data`.
The `data` argument should be a NumPy array or array-like object. For
other parameter definitions see :func:`zarr.creation.create`.
Examples
--------
>>> import numpy as np
>>> import zarr
>>> a = np.arange(100000000).reshape(10000, 10000)
>>> z = zarr.array(a, chunks=(1000, 1000))
>>> z
<zarr.core.Array (10000, 10000) int64>
"""
# ensure data is array-like
if not hasattr(data, 'shape') or not hasattr(data, 'dtype'):
data = np.asanyarray(data)
# setup dtype
kw_dtype = kwargs.get('dtype', None)
if kw_dtype is None:
kwargs['dtype'] = data.dtype
else:
kwargs['dtype'] = kw_dtype
# setup shape and chunks
data_shape, data_chunks = _get_shape_chunks(data)
kwargs['shape'] = data_shape
kw_chunks = kwargs.get('chunks', None)
if kw_chunks is None:
kwargs['chunks'] = data_chunks
else:
kwargs['chunks'] = kw_chunks
# pop read-only to apply after storing the data
read_only = kwargs.pop('read_only', False)
# instantiate array
z = create(**kwargs)
# fill with data
z[...] = data
# set read_only property afterwards
z.read_only = read_only
return z | python | def array(data, **kwargs):
"""Create an array filled with `data`.
The `data` argument should be a NumPy array or array-like object. For
other parameter definitions see :func:`zarr.creation.create`.
Examples
--------
>>> import numpy as np
>>> import zarr
>>> a = np.arange(100000000).reshape(10000, 10000)
>>> z = zarr.array(a, chunks=(1000, 1000))
>>> z
<zarr.core.Array (10000, 10000) int64>
"""
# ensure data is array-like
if not hasattr(data, 'shape') or not hasattr(data, 'dtype'):
data = np.asanyarray(data)
# setup dtype
kw_dtype = kwargs.get('dtype', None)
if kw_dtype is None:
kwargs['dtype'] = data.dtype
else:
kwargs['dtype'] = kw_dtype
# setup shape and chunks
data_shape, data_chunks = _get_shape_chunks(data)
kwargs['shape'] = data_shape
kw_chunks = kwargs.get('chunks', None)
if kw_chunks is None:
kwargs['chunks'] = data_chunks
else:
kwargs['chunks'] = kw_chunks
# pop read-only to apply after storing the data
read_only = kwargs.pop('read_only', False)
# instantiate array
z = create(**kwargs)
# fill with data
z[...] = data
# set read_only property afterwards
z.read_only = read_only
return z | [
"def",
"array",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"# ensure data is array-like",
"if",
"not",
"hasattr",
"(",
"data",
",",
"'shape'",
")",
"or",
"not",
"hasattr",
"(",
"data",
",",
"'dtype'",
")",
":",
"data",
"=",
"np",
".",
"asanyarray"... | Create an array filled with `data`.
The `data` argument should be a NumPy array or array-like object. For
other parameter definitions see :func:`zarr.creation.create`.
Examples
--------
>>> import numpy as np
>>> import zarr
>>> a = np.arange(100000000).reshape(10000, 10000)
>>> z = zarr.array(a, chunks=(1000, 1000))
>>> z
<zarr.core.Array (10000, 10000) int64> | [
"Create",
"an",
"array",
"filled",
"with",
"data",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/creation.py#L300-L349 | train | 215,838 |
zarr-developers/zarr | zarr/creation.py | open_array | def open_array(store=None, mode='a', shape=None, chunks=True, dtype=None,
compressor='default', fill_value=0, order='C', synchronizer=None,
filters=None, cache_metadata=True, cache_attrs=True, path=None,
object_codec=None, chunk_store=None, **kwargs):
"""Open an array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
shape : int or tuple of ints, optional
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object, optional
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
synchronizer : object, optional
Array synchronizer.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to compression.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
cache_attrs : bool, optional
If True (default), user attributes will be cached for attribute read
operations. If False, user attributes are reloaded from the store prior
to all attribute read operations.
path : string, optional
Array path within store.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
chunk_store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
Returns
-------
z : zarr.core.Array
Examples
--------
>>> import numpy as np
>>> import zarr
>>> z1 = zarr.open_array('data/example.zarr', mode='w', shape=(10000, 10000),
... chunks=(1000, 1000), fill_value=0)
>>> z1[:] = np.arange(100000000).reshape(10000, 10000)
>>> z1
<zarr.core.Array (10000, 10000) float64>
>>> z2 = zarr.open_array('data/example.zarr', mode='r')
>>> z2
<zarr.core.Array (10000, 10000) float64 read-only>
>>> np.all(z1[:] == z2[:])
True
Notes
-----
There is no need to close an array. Data are automatically flushed to the
file system.
"""
# use same mode semantics as h5py
# r : read only, must exist
# r+ : read/write, must exist
# w : create, delete if exists
# w- or x : create, fail if exists
# a : read/write if exists, create otherwise (default)
# handle polymorphic store arg
clobber = mode == 'w'
store = normalize_store_arg(store, clobber=clobber)
if chunk_store is not None:
chunk_store = normalize_store_arg(chunk_store, clobber=clobber)
path = normalize_storage_path(path)
# API compatibility with h5py
compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs)
# ensure fill_value of correct type
if fill_value is not None:
fill_value = np.array(fill_value, dtype=dtype)[()]
# ensure store is initialized
if mode in ['r', 'r+']:
if contains_group(store, path=path):
err_contains_group(path)
elif not contains_array(store, path=path):
err_array_not_found(path)
elif mode == 'w':
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, overwrite=True, path=path,
object_codec=object_codec, chunk_store=chunk_store)
elif mode == 'a':
if contains_group(store, path=path):
err_contains_group(path)
elif not contains_array(store, path=path):
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, path=path,
object_codec=object_codec, chunk_store=chunk_store)
elif mode in ['w-', 'x']:
if contains_group(store, path=path):
err_contains_group(path)
elif contains_array(store, path=path):
err_contains_array(path)
else:
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, path=path,
object_codec=object_codec, chunk_store=chunk_store)
# determine read only status
read_only = mode == 'r'
# instantiate array
z = Array(store, read_only=read_only, synchronizer=synchronizer,
cache_metadata=cache_metadata, cache_attrs=cache_attrs, path=path,
chunk_store=chunk_store)
return z | python | def open_array(store=None, mode='a', shape=None, chunks=True, dtype=None,
compressor='default', fill_value=0, order='C', synchronizer=None,
filters=None, cache_metadata=True, cache_attrs=True, path=None,
object_codec=None, chunk_store=None, **kwargs):
"""Open an array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
shape : int or tuple of ints, optional
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object, optional
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
synchronizer : object, optional
Array synchronizer.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to compression.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
cache_attrs : bool, optional
If True (default), user attributes will be cached for attribute read
operations. If False, user attributes are reloaded from the store prior
to all attribute read operations.
path : string, optional
Array path within store.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
chunk_store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
Returns
-------
z : zarr.core.Array
Examples
--------
>>> import numpy as np
>>> import zarr
>>> z1 = zarr.open_array('data/example.zarr', mode='w', shape=(10000, 10000),
... chunks=(1000, 1000), fill_value=0)
>>> z1[:] = np.arange(100000000).reshape(10000, 10000)
>>> z1
<zarr.core.Array (10000, 10000) float64>
>>> z2 = zarr.open_array('data/example.zarr', mode='r')
>>> z2
<zarr.core.Array (10000, 10000) float64 read-only>
>>> np.all(z1[:] == z2[:])
True
Notes
-----
There is no need to close an array. Data are automatically flushed to the
file system.
"""
# use same mode semantics as h5py
# r : read only, must exist
# r+ : read/write, must exist
# w : create, delete if exists
# w- or x : create, fail if exists
# a : read/write if exists, create otherwise (default)
# handle polymorphic store arg
clobber = mode == 'w'
store = normalize_store_arg(store, clobber=clobber)
if chunk_store is not None:
chunk_store = normalize_store_arg(chunk_store, clobber=clobber)
path = normalize_storage_path(path)
# API compatibility with h5py
compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs)
# ensure fill_value of correct type
if fill_value is not None:
fill_value = np.array(fill_value, dtype=dtype)[()]
# ensure store is initialized
if mode in ['r', 'r+']:
if contains_group(store, path=path):
err_contains_group(path)
elif not contains_array(store, path=path):
err_array_not_found(path)
elif mode == 'w':
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, overwrite=True, path=path,
object_codec=object_codec, chunk_store=chunk_store)
elif mode == 'a':
if contains_group(store, path=path):
err_contains_group(path)
elif not contains_array(store, path=path):
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, path=path,
object_codec=object_codec, chunk_store=chunk_store)
elif mode in ['w-', 'x']:
if contains_group(store, path=path):
err_contains_group(path)
elif contains_array(store, path=path):
err_contains_array(path)
else:
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, path=path,
object_codec=object_codec, chunk_store=chunk_store)
# determine read only status
read_only = mode == 'r'
# instantiate array
z = Array(store, read_only=read_only, synchronizer=synchronizer,
cache_metadata=cache_metadata, cache_attrs=cache_attrs, path=path,
chunk_store=chunk_store)
return z | [
"def",
"open_array",
"(",
"store",
"=",
"None",
",",
"mode",
"=",
"'a'",
",",
"shape",
"=",
"None",
",",
"chunks",
"=",
"True",
",",
"dtype",
"=",
"None",
",",
"compressor",
"=",
"'default'",
",",
"fill_value",
"=",
"0",
",",
"order",
"=",
"'C'",
"... | Open an array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
shape : int or tuple of ints, optional
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object, optional
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
synchronizer : object, optional
Array synchronizer.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to compression.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
cache_attrs : bool, optional
If True (default), user attributes will be cached for attribute read
operations. If False, user attributes are reloaded from the store prior
to all attribute read operations.
path : string, optional
Array path within store.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
chunk_store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
Returns
-------
z : zarr.core.Array
Examples
--------
>>> import numpy as np
>>> import zarr
>>> z1 = zarr.open_array('data/example.zarr', mode='w', shape=(10000, 10000),
... chunks=(1000, 1000), fill_value=0)
>>> z1[:] = np.arange(100000000).reshape(10000, 10000)
>>> z1
<zarr.core.Array (10000, 10000) float64>
>>> z2 = zarr.open_array('data/example.zarr', mode='r')
>>> z2
<zarr.core.Array (10000, 10000) float64 read-only>
>>> np.all(z1[:] == z2[:])
True
Notes
-----
There is no need to close an array. Data are automatically flushed to the
file system. | [
"Open",
"an",
"array",
"using",
"file",
"-",
"mode",
"-",
"like",
"semantics",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/creation.py#L352-L489 | train | 215,839 |
zarr-developers/zarr | zarr/creation.py | full_like | def full_like(a, **kwargs):
"""Create a filled array like `a`."""
_like_args(a, kwargs)
if isinstance(a, Array):
kwargs.setdefault('fill_value', a.fill_value)
return full(**kwargs) | python | def full_like(a, **kwargs):
"""Create a filled array like `a`."""
_like_args(a, kwargs)
if isinstance(a, Array):
kwargs.setdefault('fill_value', a.fill_value)
return full(**kwargs) | [
"def",
"full_like",
"(",
"a",
",",
"*",
"*",
"kwargs",
")",
":",
"_like_args",
"(",
"a",
",",
"kwargs",
")",
"if",
"isinstance",
"(",
"a",
",",
"Array",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'fill_value'",
",",
"a",
".",
"fill_value",
")",
"... | Create a filled array like `a`. | [
"Create",
"a",
"filled",
"array",
"like",
"a",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/creation.py#L530-L535 | train | 215,840 |
zarr-developers/zarr | zarr/creation.py | open_like | def open_like(a, path, **kwargs):
"""Open a persistent array like `a`."""
_like_args(a, kwargs)
if isinstance(a, Array):
kwargs.setdefault('fill_value', a.fill_value)
return open_array(path, **kwargs) | python | def open_like(a, path, **kwargs):
"""Open a persistent array like `a`."""
_like_args(a, kwargs)
if isinstance(a, Array):
kwargs.setdefault('fill_value', a.fill_value)
return open_array(path, **kwargs) | [
"def",
"open_like",
"(",
"a",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"_like_args",
"(",
"a",
",",
"kwargs",
")",
"if",
"isinstance",
"(",
"a",
",",
"Array",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'fill_value'",
",",
"a",
".",
"fill_v... | Open a persistent array like `a`. | [
"Open",
"a",
"persistent",
"array",
"like",
"a",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/creation.py#L538-L543 | train | 215,841 |
zarr-developers/zarr | zarr/convenience.py | open | def open(store=None, mode='a', **kwargs):
"""Convenience function to open a group or array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
z : :class:`zarr.core.Array` or :class:`zarr.hierarchy.Group`
Array or group, depending on what exists in the given store.
See Also
--------
zarr.creation.open_array, zarr.hierarchy.open_group
Examples
--------
Storing data in a directory 'data/example.zarr' on the local file system::
>>> import zarr
>>> store = 'data/example.zarr'
>>> zw = zarr.open(store, mode='w', shape=100, dtype='i4') # open new array
>>> zw
<zarr.core.Array (100,) int32>
>>> za = zarr.open(store, mode='a') # open existing array for reading and writing
>>> za
<zarr.core.Array (100,) int32>
>>> zr = zarr.open(store, mode='r') # open existing array read-only
>>> zr
<zarr.core.Array (100,) int32 read-only>
>>> gw = zarr.open(store, mode='w') # open new group, overwriting previous data
>>> gw
<zarr.hierarchy.Group '/'>
>>> ga = zarr.open(store, mode='a') # open existing group for reading and writing
>>> ga
<zarr.hierarchy.Group '/'>
>>> gr = zarr.open(store, mode='r') # open existing group read-only
>>> gr
<zarr.hierarchy.Group '/' read-only>
"""
path = kwargs.get('path', None)
# handle polymorphic store arg
clobber = mode == 'w'
store = normalize_store_arg(store, clobber=clobber)
path = normalize_storage_path(path)
if mode in {'w', 'w-', 'x'}:
if 'shape' in kwargs:
return open_array(store, mode=mode, **kwargs)
else:
return open_group(store, mode=mode, **kwargs)
elif mode == 'a':
if contains_array(store, path):
return open_array(store, mode=mode, **kwargs)
elif contains_group(store, path):
return open_group(store, mode=mode, **kwargs)
elif 'shape' in kwargs:
return open_array(store, mode=mode, **kwargs)
else:
return open_group(store, mode=mode, **kwargs)
else:
if contains_array(store, path):
return open_array(store, mode=mode, **kwargs)
elif contains_group(store, path):
return open_group(store, mode=mode, **kwargs)
else:
err_path_not_found(path) | python | def open(store=None, mode='a', **kwargs):
"""Convenience function to open a group or array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
z : :class:`zarr.core.Array` or :class:`zarr.hierarchy.Group`
Array or group, depending on what exists in the given store.
See Also
--------
zarr.creation.open_array, zarr.hierarchy.open_group
Examples
--------
Storing data in a directory 'data/example.zarr' on the local file system::
>>> import zarr
>>> store = 'data/example.zarr'
>>> zw = zarr.open(store, mode='w', shape=100, dtype='i4') # open new array
>>> zw
<zarr.core.Array (100,) int32>
>>> za = zarr.open(store, mode='a') # open existing array for reading and writing
>>> za
<zarr.core.Array (100,) int32>
>>> zr = zarr.open(store, mode='r') # open existing array read-only
>>> zr
<zarr.core.Array (100,) int32 read-only>
>>> gw = zarr.open(store, mode='w') # open new group, overwriting previous data
>>> gw
<zarr.hierarchy.Group '/'>
>>> ga = zarr.open(store, mode='a') # open existing group for reading and writing
>>> ga
<zarr.hierarchy.Group '/'>
>>> gr = zarr.open(store, mode='r') # open existing group read-only
>>> gr
<zarr.hierarchy.Group '/' read-only>
"""
path = kwargs.get('path', None)
# handle polymorphic store arg
clobber = mode == 'w'
store = normalize_store_arg(store, clobber=clobber)
path = normalize_storage_path(path)
if mode in {'w', 'w-', 'x'}:
if 'shape' in kwargs:
return open_array(store, mode=mode, **kwargs)
else:
return open_group(store, mode=mode, **kwargs)
elif mode == 'a':
if contains_array(store, path):
return open_array(store, mode=mode, **kwargs)
elif contains_group(store, path):
return open_group(store, mode=mode, **kwargs)
elif 'shape' in kwargs:
return open_array(store, mode=mode, **kwargs)
else:
return open_group(store, mode=mode, **kwargs)
else:
if contains_array(store, path):
return open_array(store, mode=mode, **kwargs)
elif contains_group(store, path):
return open_group(store, mode=mode, **kwargs)
else:
err_path_not_found(path) | [
"def",
"open",
"(",
"store",
"=",
"None",
",",
"mode",
"=",
"'a'",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"kwargs",
".",
"get",
"(",
"'path'",
",",
"None",
")",
"# handle polymorphic store arg",
"clobber",
"=",
"mode",
"==",
"'w'",
"store",
... | Convenience function to open a group or array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
z : :class:`zarr.core.Array` or :class:`zarr.hierarchy.Group`
Array or group, depending on what exists in the given store.
See Also
--------
zarr.creation.open_array, zarr.hierarchy.open_group
Examples
--------
Storing data in a directory 'data/example.zarr' on the local file system::
>>> import zarr
>>> store = 'data/example.zarr'
>>> zw = zarr.open(store, mode='w', shape=100, dtype='i4') # open new array
>>> zw
<zarr.core.Array (100,) int32>
>>> za = zarr.open(store, mode='a') # open existing array for reading and writing
>>> za
<zarr.core.Array (100,) int32>
>>> zr = zarr.open(store, mode='r') # open existing array read-only
>>> zr
<zarr.core.Array (100,) int32 read-only>
>>> gw = zarr.open(store, mode='w') # open new group, overwriting previous data
>>> gw
<zarr.hierarchy.Group '/'>
>>> ga = zarr.open(store, mode='a') # open existing group for reading and writing
>>> ga
<zarr.hierarchy.Group '/'>
>>> gr = zarr.open(store, mode='r') # open existing group read-only
>>> gr
<zarr.hierarchy.Group '/' read-only> | [
"Convenience",
"function",
"to",
"open",
"a",
"group",
"or",
"array",
"using",
"file",
"-",
"mode",
"-",
"like",
"semantics",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/convenience.py#L21-L102 | train | 215,842 |
zarr-developers/zarr | zarr/convenience.py | save | def save(store, *args, **kwargs):
"""Convenience function to save an array or group of arrays to the local file system.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
args : ndarray
NumPy arrays with data to save.
kwargs
NumPy arrays with data to save.
Examples
--------
Save an array to a directory on the file system (uses a :class:`DirectoryStore`)::
>>> import zarr
>>> import numpy as np
>>> arr = np.arange(10000)
>>> zarr.save('data/example.zarr', arr)
>>> zarr.load('data/example.zarr')
array([ 0, 1, 2, ..., 9997, 9998, 9999])
Save an array to a Zip file (uses a :class:`ZipStore`)::
>>> zarr.save('data/example.zip', arr)
>>> zarr.load('data/example.zip')
array([ 0, 1, 2, ..., 9997, 9998, 9999])
Save several arrays to a directory on the file system (uses a
:class:`DirectoryStore` and stores arrays in a group)::
>>> import zarr
>>> import numpy as np
>>> a1 = np.arange(10000)
>>> a2 = np.arange(10000, 0, -1)
>>> zarr.save('data/example.zarr', a1, a2)
>>> loader = zarr.load('data/example.zarr')
>>> loader
<LazyLoader: arr_0, arr_1>
>>> loader['arr_0']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['arr_1']
array([10000, 9999, 9998, ..., 3, 2, 1])
Save several arrays using named keyword arguments::
>>> zarr.save('data/example.zarr', foo=a1, bar=a2)
>>> loader = zarr.load('data/example.zarr')
>>> loader
<LazyLoader: bar, foo>
>>> loader['foo']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['bar']
array([10000, 9999, 9998, ..., 3, 2, 1])
Store several arrays in a single zip file (uses a :class:`ZipStore`)::
>>> zarr.save('data/example.zip', foo=a1, bar=a2)
>>> loader = zarr.load('data/example.zip')
>>> loader
<LazyLoader: bar, foo>
>>> loader['foo']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['bar']
array([10000, 9999, 9998, ..., 3, 2, 1])
See Also
--------
save_array, save_group
"""
if len(args) == 0 and len(kwargs) == 0:
raise ValueError('at least one array must be provided')
if len(args) == 1 and len(kwargs) == 0:
save_array(store, args[0])
else:
save_group(store, *args, **kwargs) | python | def save(store, *args, **kwargs):
"""Convenience function to save an array or group of arrays to the local file system.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
args : ndarray
NumPy arrays with data to save.
kwargs
NumPy arrays with data to save.
Examples
--------
Save an array to a directory on the file system (uses a :class:`DirectoryStore`)::
>>> import zarr
>>> import numpy as np
>>> arr = np.arange(10000)
>>> zarr.save('data/example.zarr', arr)
>>> zarr.load('data/example.zarr')
array([ 0, 1, 2, ..., 9997, 9998, 9999])
Save an array to a Zip file (uses a :class:`ZipStore`)::
>>> zarr.save('data/example.zip', arr)
>>> zarr.load('data/example.zip')
array([ 0, 1, 2, ..., 9997, 9998, 9999])
Save several arrays to a directory on the file system (uses a
:class:`DirectoryStore` and stores arrays in a group)::
>>> import zarr
>>> import numpy as np
>>> a1 = np.arange(10000)
>>> a2 = np.arange(10000, 0, -1)
>>> zarr.save('data/example.zarr', a1, a2)
>>> loader = zarr.load('data/example.zarr')
>>> loader
<LazyLoader: arr_0, arr_1>
>>> loader['arr_0']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['arr_1']
array([10000, 9999, 9998, ..., 3, 2, 1])
Save several arrays using named keyword arguments::
>>> zarr.save('data/example.zarr', foo=a1, bar=a2)
>>> loader = zarr.load('data/example.zarr')
>>> loader
<LazyLoader: bar, foo>
>>> loader['foo']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['bar']
array([10000, 9999, 9998, ..., 3, 2, 1])
Store several arrays in a single zip file (uses a :class:`ZipStore`)::
>>> zarr.save('data/example.zip', foo=a1, bar=a2)
>>> loader = zarr.load('data/example.zip')
>>> loader
<LazyLoader: bar, foo>
>>> loader['foo']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['bar']
array([10000, 9999, 9998, ..., 3, 2, 1])
See Also
--------
save_array, save_group
"""
if len(args) == 0 and len(kwargs) == 0:
raise ValueError('at least one array must be provided')
if len(args) == 1 and len(kwargs) == 0:
save_array(store, args[0])
else:
save_group(store, *args, **kwargs) | [
"def",
"save",
"(",
"store",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
"and",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'at least one array must be provided'",
")",
"i... | Convenience function to save an array or group of arrays to the local file system.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
args : ndarray
NumPy arrays with data to save.
kwargs
NumPy arrays with data to save.
Examples
--------
Save an array to a directory on the file system (uses a :class:`DirectoryStore`)::
>>> import zarr
>>> import numpy as np
>>> arr = np.arange(10000)
>>> zarr.save('data/example.zarr', arr)
>>> zarr.load('data/example.zarr')
array([ 0, 1, 2, ..., 9997, 9998, 9999])
Save an array to a Zip file (uses a :class:`ZipStore`)::
>>> zarr.save('data/example.zip', arr)
>>> zarr.load('data/example.zip')
array([ 0, 1, 2, ..., 9997, 9998, 9999])
Save several arrays to a directory on the file system (uses a
:class:`DirectoryStore` and stores arrays in a group)::
>>> import zarr
>>> import numpy as np
>>> a1 = np.arange(10000)
>>> a2 = np.arange(10000, 0, -1)
>>> zarr.save('data/example.zarr', a1, a2)
>>> loader = zarr.load('data/example.zarr')
>>> loader
<LazyLoader: arr_0, arr_1>
>>> loader['arr_0']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['arr_1']
array([10000, 9999, 9998, ..., 3, 2, 1])
Save several arrays using named keyword arguments::
>>> zarr.save('data/example.zarr', foo=a1, bar=a2)
>>> loader = zarr.load('data/example.zarr')
>>> loader
<LazyLoader: bar, foo>
>>> loader['foo']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['bar']
array([10000, 9999, 9998, ..., 3, 2, 1])
Store several arrays in a single zip file (uses a :class:`ZipStore`)::
>>> zarr.save('data/example.zip', foo=a1, bar=a2)
>>> loader = zarr.load('data/example.zip')
>>> loader
<LazyLoader: bar, foo>
>>> loader['foo']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['bar']
array([10000, 9999, 9998, ..., 3, 2, 1])
See Also
--------
save_array, save_group | [
"Convenience",
"function",
"to",
"save",
"an",
"array",
"or",
"group",
"of",
"arrays",
"to",
"the",
"local",
"file",
"system",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/convenience.py#L222-L299 | train | 215,843 |
zarr-developers/zarr | zarr/convenience.py | load | def load(store):
"""Load data from an array or group into memory.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
Returns
-------
out
If the store contains an array, out will be a numpy array. If the store contains
a group, out will be a dict-like object where keys are array names and values
are numpy arrays.
See Also
--------
save, savez
Notes
-----
If loading data from a group of arrays, data will not be immediately loaded into
memory. Rather, arrays will be loaded into memory as they are requested.
"""
# handle polymorphic store arg
store = normalize_store_arg(store)
if contains_array(store, path=None):
return Array(store=store, path=None)[...]
elif contains_group(store, path=None):
grp = Group(store=store, path=None)
return LazyLoader(grp) | python | def load(store):
"""Load data from an array or group into memory.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
Returns
-------
out
If the store contains an array, out will be a numpy array. If the store contains
a group, out will be a dict-like object where keys are array names and values
are numpy arrays.
See Also
--------
save, savez
Notes
-----
If loading data from a group of arrays, data will not be immediately loaded into
memory. Rather, arrays will be loaded into memory as they are requested.
"""
# handle polymorphic store arg
store = normalize_store_arg(store)
if contains_array(store, path=None):
return Array(store=store, path=None)[...]
elif contains_group(store, path=None):
grp = Group(store=store, path=None)
return LazyLoader(grp) | [
"def",
"load",
"(",
"store",
")",
":",
"# handle polymorphic store arg",
"store",
"=",
"normalize_store_arg",
"(",
"store",
")",
"if",
"contains_array",
"(",
"store",
",",
"path",
"=",
"None",
")",
":",
"return",
"Array",
"(",
"store",
"=",
"store",
",",
"... | Load data from an array or group into memory.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
Returns
-------
out
If the store contains an array, out will be a numpy array. If the store contains
a group, out will be a dict-like object where keys are array names and values
are numpy arrays.
See Also
--------
save, savez
Notes
-----
If loading data from a group of arrays, data will not be immediately loaded into
memory. Rather, arrays will be loaded into memory as they are requested. | [
"Load",
"data",
"from",
"an",
"array",
"or",
"group",
"into",
"memory",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/convenience.py#L332-L363 | train | 215,844 |
zarr-developers/zarr | zarr/convenience.py | copy | def copy(source, dest, name=None, shallow=False, without_attrs=False, log=None,
if_exists='raise', dry_run=False, **create_kws):
"""Copy the `source` array or group into the `dest` group.
Parameters
----------
source : group or array/dataset
A zarr group or array, or an h5py group or dataset.
dest : group
A zarr or h5py group.
name : str, optional
Name to copy the object to.
shallow : bool, optional
If True, only copy immediate children of `source`.
without_attrs : bool, optional
Do not copy user attributes.
log : callable, file path or file-like object, optional
If provided, will be used to log progress information.
if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional
How to handle arrays that already exist in the destination group. If
'raise' then a CopyError is raised on the first array already present
in the destination group. If 'replace' then any array will be
replaced in the destination. If 'skip' then any existing arrays will
not be copied. If 'skip_initialized' then any existing arrays with
all chunks initialized will not be copied (not available when copying to
h5py).
dry_run : bool, optional
If True, don't actually copy anything, just log what would have
happened.
**create_kws
Passed through to the create_dataset method when copying an array/dataset.
Returns
-------
n_copied : int
Number of items copied.
n_skipped : int
Number of items skipped.
n_bytes_copied : int
Number of bytes of data that were actually copied.
Examples
--------
Here's an example of copying a group named 'foo' from an HDF5 file to a
Zarr group::
>>> import h5py
>>> import zarr
>>> import numpy as np
>>> source = h5py.File('data/example.h5', mode='w')
>>> foo = source.create_group('foo')
>>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,))
>>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,))
>>> zarr.tree(source)
/
├── foo
│ └── bar
│ └── baz (100,) int64
└── spam (100,) int64
>>> dest = zarr.group()
>>> from sys import stdout
>>> zarr.copy(source['foo'], dest, log=stdout)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
all done: 3 copied, 0 skipped, 800 bytes copied
(3, 0, 800)
>>> dest.tree() # N.B., no spam
/
└── foo
└── bar
└── baz (100,) int64
>>> source.close()
The ``if_exists`` parameter provides options for how to handle pre-existing data in
the destination. Here are some examples of these options, also using
``dry_run=True`` to find out what would happen without actually copying anything::
>>> source = zarr.group()
>>> dest = zarr.group()
>>> baz = source.create_dataset('foo/bar/baz', data=np.arange(100))
>>> spam = source.create_dataset('foo/spam', data=np.arange(1000))
>>> existing_spam = dest.create_dataset('foo/spam', data=np.arange(1000))
>>> from sys import stdout
>>> try:
... zarr.copy(source['foo'], dest, log=stdout, dry_run=True)
... except zarr.CopyError as e:
... print(e)
...
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
an object 'spam' already exists in destination '/foo'
>>> zarr.copy(source['foo'], dest, log=stdout, if_exists='replace', dry_run=True)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
copy /foo/spam (1000,) int64
dry run: 4 copied, 0 skipped
(4, 0, 0)
>>> zarr.copy(source['foo'], dest, log=stdout, if_exists='skip', dry_run=True)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
skip /foo/spam (1000,) int64
dry run: 3 copied, 1 skipped
(3, 1, 0)
Notes
-----
Please note that this is an experimental feature. The behaviour of this
function is still evolving and the default behaviour and/or parameters may change
in future versions.
"""
# value checks
_check_dest_is_group(dest)
# setup logging
with _LogWriter(log) as log:
# do the copying
n_copied, n_skipped, n_bytes_copied = _copy(
log, source, dest, name=name, root=True, shallow=shallow,
without_attrs=without_attrs, if_exists=if_exists, dry_run=dry_run,
**create_kws
)
# log a final message with a summary of what happened
_log_copy_summary(log, dry_run, n_copied, n_skipped, n_bytes_copied)
return n_copied, n_skipped, n_bytes_copied | python | def copy(source, dest, name=None, shallow=False, without_attrs=False, log=None,
if_exists='raise', dry_run=False, **create_kws):
"""Copy the `source` array or group into the `dest` group.
Parameters
----------
source : group or array/dataset
A zarr group or array, or an h5py group or dataset.
dest : group
A zarr or h5py group.
name : str, optional
Name to copy the object to.
shallow : bool, optional
If True, only copy immediate children of `source`.
without_attrs : bool, optional
Do not copy user attributes.
log : callable, file path or file-like object, optional
If provided, will be used to log progress information.
if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional
How to handle arrays that already exist in the destination group. If
'raise' then a CopyError is raised on the first array already present
in the destination group. If 'replace' then any array will be
replaced in the destination. If 'skip' then any existing arrays will
not be copied. If 'skip_initialized' then any existing arrays with
all chunks initialized will not be copied (not available when copying to
h5py).
dry_run : bool, optional
If True, don't actually copy anything, just log what would have
happened.
**create_kws
Passed through to the create_dataset method when copying an array/dataset.
Returns
-------
n_copied : int
Number of items copied.
n_skipped : int
Number of items skipped.
n_bytes_copied : int
Number of bytes of data that were actually copied.
Examples
--------
Here's an example of copying a group named 'foo' from an HDF5 file to a
Zarr group::
>>> import h5py
>>> import zarr
>>> import numpy as np
>>> source = h5py.File('data/example.h5', mode='w')
>>> foo = source.create_group('foo')
>>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,))
>>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,))
>>> zarr.tree(source)
/
├── foo
│ └── bar
│ └── baz (100,) int64
└── spam (100,) int64
>>> dest = zarr.group()
>>> from sys import stdout
>>> zarr.copy(source['foo'], dest, log=stdout)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
all done: 3 copied, 0 skipped, 800 bytes copied
(3, 0, 800)
>>> dest.tree() # N.B., no spam
/
└── foo
└── bar
└── baz (100,) int64
>>> source.close()
The ``if_exists`` parameter provides options for how to handle pre-existing data in
the destination. Here are some examples of these options, also using
``dry_run=True`` to find out what would happen without actually copying anything::
>>> source = zarr.group()
>>> dest = zarr.group()
>>> baz = source.create_dataset('foo/bar/baz', data=np.arange(100))
>>> spam = source.create_dataset('foo/spam', data=np.arange(1000))
>>> existing_spam = dest.create_dataset('foo/spam', data=np.arange(1000))
>>> from sys import stdout
>>> try:
... zarr.copy(source['foo'], dest, log=stdout, dry_run=True)
... except zarr.CopyError as e:
... print(e)
...
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
an object 'spam' already exists in destination '/foo'
>>> zarr.copy(source['foo'], dest, log=stdout, if_exists='replace', dry_run=True)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
copy /foo/spam (1000,) int64
dry run: 4 copied, 0 skipped
(4, 0, 0)
>>> zarr.copy(source['foo'], dest, log=stdout, if_exists='skip', dry_run=True)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
skip /foo/spam (1000,) int64
dry run: 3 copied, 1 skipped
(3, 1, 0)
Notes
-----
Please note that this is an experimental feature. The behaviour of this
function is still evolving and the default behaviour and/or parameters may change
in future versions.
"""
# value checks
_check_dest_is_group(dest)
# setup logging
with _LogWriter(log) as log:
# do the copying
n_copied, n_skipped, n_bytes_copied = _copy(
log, source, dest, name=name, root=True, shallow=shallow,
without_attrs=without_attrs, if_exists=if_exists, dry_run=dry_run,
**create_kws
)
# log a final message with a summary of what happened
_log_copy_summary(log, dry_run, n_copied, n_skipped, n_bytes_copied)
return n_copied, n_skipped, n_bytes_copied | [
"def",
"copy",
"(",
"source",
",",
"dest",
",",
"name",
"=",
"None",
",",
"shallow",
"=",
"False",
",",
"without_attrs",
"=",
"False",
",",
"log",
"=",
"None",
",",
"if_exists",
"=",
"'raise'",
",",
"dry_run",
"=",
"False",
",",
"*",
"*",
"create_kws... | Copy the `source` array or group into the `dest` group.
Parameters
----------
source : group or array/dataset
A zarr group or array, or an h5py group or dataset.
dest : group
A zarr or h5py group.
name : str, optional
Name to copy the object to.
shallow : bool, optional
If True, only copy immediate children of `source`.
without_attrs : bool, optional
Do not copy user attributes.
log : callable, file path or file-like object, optional
If provided, will be used to log progress information.
if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional
How to handle arrays that already exist in the destination group. If
'raise' then a CopyError is raised on the first array already present
in the destination group. If 'replace' then any array will be
replaced in the destination. If 'skip' then any existing arrays will
not be copied. If 'skip_initialized' then any existing arrays with
all chunks initialized will not be copied (not available when copying to
h5py).
dry_run : bool, optional
If True, don't actually copy anything, just log what would have
happened.
**create_kws
Passed through to the create_dataset method when copying an array/dataset.
Returns
-------
n_copied : int
Number of items copied.
n_skipped : int
Number of items skipped.
n_bytes_copied : int
Number of bytes of data that were actually copied.
Examples
--------
Here's an example of copying a group named 'foo' from an HDF5 file to a
Zarr group::
>>> import h5py
>>> import zarr
>>> import numpy as np
>>> source = h5py.File('data/example.h5', mode='w')
>>> foo = source.create_group('foo')
>>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,))
>>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,))
>>> zarr.tree(source)
/
├── foo
│ └── bar
│ └── baz (100,) int64
└── spam (100,) int64
>>> dest = zarr.group()
>>> from sys import stdout
>>> zarr.copy(source['foo'], dest, log=stdout)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
all done: 3 copied, 0 skipped, 800 bytes copied
(3, 0, 800)
>>> dest.tree() # N.B., no spam
/
└── foo
└── bar
└── baz (100,) int64
>>> source.close()
The ``if_exists`` parameter provides options for how to handle pre-existing data in
the destination. Here are some examples of these options, also using
``dry_run=True`` to find out what would happen without actually copying anything::
>>> source = zarr.group()
>>> dest = zarr.group()
>>> baz = source.create_dataset('foo/bar/baz', data=np.arange(100))
>>> spam = source.create_dataset('foo/spam', data=np.arange(1000))
>>> existing_spam = dest.create_dataset('foo/spam', data=np.arange(1000))
>>> from sys import stdout
>>> try:
... zarr.copy(source['foo'], dest, log=stdout, dry_run=True)
... except zarr.CopyError as e:
... print(e)
...
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
an object 'spam' already exists in destination '/foo'
>>> zarr.copy(source['foo'], dest, log=stdout, if_exists='replace', dry_run=True)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
copy /foo/spam (1000,) int64
dry run: 4 copied, 0 skipped
(4, 0, 0)
>>> zarr.copy(source['foo'], dest, log=stdout, if_exists='skip', dry_run=True)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
skip /foo/spam (1000,) int64
dry run: 3 copied, 1 skipped
(3, 1, 0)
Notes
-----
Please note that this is an experimental feature. The behaviour of this
function is still evolving and the default behaviour and/or parameters may change
in future versions. | [
"Copy",
"the",
"source",
"array",
"or",
"group",
"into",
"the",
"dest",
"group",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/convenience.py#L664-L796 | train | 215,845 |
zarr-developers/zarr | zarr/convenience.py | copy_all | def copy_all(source, dest, shallow=False, without_attrs=False, log=None,
if_exists='raise', dry_run=False, **create_kws):
"""Copy all children of the `source` group into the `dest` group.
Parameters
----------
source : group or array/dataset
A zarr group or array, or an h5py group or dataset.
dest : group
A zarr or h5py group.
shallow : bool, optional
If True, only copy immediate children of `source`.
without_attrs : bool, optional
Do not copy user attributes.
log : callable, file path or file-like object, optional
If provided, will be used to log progress information.
if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional
How to handle arrays that already exist in the destination group. If
'raise' then a CopyError is raised on the first array already present
in the destination group. If 'replace' then any array will be
replaced in the destination. If 'skip' then any existing arrays will
not be copied. If 'skip_initialized' then any existing arrays with
all chunks initialized will not be copied (not available when copying to
h5py).
dry_run : bool, optional
If True, don't actually copy anything, just log what would have
happened.
**create_kws
Passed through to the create_dataset method when copying an
array/dataset.
Returns
-------
n_copied : int
Number of items copied.
n_skipped : int
Number of items skipped.
n_bytes_copied : int
Number of bytes of data that were actually copied.
Examples
--------
>>> import h5py
>>> import zarr
>>> import numpy as np
>>> source = h5py.File('data/example.h5', mode='w')
>>> foo = source.create_group('foo')
>>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,))
>>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,))
>>> zarr.tree(source)
/
├── foo
│ └── bar
│ └── baz (100,) int64
└── spam (100,) int64
>>> dest = zarr.group()
>>> import sys
>>> zarr.copy_all(source, dest, log=sys.stdout)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
copy /spam (100,) int64
all done: 4 copied, 0 skipped, 1,600 bytes copied
(4, 0, 1600)
>>> dest.tree()
/
├── foo
│ └── bar
│ └── baz (100,) int64
└── spam (100,) int64
>>> source.close()
Notes
-----
Please note that this is an experimental feature. The behaviour of this
function is still evolving and the default behaviour and/or parameters may change
in future versions.
"""
# value checks
_check_dest_is_group(dest)
# setup counting variables
n_copied = n_skipped = n_bytes_copied = 0
# setup logging
with _LogWriter(log) as log:
for k in source.keys():
c, s, b = _copy(
log, source[k], dest, name=k, root=False, shallow=shallow,
without_attrs=without_attrs, if_exists=if_exists,
dry_run=dry_run, **create_kws)
n_copied += c
n_skipped += s
n_bytes_copied += b
# log a final message with a summary of what happened
_log_copy_summary(log, dry_run, n_copied, n_skipped, n_bytes_copied)
return n_copied, n_skipped, n_bytes_copied | python | def copy_all(source, dest, shallow=False, without_attrs=False, log=None,
if_exists='raise', dry_run=False, **create_kws):
"""Copy all children of the `source` group into the `dest` group.
Parameters
----------
source : group or array/dataset
A zarr group or array, or an h5py group or dataset.
dest : group
A zarr or h5py group.
shallow : bool, optional
If True, only copy immediate children of `source`.
without_attrs : bool, optional
Do not copy user attributes.
log : callable, file path or file-like object, optional
If provided, will be used to log progress information.
if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional
How to handle arrays that already exist in the destination group. If
'raise' then a CopyError is raised on the first array already present
in the destination group. If 'replace' then any array will be
replaced in the destination. If 'skip' then any existing arrays will
not be copied. If 'skip_initialized' then any existing arrays with
all chunks initialized will not be copied (not available when copying to
h5py).
dry_run : bool, optional
If True, don't actually copy anything, just log what would have
happened.
**create_kws
Passed through to the create_dataset method when copying an
array/dataset.
Returns
-------
n_copied : int
Number of items copied.
n_skipped : int
Number of items skipped.
n_bytes_copied : int
Number of bytes of data that were actually copied.
Examples
--------
>>> import h5py
>>> import zarr
>>> import numpy as np
>>> source = h5py.File('data/example.h5', mode='w')
>>> foo = source.create_group('foo')
>>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,))
>>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,))
>>> zarr.tree(source)
/
├── foo
│ └── bar
│ └── baz (100,) int64
└── spam (100,) int64
>>> dest = zarr.group()
>>> import sys
>>> zarr.copy_all(source, dest, log=sys.stdout)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
copy /spam (100,) int64
all done: 4 copied, 0 skipped, 1,600 bytes copied
(4, 0, 1600)
>>> dest.tree()
/
├── foo
│ └── bar
│ └── baz (100,) int64
└── spam (100,) int64
>>> source.close()
Notes
-----
Please note that this is an experimental feature. The behaviour of this
function is still evolving and the default behaviour and/or parameters may change
in future versions.
"""
# value checks
_check_dest_is_group(dest)
# setup counting variables
n_copied = n_skipped = n_bytes_copied = 0
# setup logging
with _LogWriter(log) as log:
for k in source.keys():
c, s, b = _copy(
log, source[k], dest, name=k, root=False, shallow=shallow,
without_attrs=without_attrs, if_exists=if_exists,
dry_run=dry_run, **create_kws)
n_copied += c
n_skipped += s
n_bytes_copied += b
# log a final message with a summary of what happened
_log_copy_summary(log, dry_run, n_copied, n_skipped, n_bytes_copied)
return n_copied, n_skipped, n_bytes_copied | [
"def",
"copy_all",
"(",
"source",
",",
"dest",
",",
"shallow",
"=",
"False",
",",
"without_attrs",
"=",
"False",
",",
"log",
"=",
"None",
",",
"if_exists",
"=",
"'raise'",
",",
"dry_run",
"=",
"False",
",",
"*",
"*",
"create_kws",
")",
":",
"# value ch... | Copy all children of the `source` group into the `dest` group.
Parameters
----------
source : group or array/dataset
A zarr group or array, or an h5py group or dataset.
dest : group
A zarr or h5py group.
shallow : bool, optional
If True, only copy immediate children of `source`.
without_attrs : bool, optional
Do not copy user attributes.
log : callable, file path or file-like object, optional
If provided, will be used to log progress information.
if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional
How to handle arrays that already exist in the destination group. If
'raise' then a CopyError is raised on the first array already present
in the destination group. If 'replace' then any array will be
replaced in the destination. If 'skip' then any existing arrays will
not be copied. If 'skip_initialized' then any existing arrays with
all chunks initialized will not be copied (not available when copying to
h5py).
dry_run : bool, optional
If True, don't actually copy anything, just log what would have
happened.
**create_kws
Passed through to the create_dataset method when copying an
array/dataset.
Returns
-------
n_copied : int
Number of items copied.
n_skipped : int
Number of items skipped.
n_bytes_copied : int
Number of bytes of data that were actually copied.
Examples
--------
>>> import h5py
>>> import zarr
>>> import numpy as np
>>> source = h5py.File('data/example.h5', mode='w')
>>> foo = source.create_group('foo')
>>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,))
>>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,))
>>> zarr.tree(source)
/
├── foo
│ └── bar
│ └── baz (100,) int64
└── spam (100,) int64
>>> dest = zarr.group()
>>> import sys
>>> zarr.copy_all(source, dest, log=sys.stdout)
copy /foo
copy /foo/bar
copy /foo/bar/baz (100,) int64
copy /spam (100,) int64
all done: 4 copied, 0 skipped, 1,600 bytes copied
(4, 0, 1600)
>>> dest.tree()
/
├── foo
│ └── bar
│ └── baz (100,) int64
└── spam (100,) int64
>>> source.close()
Notes
-----
Please note that this is an experimental feature. The behaviour of this
function is still evolving and the default behaviour and/or parameters may change
in future versions. | [
"Copy",
"all",
"children",
"of",
"the",
"source",
"group",
"into",
"the",
"dest",
"group",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/convenience.py#L976-L1077 | train | 215,846 |
zarr-developers/zarr | zarr/convenience.py | consolidate_metadata | def consolidate_metadata(store, metadata_key='.zmetadata'):
"""
Consolidate all metadata for groups and arrays within the given store
into a single resource and put it under the given key.
This produces a single object in the backend store, containing all the
metadata read from all the zarr-related keys that can be found. After
metadata have been consolidated, use :func:`open_consolidated` to open
the root group in optimised, read-only mode, using the consolidated
metadata to reduce the number of read operations on the backend store.
Note, that if the metadata in the store is changed after this
consolidation, then the metadata read by :func:`open_consolidated`
would be incorrect unless this function is called again.
.. note:: This is an experimental feature.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
metadata_key : str
Key to put the consolidated metadata under.
Returns
-------
g : :class:`zarr.hierarchy.Group`
Group instance, opened with the new consolidated metadata.
See Also
--------
open_consolidated
"""
store = normalize_store_arg(store)
def is_zarr_key(key):
return (key.endswith('.zarray') or key.endswith('.zgroup') or
key.endswith('.zattrs'))
out = {
'zarr_consolidated_format': 1,
'metadata': {
key: json_loads(store[key])
for key in store if is_zarr_key(key)
}
}
store[metadata_key] = json_dumps(out)
return open_consolidated(store, metadata_key=metadata_key) | python | def consolidate_metadata(store, metadata_key='.zmetadata'):
"""
Consolidate all metadata for groups and arrays within the given store
into a single resource and put it under the given key.
This produces a single object in the backend store, containing all the
metadata read from all the zarr-related keys that can be found. After
metadata have been consolidated, use :func:`open_consolidated` to open
the root group in optimised, read-only mode, using the consolidated
metadata to reduce the number of read operations on the backend store.
Note, that if the metadata in the store is changed after this
consolidation, then the metadata read by :func:`open_consolidated`
would be incorrect unless this function is called again.
.. note:: This is an experimental feature.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
metadata_key : str
Key to put the consolidated metadata under.
Returns
-------
g : :class:`zarr.hierarchy.Group`
Group instance, opened with the new consolidated metadata.
See Also
--------
open_consolidated
"""
store = normalize_store_arg(store)
def is_zarr_key(key):
return (key.endswith('.zarray') or key.endswith('.zgroup') or
key.endswith('.zattrs'))
out = {
'zarr_consolidated_format': 1,
'metadata': {
key: json_loads(store[key])
for key in store if is_zarr_key(key)
}
}
store[metadata_key] = json_dumps(out)
return open_consolidated(store, metadata_key=metadata_key) | [
"def",
"consolidate_metadata",
"(",
"store",
",",
"metadata_key",
"=",
"'.zmetadata'",
")",
":",
"store",
"=",
"normalize_store_arg",
"(",
"store",
")",
"def",
"is_zarr_key",
"(",
"key",
")",
":",
"return",
"(",
"key",
".",
"endswith",
"(",
"'.zarray'",
")",... | Consolidate all metadata for groups and arrays within the given store
into a single resource and put it under the given key.
This produces a single object in the backend store, containing all the
metadata read from all the zarr-related keys that can be found. After
metadata have been consolidated, use :func:`open_consolidated` to open
the root group in optimised, read-only mode, using the consolidated
metadata to reduce the number of read operations on the backend store.
Note, that if the metadata in the store is changed after this
consolidation, then the metadata read by :func:`open_consolidated`
would be incorrect unless this function is called again.
.. note:: This is an experimental feature.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
metadata_key : str
Key to put the consolidated metadata under.
Returns
-------
g : :class:`zarr.hierarchy.Group`
Group instance, opened with the new consolidated metadata.
See Also
--------
open_consolidated | [
"Consolidate",
"all",
"metadata",
"for",
"groups",
"and",
"arrays",
"within",
"the",
"given",
"store",
"into",
"a",
"single",
"resource",
"and",
"put",
"it",
"under",
"the",
"given",
"key",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/convenience.py#L1080-L1128 | train | 215,847 |
zarr-developers/zarr | zarr/convenience.py | open_consolidated | def open_consolidated(store, metadata_key='.zmetadata', mode='r+', **kwargs):
"""Open group using metadata previously consolidated into a single key.
This is an optimised method for opening a Zarr group, where instead of
traversing the group/array hierarchy by accessing the metadata keys at
each level, a single key contains all of the metadata for everything.
For remote data sources where the overhead of accessing a key is large
compared to the time to read data.
The group accessed must have already had its metadata consolidated into a
single key using the function :func:`consolidate_metadata`.
This optimised method only works in modes which do not change the
metadata, although the data may still be written/updated.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
metadata_key : str
Key to read the consolidated metadata from. The default (.zmetadata)
corresponds to the default used by :func:`consolidate_metadata`.
mode : {'r', 'r+'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist) although only writes to data are allowed,
changes to metadata including creation of new arrays or group
are not allowed.
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
g : :class:`zarr.hierarchy.Group`
Group instance, opened with the consolidated metadata.
See Also
--------
consolidate_metadata
"""
from .storage import ConsolidatedMetadataStore
# normalize parameters
store = normalize_store_arg(store)
if mode not in {'r', 'r+'}:
raise ValueError("invalid mode, expected either 'r' or 'r+'; found {!r}"
.format(mode))
# setup metadata sotre
meta_store = ConsolidatedMetadataStore(store, metadata_key=metadata_key)
# pass through
def open_consolidated(store, metadata_key='.zmetadata', mode='r+', **kwargs):
    """Open a Zarr group whose metadata was consolidated into one key.

    Instead of walking the group/array hierarchy and reading a metadata
    key at every level, all metadata is read from a single key written
    previously by :func:`consolidate_metadata`.  This is much cheaper for
    stores where each key access is expensive (e.g. remote stores).

    Only modes that cannot modify metadata are accepted; data may still
    be written in ``'r+'`` mode.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    metadata_key : str
        Key holding the consolidated metadata.  The default
        (``.zmetadata``) matches :func:`consolidate_metadata`.
    mode : {'r', 'r+'}, optional
        ``'r'`` is read only (must exist); ``'r+'`` additionally allows
        data writes, but creating arrays/groups or changing metadata is
        not permitted.
    **kwargs
        Passed through to :func:`zarr.creation.open_array` or
        :func:`zarr.hierarchy.open_group`.

    Returns
    -------
    g : :class:`zarr.hierarchy.Group`
        Group instance backed by the consolidated metadata.

    See Also
    --------
    consolidate_metadata
    """
    # Imported lazily to avoid a circular import at module load time.
    from .storage import ConsolidatedMetadataStore

    normalized = normalize_store_arg(store)

    # Reject any mode that could mutate metadata.
    if mode not in {'r', 'r+'}:
        raise ValueError("invalid mode, expected either 'r' or 'r+'; found {!r}"
                         .format(mode))

    # Metadata reads are served from the consolidated key; chunk reads
    # and writes still go straight to the underlying store.
    consolidated = ConsolidatedMetadataStore(normalized,
                                             metadata_key=metadata_key)
    return open(store=consolidated, chunk_store=normalized, mode=mode, **kwargs)
"def",
"open_consolidated",
"(",
"store",
",",
"metadata_key",
"=",
"'.zmetadata'",
",",
"mode",
"=",
"'r+'",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"storage",
"import",
"ConsolidatedMetadataStore",
"# normalize parameters",
"store",
"=",
"normalize_store... | Open group using metadata previously consolidated into a single key.
This is an optimised method for opening a Zarr group, where instead of
traversing the group/array hierarchy by accessing the metadata keys at
each level, a single key contains all of the metadata for everything.
For remote data sources where the overhead of accessing a key is large
compared to the time to read data.
The group accessed must have already had its metadata consolidated into a
single key using the function :func:`consolidate_metadata`.
This optimised method only works in modes which do not change the
metadata, although the data may still be written/updated.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
metadata_key : str
Key to read the consolidated metadata from. The default (.zmetadata)
corresponds to the default used by :func:`consolidate_metadata`.
mode : {'r', 'r+'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist) although only writes to data are allowed,
changes to metadata including creation of new arrays or group
are not allowed.
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
g : :class:`zarr.hierarchy.Group`
Group instance, opened with the consolidated metadata.
See Also
--------
consolidate_metadata | [
"Open",
"group",
"using",
"metadata",
"previously",
"consolidated",
"into",
"a",
"single",
"key",
"."
] | fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5 | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/convenience.py#L1131-L1185 | train | 215,848 |
def volume_list_paged(request, search_opts=None, marker=None, paginate=False,
                      sort_dir="desc"):
    """List volumes, optionally one page at a time.

    To see all volumes in the cloud as an admin you can pass in a special
    search option: ``{'all_tenants': 1}``.

    Returns a 3-tuple ``(volumes, has_more_data, has_prev_data)``.
    """
    has_more_data = False
    has_prev_data = False
    volumes = []

    # The generic-groups microversion is needed so filtering by group_id
    # works.
    c_client = _cinderclient_with_generic_groups(request)
    if c_client is None:
        return volumes, has_more_data, has_prev_data

    # Map volume_id -> transfer so each volume can be annotated below.
    transfers = {t.volume_id: t
                 for t in transfer_list(request, search_opts=search_opts)}

    def _wrap(raw):
        # Attach any pending transfer, then wrap in the dashboard type.
        raw.transfer = transfers.get(raw.id)
        return Volume(raw)

    if paginate and VERSIONS.active > 1:
        page_size = utils.get_page_size(request)
        # 'sort_key'/'sort_dir' were deprecated in Kilo; a single 'sort'
        # parameter of the form "<key>:<dir>" replaces them.  Creation
        # time is the default ordering for paginated listings.
        listing = c_client.volumes.list(search_opts=search_opts,
                                        limit=page_size + 1,
                                        marker=marker,
                                        sort='created_at:' + sort_dir)
        volumes = [_wrap(v) for v in listing]
        volumes, has_more_data, has_prev_data = update_pagination(
            volumes, page_size, marker, sort_dir)
    else:
        volumes = [_wrap(v) for v in
                   c_client.volumes.list(search_opts=search_opts)]

    return volumes, has_more_data, has_prev_data
"def",
"volume_list_paged",
"(",
"request",
",",
"search_opts",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"paginate",
"=",
"False",
",",
"sort_dir",
"=",
"\"desc\"",
")",
":",
"has_more_data",
"=",
"False",
"has_prev_data",
"=",
"False",
"volumes",
"=",
... | List volumes with pagination.
To see all volumes in the cloud as an admin you can pass in a special
search option: {'all_tenants': 1} | [
"List",
"volumes",
"with",
"pagination",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/cinder.py#L341-L380 | train | 215,849 |
def extension_supported(request, extension_name):
    """Return True if the Cinder API advertises the named extension."""
    return any(extension.name == extension_name
               for extension in list_extensions(request))
"def",
"extension_supported",
"(",
"request",
",",
"extension_name",
")",
":",
"for",
"extension",
"in",
"list_extensions",
"(",
"request",
")",
":",
"if",
"extension",
".",
"name",
"==",
"extension_name",
":",
"return",
"True",
"return",
"False"
] | This method will determine if Cinder supports a given extension name. | [
"This",
"method",
"will",
"determine",
"if",
"Cinder",
"supports",
"a",
"given",
"extension",
"name",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/cinder.py#L1069-L1074 | train | 215,850 |
def transfer_list(request, detailed=True, search_opts=None):
    """List volume transfers.

    To see all volume transfers as an admin pass in the special search
    option ``{'all_tenants': 1}``.  A Forbidden response from Cinder is
    logged and treated as "no transfers" instead of being raised.
    """
    c_client = cinderclient(request)
    try:
        listing = c_client.transfers.list(detailed=detailed,
                                          search_opts=search_opts)
        return [VolumeTransfer(transfer) for transfer in listing]
    except cinder_exception.Forbidden as error:
        LOG.error(error)
        return []
"def",
"transfer_list",
"(",
"request",
",",
"detailed",
"=",
"True",
",",
"search_opts",
"=",
"None",
")",
":",
"c_client",
"=",
"cinderclient",
"(",
"request",
")",
"try",
":",
"return",
"[",
"VolumeTransfer",
"(",
"v",
")",
"for",
"v",
"in",
"c_client... | List volume transfers.
To see all volumes transfers as an admin pass in a special
search option: {'all_tenants': 1} | [
"List",
"volume",
"transfers",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/cinder.py#L1078-L1090 | train | 215,851 |
def tenant_quota_usages(request, tenant_id=None, targets=None):
    """Fetch quotas and usage and build a :class:`QuotaUsage` object.

    :param tenant_id: Target tenant ID.  Defaults to
        ``request.user.project_id`` when not given.
    :param targets: A tuple of quota names to retrieve.  When omitted,
        all quota and usage information is retrieved.
    """
    tenant_id = tenant_id or request.user.project_id
    disabled_quotas = get_disabled_quotas(request, targets)
    usages = QuotaUsage()

    # The compute, network and volume lookups are independent of one
    # another, so run them concurrently.
    common = (request, usages, disabled_quotas, tenant_id)
    futurist_utils.call_functions_parallel(
        (_get_tenant_compute_usages, list(common)),
        (_get_tenant_network_usages, list(common)),
        (_get_tenant_volume_usages, list(common)))

    return usages
"def",
"tenant_quota_usages",
"(",
"request",
",",
"tenant_id",
"=",
"None",
",",
"targets",
"=",
"None",
")",
":",
"if",
"not",
"tenant_id",
":",
"tenant_id",
"=",
"request",
".",
"user",
".",
"project_id",
"disabled_quotas",
"=",
"get_disabled_quotas",
"(",
... | Get our quotas and construct our usage object.
:param tenant_id: Target tenant ID. If no tenant_id is provided,
a the request.user.project_id is assumed to be used.
:param targets: A tuple of quota names to be retrieved.
If unspecified, all quota and usage information is retrieved. | [
"Get",
"our",
"quotas",
"and",
"construct",
"our",
"usage",
"object",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/usage/quotas.py#L413-L435 | train | 215,852 |
def add_quota(self, quota):
    """Record the limit for *quota* in the internal usage table.

    A limit of ``None``, ``-1`` or infinity means "unlimited": both the
    quota and the available amount are stored as ``float('inf')``.  Any
    other limit is coerced to ``int``.
    """
    entry = self.usages[quota.name]
    unlimited = quota.limit in (None, -1, float('inf'))
    if unlimited:
        entry['quota'] = float('inf')
        entry['available'] = float('inf')
    else:
        entry['quota'] = int(quota.limit)
"def",
"add_quota",
"(",
"self",
",",
"quota",
")",
":",
"if",
"quota",
".",
"limit",
"in",
"(",
"None",
",",
"-",
"1",
",",
"float",
"(",
"'inf'",
")",
")",
":",
"# Handle \"unlimited\" quotas.",
"self",
".",
"usages",
"[",
"quota",
".",
"name",
"]"... | Adds an internal tracking reference for the given quota. | [
"Adds",
"an",
"internal",
"tracking",
"reference",
"for",
"the",
"given",
"quota",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/usage/quotas.py#L141-L148 | train | 215,853 |
def tally(self, name, value):
    """Add *value* to the "used" count for quota *name*.

    ``None`` is treated as zero; anything else must be coercible to
    ``int``.  The derived "available" figure is refreshed afterwards.
    """
    increment = int(value or 0)  # protect against None
    entry = self.usages[name]
    # Missing 'used' starts from zero on the first tally.
    entry['used'] = entry.get('used', 0) + increment
    self.update_available(name)
"def",
"tally",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"value",
"=",
"value",
"or",
"0",
"# Protection against None.",
"# Start at 0 if this is the first value.",
"if",
"'used'",
"not",
"in",
"self",
".",
"usages",
"[",
"name",
"]",
":",
"self",
"... | Adds to the "used" metric for the given quota. | [
"Adds",
"to",
"the",
"used",
"metric",
"for",
"the",
"given",
"quota",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/usage/quotas.py#L150-L158 | train | 215,854 |
def update_available(self, name):
    """Recompute the "available" figure for quota *name*.

    ``available = quota - used`` clamped at zero; a missing quota limit
    is treated as unlimited (infinity).  Assumes ``'used'`` is already
    present for *name*.
    """
    limit = self.usages.get(name, {}).get('quota', float('inf'))
    entry = self.usages[name]
    entry['available'] = max(limit - entry['used'], 0)
"def",
"update_available",
"(",
"self",
",",
"name",
")",
":",
"quota",
"=",
"self",
".",
"usages",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
".",
"get",
"(",
"'quota'",
",",
"float",
"(",
"'inf'",
")",
")",
"available",
"=",
"quota",
"-",
"se... | Updates the "available" metric for the given quota. | [
"Updates",
"the",
"available",
"metric",
"for",
"the",
"given",
"quota",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/usage/quotas.py#L160-L166 | train | 215,855 |
def process_non_api_filters(search_opts, non_api_filter_info):
    """Translate GUI-only filter fields into real API filter fields.

    Some filter fields shown to users do not exist on the backend
    resource (e.g. nova exposes an image ID but users filter by image
    name).  Each entry of *non_api_filter_info* is a
    ``(fake_field, real_field, resources)`` tuple; the fake field in
    *search_opts* is swapped for the corresponding real field.

    Returns True when further lookup is required, False as soon as one
    fake field has no matching resources (e.g. no corresponding real
    field value exists).
    """
    # all() short-circuits on the first failed swap, matching the
    # original early-return behavior.
    return all(
        _swap_filter(resources, search_opts, fake_field, real_field)
        for fake_field, real_field, resources in non_api_filter_info)
"def",
"process_non_api_filters",
"(",
"search_opts",
",",
"non_api_filter_info",
")",
":",
"for",
"fake_field",
",",
"real_field",
",",
"resources",
"in",
"non_api_filter_info",
":",
"if",
"not",
"_swap_filter",
"(",
"resources",
",",
"search_opts",
",",
"fake_fiel... | Process filters by non-API fields
There are cases where it is useful to provide a filter field
which does not exist in a resource in a backend service.
For example, nova server list provides 'image' field with image ID
but 'image name' is more useful for GUI users.
This function replaces fake fields into corresponding real fields.
The format of non_api_filter_info is a tuple/list of
(fake_field, real_field, resources).
This returns True if further lookup is required.
It returns False if there are no matching resources,
for example, if no corresponding real field exists. | [
"Process",
"filters",
"by",
"non",
"-",
"API",
"fields"
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/dashboards/project/instances/views.py#L202-L221 | train | 215,856 |
openstack/horizon | horizon/utils/validators.py | validate_port_or_colon_separated_port_range | def validate_port_or_colon_separated_port_range(port_range):
"""Accepts a port number or a single-colon separated range."""
if port_range.count(':') > 1:
raise ValidationError(_("One colon allowed in port range"))
ports = port_range.split(':')
for port in ports:
validate_port_range(port) | python | def validate_port_or_colon_separated_port_range(port_range):
"""Accepts a port number or a single-colon separated range."""
if port_range.count(':') > 1:
raise ValidationError(_("One colon allowed in port range"))
ports = port_range.split(':')
for port in ports:
validate_port_range(port) | [
"def",
"validate_port_or_colon_separated_port_range",
"(",
"port_range",
")",
":",
"if",
"port_range",
".",
"count",
"(",
"':'",
")",
">",
"1",
":",
"raise",
"ValidationError",
"(",
"_",
"(",
"\"One colon allowed in port range\"",
")",
")",
"ports",
"=",
"port_ran... | Accepts a port number or a single-colon separated range. | [
"Accepts",
"a",
"port",
"number",
"or",
"a",
"single",
"-",
"colon",
"separated",
"range",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/utils/validators.py#L58-L64 | train | 215,857 |
def import_submodules(module):
    """Import all direct submodules of *module* and return them in a dict.

    Submodules that fail to import are logged and skipped rather than
    aborting the scan.

    :param module: an already-imported package (must expose ``__path__``).
    :returns: dict mapping the unqualified submodule name (``"pkg.sub"``
        -> ``"sub"``) to the imported module object.
    """
    submodules = {}
    prefix = module.__name__ + '.'
    # loader/ispkg from iter_modules are not needed here.
    for _loader, name, _ispkg in pkgutil.iter_modules(module.__path__, prefix):
        try:
            submodule = import_module(name)
        except ImportError as e:
            # A broken plugin shouldn't abort discovery of the rest.
            # FIXME: Make the errors non-fatal (do we want that?).
            logging.warning("Error importing %s", name)
            logging.exception(e)
        else:
            # Key by the unqualified name only.
            submodules[name.rsplit('.', 1)[1]] = submodule
    return submodules
"def",
"import_submodules",
"(",
"module",
")",
":",
"submodules",
"=",
"{",
"}",
"for",
"loader",
",",
"name",
",",
"ispkg",
"in",
"pkgutil",
".",
"iter_modules",
"(",
"module",
".",
"__path__",
",",
"module",
".",
"__name__",
"+",
"'.'",
")",
":",
"t... | Import all submodules and make them available in a dict. | [
"Import",
"all",
"submodules",
"and",
"make",
"them",
"available",
"in",
"a",
"dict",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/utils/settings.py#L24-L38 | train | 215,858 |
def import_dashboard_config(modules):
    """Collect dashboard/panel configuration from enabled-file modules.

    Every submodule of each module in *modules* is inspected.  A
    submodule declaring ``DASHBOARD`` contributes to that dashboard's
    config dict; one declaring ``PANEL``, ``PANEL_GROUP`` or ``FEATURE``
    is keyed by its own file name, so a file in ``local.enabled``
    replaces one with the same name loaded earlier.  Anything else is
    skipped with a warning.

    Returns the ``(key, config-dict)`` pairs sorted by config file name.
    """
    config = collections.defaultdict(dict)

    def _register(submodule):
        # One submodule's contribution to the merged config.
        if hasattr(submodule, 'DASHBOARD'):
            config[submodule.DASHBOARD].update(submodule.__dict__)
        elif any(hasattr(submodule, attr)
                 for attr in ('PANEL', 'PANEL_GROUP', 'FEATURE')):
            # Later files (i.e., local.enabled) win over earlier ones
            # with the same name.
            config[submodule.__name__.rsplit('.', 1)[1]] = submodule.__dict__
        else:
            logging.warning("Skipping %s because it doesn't have DASHBOARD"
                            ", PANEL, PANEL_GROUP, or FEATURE defined.",
                            submodule.__name__)

    for module in modules:
        for submodule in import_submodules(module).values():
            _register(submodule)

    def _file_name(item):
        # Sort key: the unqualified module (file) name of the config.
        return item[1]['__name__'].rsplit('.', 1)[1]

    return sorted(config.items(), key=_file_name)
"def",
"import_dashboard_config",
"(",
"modules",
")",
":",
"config",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"for",
"module",
"in",
"modules",
":",
"for",
"submodule",
"in",
"import_submodules",
"(",
"module",
")",
".",
"values",
"(",
")",... | Imports configuration from all the modules and merges it. | [
"Imports",
"configuration",
"from",
"all",
"the",
"modules",
"and",
"merges",
"it",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/utils/settings.py#L41-L61 | train | 215,859 |
def get_xstatic_dirs(XSTATIC_MODULES, HORIZON_CONFIG):
    """Resolve static-file locations for the configured xstatic packages.

    For each ``(module_name, files)`` pair in *XSTATIC_MODULES* the
    package is imported, its on-disk ``BASE_DIR`` is mapped under
    ``horizon/lib/<NAME>``, and the JavaScript entry points (the
    package's ``MAIN`` when defined, otherwise the listed files) are
    appended to ``HORIZON_CONFIG['xstatic_lib_files']`` for linking in
    the HTML.

    ``xstatic.pkg.jquery_ui`` is special-cased because packagers moved
    its contents; the 1.10.x releases already ship an ``ui`` directory.

    Returns the STATICFILES_DIRS-style list of
    ``(url prefix, directory)`` tuples.
    """
    static_dirs = []
    lib_files = HORIZON_CONFIG.setdefault('xstatic_lib_files', [])
    for module_name, files in XSTATIC_MODULES:
        module = import_module(module_name)
        if module_name == 'xstatic.pkg.jquery_ui':
            # determine the correct path for jquery-ui which packagers moved
            if module.VERSION.startswith('1.10.'):
                # The 1.10.x versions already contain 'ui' directory.
                files = ['ui/' + files[0]]
        static_dirs.append(('horizon/lib/' + module.NAME, module.BASE_DIR))
        # Prefer the entry points declared by the xstatic package itself.
        if hasattr(module, 'MAIN'):
            files = module.MAIN
        if not isinstance(files, list):
            files = [files]
        # Link only the Javascript entry points (css/themes are included
        # explicitly elsewhere in style/themes as appropriate).
        scripts = [entry for entry in files if entry.endswith('.js')]
        try:
            for entry in scripts:
                lib_files.append('horizon/lib/' + module.NAME + '/' + entry)
        except TypeError:
            raise Exception(
                '%s: Nothing to include because files to include are not '
                'defined (i.e., None) in BASE_XSTATIC_MODULES list and '
                'a corresponding XStatic module does not define MAIN list.'
                % module_name)
    return static_dirs
"def",
"get_xstatic_dirs",
"(",
"XSTATIC_MODULES",
",",
"HORIZON_CONFIG",
")",
":",
"STATICFILES_DIRS",
"=",
"[",
"]",
"HORIZON_CONFIG",
".",
"setdefault",
"(",
"'xstatic_lib_files'",
",",
"[",
"]",
")",
"for",
"module_name",
",",
"files",
"in",
"XSTATIC_MODULES",... | Discover static file configuration of the xstatic modules.
For each entry in the XSTATIC_MODULES list we determine the entry
point files (which may come from the xstatic MAIN var) and then
determine where in the Django static tree the xstatic package's contents
should be placed.
For jquery.bootstrap.wizard.js the module name is None the static file is
actually a 3rd-party file but resides in the Horizon source tree and not
an xstatic package.
The xstatic.pkg.jquery_ui package had its contents moved by packagers so
it must be handled as a special case. | [
"Discover",
"static",
"file",
"configuration",
"of",
"the",
"xstatic",
"modules",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/utils/settings.py#L242-L293 | train | 215,860 |
openstack/horizon | openstack_dashboard/api/nova.py | upgrade_api | def upgrade_api(request, client, version):
"""Ugrade the nova API to the specified version if possible."""
min_ver, max_ver = api_versions._get_server_version_range(client)
if min_ver <= api_versions.APIVersion(version) <= max_ver:
client = _nova.novaclient(request, version)
return client | python | def upgrade_api(request, client, version):
"""Ugrade the nova API to the specified version if possible."""
min_ver, max_ver = api_versions._get_server_version_range(client)
if min_ver <= api_versions.APIVersion(version) <= max_ver:
client = _nova.novaclient(request, version)
return client | [
"def",
"upgrade_api",
"(",
"request",
",",
"client",
",",
"version",
")",
":",
"min_ver",
",",
"max_ver",
"=",
"api_versions",
".",
"_get_server_version_range",
"(",
"client",
")",
"if",
"min_ver",
"<=",
"api_versions",
".",
"APIVersion",
"(",
"version",
")",
... | Ugrade the nova API to the specified version if possible. | [
"Ugrade",
"the",
"nova",
"API",
"to",
"the",
"specified",
"version",
"if",
"possible",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/nova.py#L192-L198 | train | 215,861 |
openstack/horizon | openstack_dashboard/api/nova.py | add_tenant_to_flavor | def add_tenant_to_flavor(request, flavor, tenant):
"""Add a tenant to the given flavor access list."""
return _nova.novaclient(request).flavor_access.add_tenant_access(
flavor=flavor, tenant=tenant) | python | def add_tenant_to_flavor(request, flavor, tenant):
"""Add a tenant to the given flavor access list."""
return _nova.novaclient(request).flavor_access.add_tenant_access(
flavor=flavor, tenant=tenant) | [
"def",
"add_tenant_to_flavor",
"(",
"request",
",",
"flavor",
",",
"tenant",
")",
":",
"return",
"_nova",
".",
"novaclient",
"(",
"request",
")",
".",
"flavor_access",
".",
"add_tenant_access",
"(",
"flavor",
"=",
"flavor",
",",
"tenant",
"=",
"tenant",
")"
... | Add a tenant to the given flavor access list. | [
"Add",
"a",
"tenant",
"to",
"the",
"given",
"flavor",
"access",
"list",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/nova.py#L336-L339 | train | 215,862 |
openstack/horizon | openstack_dashboard/api/nova.py | remove_tenant_from_flavor | def remove_tenant_from_flavor(request, flavor, tenant):
"""Remove a tenant from the given flavor access list."""
return _nova.novaclient(request).flavor_access.remove_tenant_access(
flavor=flavor, tenant=tenant) | python | def remove_tenant_from_flavor(request, flavor, tenant):
"""Remove a tenant from the given flavor access list."""
return _nova.novaclient(request).flavor_access.remove_tenant_access(
flavor=flavor, tenant=tenant) | [
"def",
"remove_tenant_from_flavor",
"(",
"request",
",",
"flavor",
",",
"tenant",
")",
":",
"return",
"_nova",
".",
"novaclient",
"(",
"request",
")",
".",
"flavor_access",
".",
"remove_tenant_access",
"(",
"flavor",
"=",
"flavor",
",",
"tenant",
"=",
"tenant"... | Remove a tenant from the given flavor access list. | [
"Remove",
"a",
"tenant",
"from",
"the",
"given",
"flavor",
"access",
"list",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/nova.py#L343-L346 | train | 215,863 |
openstack/horizon | openstack_dashboard/api/nova.py | flavor_get_extras | def flavor_get_extras(request, flavor_id, raw=False, flavor=None):
"""Get flavor extra specs."""
if flavor is None:
flavor = _nova.novaclient(request).flavors.get(flavor_id)
extras = flavor.get_keys()
if raw:
return extras
return [FlavorExtraSpec(flavor_id, key, value) for
key, value in extras.items()] | python | def flavor_get_extras(request, flavor_id, raw=False, flavor=None):
"""Get flavor extra specs."""
if flavor is None:
flavor = _nova.novaclient(request).flavors.get(flavor_id)
extras = flavor.get_keys()
if raw:
return extras
return [FlavorExtraSpec(flavor_id, key, value) for
key, value in extras.items()] | [
"def",
"flavor_get_extras",
"(",
"request",
",",
"flavor_id",
",",
"raw",
"=",
"False",
",",
"flavor",
"=",
"None",
")",
":",
"if",
"flavor",
"is",
"None",
":",
"flavor",
"=",
"_nova",
".",
"novaclient",
"(",
"request",
")",
".",
"flavors",
".",
"get",... | Get flavor extra specs. | [
"Get",
"flavor",
"extra",
"specs",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/nova.py#L350-L358 | train | 215,864 |
openstack/horizon | openstack_dashboard/api/nova.py | flavor_extra_delete | def flavor_extra_delete(request, flavor_id, keys):
"""Unset the flavor extra spec keys."""
flavor = _nova.novaclient(request).flavors.get(flavor_id)
return flavor.unset_keys(keys) | python | def flavor_extra_delete(request, flavor_id, keys):
"""Unset the flavor extra spec keys."""
flavor = _nova.novaclient(request).flavors.get(flavor_id)
return flavor.unset_keys(keys) | [
"def",
"flavor_extra_delete",
"(",
"request",
",",
"flavor_id",
",",
"keys",
")",
":",
"flavor",
"=",
"_nova",
".",
"novaclient",
"(",
"request",
")",
".",
"flavors",
".",
"get",
"(",
"flavor_id",
")",
"return",
"flavor",
".",
"unset_keys",
"(",
"keys",
... | Unset the flavor extra spec keys. | [
"Unset",
"the",
"flavor",
"extra",
"spec",
"keys",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/nova.py#L362-L365 | train | 215,865 |
openstack/horizon | openstack_dashboard/api/nova.py | flavor_extra_set | def flavor_extra_set(request, flavor_id, metadata):
"""Set the flavor extra spec keys."""
flavor = _nova.novaclient(request).flavors.get(flavor_id)
if (not metadata): # not a way to delete keys
return None
return flavor.set_keys(metadata) | python | def flavor_extra_set(request, flavor_id, metadata):
"""Set the flavor extra spec keys."""
flavor = _nova.novaclient(request).flavors.get(flavor_id)
if (not metadata): # not a way to delete keys
return None
return flavor.set_keys(metadata) | [
"def",
"flavor_extra_set",
"(",
"request",
",",
"flavor_id",
",",
"metadata",
")",
":",
"flavor",
"=",
"_nova",
".",
"novaclient",
"(",
"request",
")",
".",
"flavors",
".",
"get",
"(",
"flavor_id",
")",
"if",
"(",
"not",
"metadata",
")",
":",
"# not a wa... | Set the flavor extra spec keys. | [
"Set",
"the",
"flavor",
"extra",
"spec",
"keys",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/nova.py#L369-L374 | train | 215,866 |
openstack/horizon | openstack_dashboard/api/nova.py | server_console_output | def server_console_output(request, instance_id, tail_length=None):
"""Gets console output of an instance."""
nc = _nova.novaclient(request)
return nc.servers.get_console_output(instance_id, length=tail_length) | python | def server_console_output(request, instance_id, tail_length=None):
"""Gets console output of an instance."""
nc = _nova.novaclient(request)
return nc.servers.get_console_output(instance_id, length=tail_length) | [
"def",
"server_console_output",
"(",
"request",
",",
"instance_id",
",",
"tail_length",
"=",
"None",
")",
":",
"nc",
"=",
"_nova",
".",
"novaclient",
"(",
"request",
")",
"return",
"nc",
".",
"servers",
".",
"get_console_output",
"(",
"instance_id",
",",
"le... | Gets console output of an instance. | [
"Gets",
"console",
"output",
"of",
"an",
"instance",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/nova.py#L533-L536 | train | 215,867 |
openstack/horizon | openstack_dashboard/api/nova.py | list_extensions | def list_extensions(request):
"""List all nova extensions, except the ones in the blacklist."""
blacklist = set(getattr(settings,
'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', []))
nova_api = _nova.novaclient(request)
return tuple(
extension for extension in
nova_list_extensions.ListExtManager(nova_api).show_all()
if extension.name not in blacklist
) | python | def list_extensions(request):
"""List all nova extensions, except the ones in the blacklist."""
blacklist = set(getattr(settings,
'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', []))
nova_api = _nova.novaclient(request)
return tuple(
extension for extension in
nova_list_extensions.ListExtManager(nova_api).show_all()
if extension.name not in blacklist
) | [
"def",
"list_extensions",
"(",
"request",
")",
":",
"blacklist",
"=",
"set",
"(",
"getattr",
"(",
"settings",
",",
"'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST'",
",",
"[",
"]",
")",
")",
"nova_api",
"=",
"_nova",
".",
"novaclient",
"(",
"request",
")",
"return",
"... | List all nova extensions, except the ones in the blacklist. | [
"List",
"all",
"nova",
"extensions",
"except",
"the",
"ones",
"in",
"the",
"blacklist",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/nova.py#L1012-L1021 | train | 215,868 |
openstack/horizon | openstack_dashboard/api/nova.py | extension_supported | def extension_supported(extension_name, request):
"""Determine if nova supports a given extension name.
Example values for the extension_name include AdminActions, ConsoleOutput,
etc.
"""
for ext in list_extensions(request):
if ext.name == extension_name:
return True
return False | python | def extension_supported(extension_name, request):
"""Determine if nova supports a given extension name.
Example values for the extension_name include AdminActions, ConsoleOutput,
etc.
"""
for ext in list_extensions(request):
if ext.name == extension_name:
return True
return False | [
"def",
"extension_supported",
"(",
"extension_name",
",",
"request",
")",
":",
"for",
"ext",
"in",
"list_extensions",
"(",
"request",
")",
":",
"if",
"ext",
".",
"name",
"==",
"extension_name",
":",
"return",
"True",
"return",
"False"
] | Determine if nova supports a given extension name.
Example values for the extension_name include AdminActions, ConsoleOutput,
etc. | [
"Determine",
"if",
"nova",
"supports",
"a",
"given",
"extension",
"name",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/nova.py#L1026-L1035 | train | 215,869 |
openstack/horizon | openstack_dashboard/utils/futurist_utils.py | call_functions_parallel | def call_functions_parallel(*worker_defs):
"""Call specified functions in parallel.
:param *worker_defs: Each positional argument can be either of
a function to be called or a tuple which consists of a function,
a list of positional arguments) and keyword arguments (optional).
If you need to pass arguments, you need to pass a tuple.
Example usages are like:
call_functions_parallel(func1, func2, func3)
call_functions_parallel(func1, (func2, [1, 2]))
call_functions_parallel((func1, [], {'a': 1}),
(func2, [], {'a': 2, 'b': 10}))
:returns: a tuple of values returned from individual functions.
None is returned if a corresponding function does not return.
It is better to return values other than None from individual
functions.
"""
# TODO(amotoki): Needs to figure out what max_workers can be specified.
# According to e0ne, the apache default configuration in devstack allows
# only 10 threads. What happens if max_worker=11 is specified?
max_workers = len(worker_defs)
# Prepare a list with enough length.
futures = [None] * len(worker_defs)
with futurist.ThreadPoolExecutor(max_workers=max_workers) as e:
for index, func_def in enumerate(worker_defs):
if callable(func_def):
func_def = [func_def]
args = func_def[1] if len(func_def) > 1 else []
kwargs = func_def[2] if len(func_def) > 2 else {}
func = functools.partial(func_def[0], *args, **kwargs)
futures[index] = e.submit(fn=func)
return tuple(f.result() for f in futures) | python | def call_functions_parallel(*worker_defs):
"""Call specified functions in parallel.
:param *worker_defs: Each positional argument can be either of
a function to be called or a tuple which consists of a function,
a list of positional arguments) and keyword arguments (optional).
If you need to pass arguments, you need to pass a tuple.
Example usages are like:
call_functions_parallel(func1, func2, func3)
call_functions_parallel(func1, (func2, [1, 2]))
call_functions_parallel((func1, [], {'a': 1}),
(func2, [], {'a': 2, 'b': 10}))
:returns: a tuple of values returned from individual functions.
None is returned if a corresponding function does not return.
It is better to return values other than None from individual
functions.
"""
# TODO(amotoki): Needs to figure out what max_workers can be specified.
# According to e0ne, the apache default configuration in devstack allows
# only 10 threads. What happens if max_worker=11 is specified?
max_workers = len(worker_defs)
# Prepare a list with enough length.
futures = [None] * len(worker_defs)
with futurist.ThreadPoolExecutor(max_workers=max_workers) as e:
for index, func_def in enumerate(worker_defs):
if callable(func_def):
func_def = [func_def]
args = func_def[1] if len(func_def) > 1 else []
kwargs = func_def[2] if len(func_def) > 2 else {}
func = functools.partial(func_def[0], *args, **kwargs)
futures[index] = e.submit(fn=func)
return tuple(f.result() for f in futures) | [
"def",
"call_functions_parallel",
"(",
"*",
"worker_defs",
")",
":",
"# TODO(amotoki): Needs to figure out what max_workers can be specified.",
"# According to e0ne, the apache default configuration in devstack allows",
"# only 10 threads. What happens if max_worker=11 is specified?",
"max_work... | Call specified functions in parallel.
:param *worker_defs: Each positional argument can be either of
a function to be called or a tuple which consists of a function,
a list of positional arguments) and keyword arguments (optional).
If you need to pass arguments, you need to pass a tuple.
Example usages are like:
call_functions_parallel(func1, func2, func3)
call_functions_parallel(func1, (func2, [1, 2]))
call_functions_parallel((func1, [], {'a': 1}),
(func2, [], {'a': 2, 'b': 10}))
:returns: a tuple of values returned from individual functions.
None is returned if a corresponding function does not return.
It is better to return values other than None from individual
functions. | [
"Call",
"specified",
"functions",
"in",
"parallel",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/utils/futurist_utils.py#L18-L50 | train | 215,870 |
openstack/horizon | horizon/utils/secret_key.py | generate_key | def generate_key(key_length=64):
"""Secret key generator.
The quality of randomness depends on operating system support,
see http://docs.python.org/library/random.html#random.SystemRandom.
"""
if hasattr(random, 'SystemRandom'):
logging.info('Generating a secure random key using SystemRandom.')
choice = random.SystemRandom().choice
else:
msg = "WARNING: SystemRandom not present. Generating a random "\
"key using random.choice (NOT CRYPTOGRAPHICALLY SECURE)."
logging.warning(msg)
choice = random.choice
return ''.join(map(lambda x: choice(string.digits + string.ascii_letters),
range(key_length))) | python | def generate_key(key_length=64):
"""Secret key generator.
The quality of randomness depends on operating system support,
see http://docs.python.org/library/random.html#random.SystemRandom.
"""
if hasattr(random, 'SystemRandom'):
logging.info('Generating a secure random key using SystemRandom.')
choice = random.SystemRandom().choice
else:
msg = "WARNING: SystemRandom not present. Generating a random "\
"key using random.choice (NOT CRYPTOGRAPHICALLY SECURE)."
logging.warning(msg)
choice = random.choice
return ''.join(map(lambda x: choice(string.digits + string.ascii_letters),
range(key_length))) | [
"def",
"generate_key",
"(",
"key_length",
"=",
"64",
")",
":",
"if",
"hasattr",
"(",
"random",
",",
"'SystemRandom'",
")",
":",
"logging",
".",
"info",
"(",
"'Generating a secure random key using SystemRandom.'",
")",
"choice",
"=",
"random",
".",
"SystemRandom",
... | Secret key generator.
The quality of randomness depends on operating system support,
see http://docs.python.org/library/random.html#random.SystemRandom. | [
"Secret",
"key",
"generator",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/utils/secret_key.py#L28-L43 | train | 215,871 |
openstack/horizon | horizon/utils/secret_key.py | generate_or_read_from_file | def generate_or_read_from_file(key_file='.secret_key', key_length=64):
"""Multiprocess-safe secret key file generator.
Useful to replace the default (and thus unsafe) SECRET_KEY in settings.py
upon first start. Save to use, i.e. when multiple Python interpreters
serve the dashboard Django application (e.g. in a mod_wsgi + daemonized
environment). Also checks if file permissions are set correctly and
throws an exception if not.
"""
abspath = os.path.abspath(key_file)
# check, if key_file already exists
# if yes, then just read and return key
if os.path.exists(key_file):
key = read_from_file(key_file)
return key
# otherwise, first lock to make sure only one process
lock = lockutils.external_lock(key_file + ".lock",
lock_path=os.path.dirname(abspath))
with lock:
if not os.path.exists(key_file):
key = generate_key(key_length)
old_umask = os.umask(0o177) # Use '0600' file permissions
with open(key_file, 'w') as f:
f.write(key)
os.umask(old_umask)
else:
key = read_from_file(key_file)
return key | python | def generate_or_read_from_file(key_file='.secret_key', key_length=64):
"""Multiprocess-safe secret key file generator.
Useful to replace the default (and thus unsafe) SECRET_KEY in settings.py
upon first start. Save to use, i.e. when multiple Python interpreters
serve the dashboard Django application (e.g. in a mod_wsgi + daemonized
environment). Also checks if file permissions are set correctly and
throws an exception if not.
"""
abspath = os.path.abspath(key_file)
# check, if key_file already exists
# if yes, then just read and return key
if os.path.exists(key_file):
key = read_from_file(key_file)
return key
# otherwise, first lock to make sure only one process
lock = lockutils.external_lock(key_file + ".lock",
lock_path=os.path.dirname(abspath))
with lock:
if not os.path.exists(key_file):
key = generate_key(key_length)
old_umask = os.umask(0o177) # Use '0600' file permissions
with open(key_file, 'w') as f:
f.write(key)
os.umask(old_umask)
else:
key = read_from_file(key_file)
return key | [
"def",
"generate_or_read_from_file",
"(",
"key_file",
"=",
"'.secret_key'",
",",
"key_length",
"=",
"64",
")",
":",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"key_file",
")",
"# check, if key_file already exists",
"# if yes, then just read and return key",... | Multiprocess-safe secret key file generator.
Useful to replace the default (and thus unsafe) SECRET_KEY in settings.py
upon first start. Save to use, i.e. when multiple Python interpreters
serve the dashboard Django application (e.g. in a mod_wsgi + daemonized
environment). Also checks if file permissions are set correctly and
throws an exception if not. | [
"Multiprocess",
"-",
"safe",
"secret",
"key",
"file",
"generator",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/utils/secret_key.py#L56-L84 | train | 215,872 |
openstack/horizon | openstack_dashboard/api/rest/urls.py | register | def register(view):
"""Register API views to respond to a regex pattern.
``url_regex`` on a wrapped view class is used as the regex pattern.
The view should be a standard Django class-based view implementing an
as_view() method. The url_regex attribute of the view should be a standard
Django URL regex pattern.
"""
p = urls.url(view.url_regex, view.as_view())
urlpatterns.append(p)
return view | python | def register(view):
"""Register API views to respond to a regex pattern.
``url_regex`` on a wrapped view class is used as the regex pattern.
The view should be a standard Django class-based view implementing an
as_view() method. The url_regex attribute of the view should be a standard
Django URL regex pattern.
"""
p = urls.url(view.url_regex, view.as_view())
urlpatterns.append(p)
return view | [
"def",
"register",
"(",
"view",
")",
":",
"p",
"=",
"urls",
".",
"url",
"(",
"view",
".",
"url_regex",
",",
"view",
".",
"as_view",
"(",
")",
")",
"urlpatterns",
".",
"append",
"(",
"p",
")",
"return",
"view"
] | Register API views to respond to a regex pattern.
``url_regex`` on a wrapped view class is used as the regex pattern.
The view should be a standard Django class-based view implementing an
as_view() method. The url_regex attribute of the view should be a standard
Django URL regex pattern. | [
"Register",
"API",
"views",
"to",
"respond",
"to",
"a",
"regex",
"pattern",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/rest/urls.py#L22-L32 | train | 215,873 |
openstack/horizon | horizon/templatetags/shellfilter.py | shellfilter | def shellfilter(value):
"""Replace HTML chars for shell usage."""
replacements = {'\\': '\\\\',
'`': '\\`',
"'": "\\'",
'"': '\\"'}
for search, repl in replacements.items():
value = value.replace(search, repl)
return safestring.mark_safe(value) | python | def shellfilter(value):
"""Replace HTML chars for shell usage."""
replacements = {'\\': '\\\\',
'`': '\\`',
"'": "\\'",
'"': '\\"'}
for search, repl in replacements.items():
value = value.replace(search, repl)
return safestring.mark_safe(value) | [
"def",
"shellfilter",
"(",
"value",
")",
":",
"replacements",
"=",
"{",
"'\\\\'",
":",
"'\\\\\\\\'",
",",
"'`'",
":",
"'\\\\`'",
",",
"\"'\"",
":",
"\"\\\\'\"",
",",
"'\"'",
":",
"'\\\\\"'",
"}",
"for",
"search",
",",
"repl",
"in",
"replacements",
".",
... | Replace HTML chars for shell usage. | [
"Replace",
"HTML",
"chars",
"for",
"shell",
"usage",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/templatetags/shellfilter.py#L22-L30 | train | 215,874 |
openstack/horizon | openstack_auth/views.py | websso | def websso(request):
"""Logs a user in using a token from Keystone's POST."""
referer = request.META.get('HTTP_REFERER', settings.OPENSTACK_KEYSTONE_URL)
auth_url = utils.clean_up_auth_url(referer)
token = request.POST.get('token')
try:
request.user = auth.authenticate(request=request, auth_url=auth_url,
token=token)
except exceptions.KeystoneAuthException as exc:
if utils.is_websso_default_redirect():
res = django_http.HttpResponseRedirect(settings.LOGIN_ERROR)
else:
msg = 'Login failed: %s' % six.text_type(exc)
res = django_http.HttpResponseRedirect(settings.LOGIN_URL)
res.set_cookie('logout_reason', msg, max_age=10)
return res
auth_user.set_session_from_user(request, request.user)
auth.login(request, request.user)
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return django_http.HttpResponseRedirect(settings.LOGIN_REDIRECT_URL) | python | def websso(request):
"""Logs a user in using a token from Keystone's POST."""
referer = request.META.get('HTTP_REFERER', settings.OPENSTACK_KEYSTONE_URL)
auth_url = utils.clean_up_auth_url(referer)
token = request.POST.get('token')
try:
request.user = auth.authenticate(request=request, auth_url=auth_url,
token=token)
except exceptions.KeystoneAuthException as exc:
if utils.is_websso_default_redirect():
res = django_http.HttpResponseRedirect(settings.LOGIN_ERROR)
else:
msg = 'Login failed: %s' % six.text_type(exc)
res = django_http.HttpResponseRedirect(settings.LOGIN_URL)
res.set_cookie('logout_reason', msg, max_age=10)
return res
auth_user.set_session_from_user(request, request.user)
auth.login(request, request.user)
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return django_http.HttpResponseRedirect(settings.LOGIN_REDIRECT_URL) | [
"def",
"websso",
"(",
"request",
")",
":",
"referer",
"=",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_REFERER'",
",",
"settings",
".",
"OPENSTACK_KEYSTONE_URL",
")",
"auth_url",
"=",
"utils",
".",
"clean_up_auth_url",
"(",
"referer",
")",
"token",
"=",
... | Logs a user in using a token from Keystone's POST. | [
"Logs",
"a",
"user",
"in",
"using",
"a",
"token",
"from",
"Keystone",
"s",
"POST",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/views.py#L159-L180 | train | 215,875 |
openstack/horizon | openstack_auth/views.py | logout | def logout(request, login_url=None, **kwargs):
"""Logs out the user if he is logged in. Then redirects to the log-in page.
:param login_url:
Once logged out, defines the URL where to redirect after login
:param kwargs:
see django.contrib.auth.views.logout_then_login extra parameters.
"""
msg = 'Logging out user "%(username)s".' % \
{'username': request.user.username}
LOG.info(msg)
""" Securely logs a user out. """
if (utils.is_websso_enabled and utils.is_websso_default_redirect() and
utils.get_websso_default_redirect_logout()):
auth_user.unset_session_user_variables(request)
return django_http.HttpResponseRedirect(
utils.get_websso_default_redirect_logout())
else:
return django_auth_views.logout_then_login(request,
login_url=login_url,
**kwargs) | python | def logout(request, login_url=None, **kwargs):
"""Logs out the user if he is logged in. Then redirects to the log-in page.
:param login_url:
Once logged out, defines the URL where to redirect after login
:param kwargs:
see django.contrib.auth.views.logout_then_login extra parameters.
"""
msg = 'Logging out user "%(username)s".' % \
{'username': request.user.username}
LOG.info(msg)
""" Securely logs a user out. """
if (utils.is_websso_enabled and utils.is_websso_default_redirect() and
utils.get_websso_default_redirect_logout()):
auth_user.unset_session_user_variables(request)
return django_http.HttpResponseRedirect(
utils.get_websso_default_redirect_logout())
else:
return django_auth_views.logout_then_login(request,
login_url=login_url,
**kwargs) | [
"def",
"logout",
"(",
"request",
",",
"login_url",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"msg",
"=",
"'Logging out user \"%(username)s\".'",
"%",
"{",
"'username'",
":",
"request",
".",
"user",
".",
"username",
"}",
"LOG",
".",
"info",
"(",
"ms... | Logs out the user if he is logged in. Then redirects to the log-in page.
:param login_url:
Once logged out, defines the URL where to redirect after login
:param kwargs:
see django.contrib.auth.views.logout_then_login extra parameters. | [
"Logs",
"out",
"the",
"user",
"if",
"he",
"is",
"logged",
"in",
".",
"Then",
"redirects",
"to",
"the",
"log",
"-",
"in",
"page",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/views.py#L183-L206 | train | 215,876 |
openstack/horizon | openstack_auth/views.py | switch | def switch(request, tenant_id, redirect_field_name=auth.REDIRECT_FIELD_NAME):
"""Switches an authenticated user from one project to another."""
LOG.debug('Switching to tenant %s for user "%s".',
tenant_id, request.user.username)
endpoint, __ = utils.fix_auth_url_version_prefix(request.user.endpoint)
session = utils.get_session()
# Keystone can be configured to prevent exchanging a scoped token for
# another token. Always use the unscoped token for requesting a
# scoped token.
unscoped_token = request.user.unscoped_token
auth = utils.get_token_auth_plugin(auth_url=endpoint,
token=unscoped_token,
project_id=tenant_id)
try:
auth_ref = auth.get_access(session)
msg = 'Project switch successful for user "%(username)s".' % \
{'username': request.user.username}
LOG.info(msg)
except keystone_exceptions.ClientException:
msg = (
_('Project switch failed for user "%(username)s".') %
{'username': request.user.username})
messages.error(request, msg)
auth_ref = None
LOG.exception('An error occurred while switching sessions.')
# Ensure the user-originating redirection url is safe.
# Taken from django.contrib.auth.views.login()
redirect_to = request.GET.get(redirect_field_name, '')
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = settings.LOGIN_REDIRECT_URL
if auth_ref:
user = auth_user.create_user_from_token(
request,
auth_user.Token(auth_ref, unscoped_token=unscoped_token),
endpoint)
auth_user.set_session_from_user(request, user)
message = (
_('Switch to project "%(project_name)s" successful.') %
{'project_name': request.user.project_name})
messages.success(request, message)
response = shortcuts.redirect(redirect_to)
utils.set_response_cookie(response, 'recent_project',
request.user.project_id)
return response | python | def switch(request, tenant_id, redirect_field_name=auth.REDIRECT_FIELD_NAME):
"""Switches an authenticated user from one project to another."""
LOG.debug('Switching to tenant %s for user "%s".',
tenant_id, request.user.username)
endpoint, __ = utils.fix_auth_url_version_prefix(request.user.endpoint)
session = utils.get_session()
# Keystone can be configured to prevent exchanging a scoped token for
# another token. Always use the unscoped token for requesting a
# scoped token.
unscoped_token = request.user.unscoped_token
auth = utils.get_token_auth_plugin(auth_url=endpoint,
token=unscoped_token,
project_id=tenant_id)
try:
auth_ref = auth.get_access(session)
msg = 'Project switch successful for user "%(username)s".' % \
{'username': request.user.username}
LOG.info(msg)
except keystone_exceptions.ClientException:
msg = (
_('Project switch failed for user "%(username)s".') %
{'username': request.user.username})
messages.error(request, msg)
auth_ref = None
LOG.exception('An error occurred while switching sessions.')
# Ensure the user-originating redirection url is safe.
# Taken from django.contrib.auth.views.login()
redirect_to = request.GET.get(redirect_field_name, '')
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = settings.LOGIN_REDIRECT_URL
if auth_ref:
user = auth_user.create_user_from_token(
request,
auth_user.Token(auth_ref, unscoped_token=unscoped_token),
endpoint)
auth_user.set_session_from_user(request, user)
message = (
_('Switch to project "%(project_name)s" successful.') %
{'project_name': request.user.project_name})
messages.success(request, message)
response = shortcuts.redirect(redirect_to)
utils.set_response_cookie(response, 'recent_project',
request.user.project_id)
return response | [
"def",
"switch",
"(",
"request",
",",
"tenant_id",
",",
"redirect_field_name",
"=",
"auth",
".",
"REDIRECT_FIELD_NAME",
")",
":",
"LOG",
".",
"debug",
"(",
"'Switching to tenant %s for user \"%s\".'",
",",
"tenant_id",
",",
"request",
".",
"user",
".",
"username",... | Switches an authenticated user from one project to another. | [
"Switches",
"an",
"authenticated",
"user",
"from",
"one",
"project",
"to",
"another",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/views.py#L210-L257 | train | 215,877 |
openstack/horizon | openstack_auth/views.py | switch_region | def switch_region(request, region_name,
redirect_field_name=auth.REDIRECT_FIELD_NAME):
"""Switches the user's region for all services except Identity service.
The region will be switched if the given region is one of the regions
available for the scoped project. Otherwise the region is not switched.
"""
if region_name in request.user.available_services_regions:
request.session['services_region'] = region_name
LOG.debug('Switching services region to %s for user "%s".',
region_name, request.user.username)
redirect_to = request.GET.get(redirect_field_name, '')
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = settings.LOGIN_REDIRECT_URL
response = shortcuts.redirect(redirect_to)
utils.set_response_cookie(response, 'services_region',
request.session['services_region'])
return response | python | def switch_region(request, region_name,
redirect_field_name=auth.REDIRECT_FIELD_NAME):
"""Switches the user's region for all services except Identity service.
The region will be switched if the given region is one of the regions
available for the scoped project. Otherwise the region is not switched.
"""
if region_name in request.user.available_services_regions:
request.session['services_region'] = region_name
LOG.debug('Switching services region to %s for user "%s".',
region_name, request.user.username)
redirect_to = request.GET.get(redirect_field_name, '')
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = settings.LOGIN_REDIRECT_URL
response = shortcuts.redirect(redirect_to)
utils.set_response_cookie(response, 'services_region',
request.session['services_region'])
return response | [
"def",
"switch_region",
"(",
"request",
",",
"region_name",
",",
"redirect_field_name",
"=",
"auth",
".",
"REDIRECT_FIELD_NAME",
")",
":",
"if",
"region_name",
"in",
"request",
".",
"user",
".",
"available_services_regions",
":",
"request",
".",
"session",
"[",
... | Switches the user's region for all services except Identity service.
The region will be switched if the given region is one of the regions
available for the scoped project. Otherwise the region is not switched. | [
"Switches",
"the",
"user",
"s",
"region",
"for",
"all",
"services",
"except",
"Identity",
"service",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/views.py#L261-L280 | train | 215,878 |
openstack/horizon | openstack_auth/views.py | switch_keystone_provider | def switch_keystone_provider(request, keystone_provider=None,
redirect_field_name=auth.REDIRECT_FIELD_NAME):
"""Switches the user's keystone provider using K2K Federation
If keystone_provider is given then we switch the user to
the keystone provider using K2K federation. Otherwise if keystone_provider
is None then we switch the user back to the Identity Provider Keystone
which a non federated token auth will be used.
"""
base_token = request.session.get('k2k_base_unscoped_token', None)
k2k_auth_url = request.session.get('k2k_auth_url', None)
keystone_providers = request.session.get('keystone_providers', None)
recent_project = request.COOKIES.get('recent_project')
if not base_token or not k2k_auth_url:
msg = _('K2K Federation not setup for this session')
raise exceptions.KeystoneAuthException(msg)
redirect_to = request.GET.get(redirect_field_name, '')
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = settings.LOGIN_REDIRECT_URL
unscoped_auth_ref = None
keystone_idp_id = getattr(
settings, 'KEYSTONE_PROVIDER_IDP_ID', 'localkeystone')
if keystone_provider == keystone_idp_id:
current_plugin = plugin.TokenPlugin()
unscoped_auth = current_plugin.get_plugin(auth_url=k2k_auth_url,
token=base_token)
else:
# Switch to service provider using K2K federation
plugins = [plugin.TokenPlugin()]
current_plugin = plugin.K2KAuthPlugin()
unscoped_auth = current_plugin.get_plugin(
auth_url=k2k_auth_url, service_provider=keystone_provider,
plugins=plugins, token=base_token, recent_project=recent_project)
try:
# Switch to identity provider using token auth
unscoped_auth_ref = current_plugin.get_access_info(unscoped_auth)
except exceptions.KeystoneAuthException as exc:
msg = 'Switching to Keystone Provider %s has failed. %s' \
% (keystone_provider, (six.text_type(exc)))
messages.error(request, msg)
if unscoped_auth_ref:
try:
request.user = auth.authenticate(
request=request, auth_url=unscoped_auth.auth_url,
token=unscoped_auth_ref.auth_token)
except exceptions.KeystoneAuthException as exc:
msg = 'Keystone provider switch failed: %s' % six.text_type(exc)
res = django_http.HttpResponseRedirect(settings.LOGIN_URL)
res.set_cookie('logout_reason', msg, max_age=10)
return res
auth.login(request, request.user)
auth_user.set_session_from_user(request, request.user)
request.session['keystone_provider_id'] = keystone_provider
request.session['keystone_providers'] = keystone_providers
request.session['k2k_base_unscoped_token'] = base_token
request.session['k2k_auth_url'] = k2k_auth_url
message = (
_('Switch to Keystone Provider "%(keystone_provider)s" '
'successful.') % {'keystone_provider': keystone_provider})
messages.success(request, message)
response = shortcuts.redirect(redirect_to)
return response | python | def switch_keystone_provider(request, keystone_provider=None,
redirect_field_name=auth.REDIRECT_FIELD_NAME):
"""Switches the user's keystone provider using K2K Federation
If keystone_provider is given then we switch the user to
the keystone provider using K2K federation. Otherwise if keystone_provider
is None then we switch the user back to the Identity Provider Keystone
which a non federated token auth will be used.
"""
base_token = request.session.get('k2k_base_unscoped_token', None)
k2k_auth_url = request.session.get('k2k_auth_url', None)
keystone_providers = request.session.get('keystone_providers', None)
recent_project = request.COOKIES.get('recent_project')
if not base_token or not k2k_auth_url:
msg = _('K2K Federation not setup for this session')
raise exceptions.KeystoneAuthException(msg)
redirect_to = request.GET.get(redirect_field_name, '')
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = settings.LOGIN_REDIRECT_URL
unscoped_auth_ref = None
keystone_idp_id = getattr(
settings, 'KEYSTONE_PROVIDER_IDP_ID', 'localkeystone')
if keystone_provider == keystone_idp_id:
current_plugin = plugin.TokenPlugin()
unscoped_auth = current_plugin.get_plugin(auth_url=k2k_auth_url,
token=base_token)
else:
# Switch to service provider using K2K federation
plugins = [plugin.TokenPlugin()]
current_plugin = plugin.K2KAuthPlugin()
unscoped_auth = current_plugin.get_plugin(
auth_url=k2k_auth_url, service_provider=keystone_provider,
plugins=plugins, token=base_token, recent_project=recent_project)
try:
# Switch to identity provider using token auth
unscoped_auth_ref = current_plugin.get_access_info(unscoped_auth)
except exceptions.KeystoneAuthException as exc:
msg = 'Switching to Keystone Provider %s has failed. %s' \
% (keystone_provider, (six.text_type(exc)))
messages.error(request, msg)
if unscoped_auth_ref:
try:
request.user = auth.authenticate(
request=request, auth_url=unscoped_auth.auth_url,
token=unscoped_auth_ref.auth_token)
except exceptions.KeystoneAuthException as exc:
msg = 'Keystone provider switch failed: %s' % six.text_type(exc)
res = django_http.HttpResponseRedirect(settings.LOGIN_URL)
res.set_cookie('logout_reason', msg, max_age=10)
return res
auth.login(request, request.user)
auth_user.set_session_from_user(request, request.user)
request.session['keystone_provider_id'] = keystone_provider
request.session['keystone_providers'] = keystone_providers
request.session['k2k_base_unscoped_token'] = base_token
request.session['k2k_auth_url'] = k2k_auth_url
message = (
_('Switch to Keystone Provider "%(keystone_provider)s" '
'successful.') % {'keystone_provider': keystone_provider})
messages.success(request, message)
response = shortcuts.redirect(redirect_to)
return response | [
"def",
"switch_keystone_provider",
"(",
"request",
",",
"keystone_provider",
"=",
"None",
",",
"redirect_field_name",
"=",
"auth",
".",
"REDIRECT_FIELD_NAME",
")",
":",
"base_token",
"=",
"request",
".",
"session",
".",
"get",
"(",
"'k2k_base_unscoped_token'",
",",
... | Switches the user's keystone provider using K2K Federation
If keystone_provider is given then we switch the user to
the keystone provider using K2K federation. Otherwise if keystone_provider
is None then we switch the user back to the Identity Provider Keystone
which a non federated token auth will be used. | [
"Switches",
"the",
"user",
"s",
"keystone",
"provider",
"using",
"K2K",
"Federation"
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/views.py#L284-L353 | train | 215,879 |
openstack/horizon | horizon/tables/actions.py | LinkAction.get_link_url | def get_link_url(self, datum=None):
"""Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
"""
if not self.url:
raise NotImplementedError('A LinkAction class must have a '
'url attribute or define its own '
'get_link_url method.')
if callable(self.url):
return self.url(datum, **self.kwargs)
try:
if datum:
obj_id = self.table.get_object_id(datum)
return urls.reverse(self.url, args=(obj_id,))
else:
return urls.reverse(self.url)
except urls.NoReverseMatch as ex:
LOG.info('No reverse found for "%(url)s": %(exception)s',
{'url': self.url, 'exception': ex})
return self.url | python | def get_link_url(self, datum=None):
"""Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
"""
if not self.url:
raise NotImplementedError('A LinkAction class must have a '
'url attribute or define its own '
'get_link_url method.')
if callable(self.url):
return self.url(datum, **self.kwargs)
try:
if datum:
obj_id = self.table.get_object_id(datum)
return urls.reverse(self.url, args=(obj_id,))
else:
return urls.reverse(self.url)
except urls.NoReverseMatch as ex:
LOG.info('No reverse found for "%(url)s": %(exception)s',
{'url': self.url, 'exception': ex})
return self.url | [
"def",
"get_link_url",
"(",
"self",
",",
"datum",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"url",
":",
"raise",
"NotImplementedError",
"(",
"'A LinkAction class must have a '",
"'url attribute or define its own '",
"'get_link_url method.'",
")",
"if",
"callabl... | Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter. | [
"Returns",
"the",
"final",
"URL",
"based",
"on",
"the",
"value",
"of",
"url",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/actions.py#L388-L413 | train | 215,880 |
openstack/horizon | horizon/tables/actions.py | FilterAction.get_param_name | def get_param_name(self):
"""Returns the full query parameter name for this action.
Defaults to
``{{ table.name }}__{{ action.name }}__{{ action.param_name }}``.
"""
return "__".join([self.table.name, self.name, self.param_name]) | python | def get_param_name(self):
"""Returns the full query parameter name for this action.
Defaults to
``{{ table.name }}__{{ action.name }}__{{ action.param_name }}``.
"""
return "__".join([self.table.name, self.name, self.param_name]) | [
"def",
"get_param_name",
"(",
"self",
")",
":",
"return",
"\"__\"",
".",
"join",
"(",
"[",
"self",
".",
"table",
".",
"name",
",",
"self",
".",
"name",
",",
"self",
".",
"param_name",
"]",
")"
] | Returns the full query parameter name for this action.
Defaults to
``{{ table.name }}__{{ action.name }}__{{ action.param_name }}``. | [
"Returns",
"the",
"full",
"query",
"parameter",
"name",
"for",
"this",
"action",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/actions.py#L492-L498 | train | 215,881 |
openstack/horizon | horizon/tables/actions.py | FilterAction.is_api_filter | def is_api_filter(self, filter_field):
"""Determine if agiven filter field should be used as an API filter."""
if self.filter_type == 'server':
for choice in self.filter_choices:
if (choice[0] == filter_field and len(choice) > 2 and
choice[2]):
return True
return False | python | def is_api_filter(self, filter_field):
"""Determine if agiven filter field should be used as an API filter."""
if self.filter_type == 'server':
for choice in self.filter_choices:
if (choice[0] == filter_field and len(choice) > 2 and
choice[2]):
return True
return False | [
"def",
"is_api_filter",
"(",
"self",
",",
"filter_field",
")",
":",
"if",
"self",
".",
"filter_type",
"==",
"'server'",
":",
"for",
"choice",
"in",
"self",
".",
"filter_choices",
":",
"if",
"(",
"choice",
"[",
"0",
"]",
"==",
"filter_field",
"and",
"len"... | Determine if agiven filter field should be used as an API filter. | [
"Determine",
"if",
"agiven",
"filter",
"field",
"should",
"be",
"used",
"as",
"an",
"API",
"filter",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/actions.py#L534-L541 | train | 215,882 |
openstack/horizon | horizon/tables/actions.py | FilterAction.get_select_options | def get_select_options(self):
"""Provide the value, string, and help_text for the template to render.
help_text is returned if applicable.
"""
if self.filter_choices:
return [choice[:4] for choice in self.filter_choices
# Display it If the fifth element is True or does not exist
if len(choice) < 5 or choice[4]] | python | def get_select_options(self):
"""Provide the value, string, and help_text for the template to render.
help_text is returned if applicable.
"""
if self.filter_choices:
return [choice[:4] for choice in self.filter_choices
# Display it If the fifth element is True or does not exist
if len(choice) < 5 or choice[4]] | [
"def",
"get_select_options",
"(",
"self",
")",
":",
"if",
"self",
".",
"filter_choices",
":",
"return",
"[",
"choice",
"[",
":",
"4",
"]",
"for",
"choice",
"in",
"self",
".",
"filter_choices",
"# Display it If the fifth element is True or does not exist",
"if",
"l... | Provide the value, string, and help_text for the template to render.
help_text is returned if applicable. | [
"Provide",
"the",
"value",
"string",
"and",
"help_text",
"for",
"the",
"template",
"to",
"render",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/actions.py#L543-L551 | train | 215,883 |
openstack/horizon | horizon/tables/actions.py | BatchAction._get_action_name | def _get_action_name(self, items=None, past=False):
"""Retreive action name based on the number of items and `past` flag.
:param items:
A list or tuple of items (or container with a __len__ method) to
count the number of concerned items for which this method is
called.
When this method is called for a single item (by the BatchAction
itself), this parameter can be omitted and the number of items
will be considered as "one".
If we want to evaluate to "zero" this parameter must not be omitted
(and should be an empty container).
:param past:
Boolean flag indicating if the action took place in the past.
By default a present action is considered.
"""
action_type = "past" if past else "present"
if items is None:
# Called without items parameter (by a single instance.)
count = 1
else:
count = len(items)
action_attr = getattr(self, "action_%s" % action_type)(count)
if isinstance(action_attr, (six.string_types, Promise)):
action = action_attr
else:
toggle_selection = getattr(self, "current_%s_action" % action_type)
action = action_attr[toggle_selection]
return action | python | def _get_action_name(self, items=None, past=False):
"""Retreive action name based on the number of items and `past` flag.
:param items:
A list or tuple of items (or container with a __len__ method) to
count the number of concerned items for which this method is
called.
When this method is called for a single item (by the BatchAction
itself), this parameter can be omitted and the number of items
will be considered as "one".
If we want to evaluate to "zero" this parameter must not be omitted
(and should be an empty container).
:param past:
Boolean flag indicating if the action took place in the past.
By default a present action is considered.
"""
action_type = "past" if past else "present"
if items is None:
# Called without items parameter (by a single instance.)
count = 1
else:
count = len(items)
action_attr = getattr(self, "action_%s" % action_type)(count)
if isinstance(action_attr, (six.string_types, Promise)):
action = action_attr
else:
toggle_selection = getattr(self, "current_%s_action" % action_type)
action = action_attr[toggle_selection]
return action | [
"def",
"_get_action_name",
"(",
"self",
",",
"items",
"=",
"None",
",",
"past",
"=",
"False",
")",
":",
"action_type",
"=",
"\"past\"",
"if",
"past",
"else",
"\"present\"",
"if",
"items",
"is",
"None",
":",
"# Called without items parameter (by a single instance.)... | Retreive action name based on the number of items and `past` flag.
:param items:
A list or tuple of items (or container with a __len__ method) to
count the number of concerned items for which this method is
called.
When this method is called for a single item (by the BatchAction
itself), this parameter can be omitted and the number of items
will be considered as "one".
If we want to evaluate to "zero" this parameter must not be omitted
(and should be an empty container).
:param past:
Boolean flag indicating if the action took place in the past.
By default a present action is considered. | [
"Retreive",
"action",
"name",
"based",
"on",
"the",
"number",
"of",
"items",
"and",
"past",
"flag",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/actions.py#L697-L730 | train | 215,884 |
openstack/horizon | horizon/tables/actions.py | BatchAction.update | def update(self, request, datum):
"""Switches the action verbose name, if needed."""
if getattr(self, 'action_present', False):
self.verbose_name = self._get_action_name()
self.verbose_name_plural = self._get_action_name('plural') | python | def update(self, request, datum):
"""Switches the action verbose name, if needed."""
if getattr(self, 'action_present', False):
self.verbose_name = self._get_action_name()
self.verbose_name_plural = self._get_action_name('plural') | [
"def",
"update",
"(",
"self",
",",
"request",
",",
"datum",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'action_present'",
",",
"False",
")",
":",
"self",
".",
"verbose_name",
"=",
"self",
".",
"_get_action_name",
"(",
")",
"self",
".",
"verbose_name_p... | Switches the action verbose name, if needed. | [
"Switches",
"the",
"action",
"verbose",
"name",
"if",
"needed",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/actions.py#L740-L744 | train | 215,885 |
openstack/horizon | horizon/tables/actions.py | BatchAction.get_default_attrs | def get_default_attrs(self):
"""Returns a list of the default HTML attributes for the action."""
attrs = super(BatchAction, self).get_default_attrs()
attrs.update({'data-batch-action': 'true'})
return attrs | python | def get_default_attrs(self):
"""Returns a list of the default HTML attributes for the action."""
attrs = super(BatchAction, self).get_default_attrs()
attrs.update({'data-batch-action': 'true'})
return attrs | [
"def",
"get_default_attrs",
"(",
"self",
")",
":",
"attrs",
"=",
"super",
"(",
"BatchAction",
",",
"self",
")",
".",
"get_default_attrs",
"(",
")",
"attrs",
".",
"update",
"(",
"{",
"'data-batch-action'",
":",
"'true'",
"}",
")",
"return",
"attrs"
] | Returns a list of the default HTML attributes for the action. | [
"Returns",
"a",
"list",
"of",
"the",
"default",
"HTML",
"attributes",
"for",
"the",
"action",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/actions.py#L752-L756 | train | 215,886 |
openstack/horizon | horizon/tabs/views.py | TabView.get_tabs | def get_tabs(self, request, **kwargs):
"""Returns the initialized tab group for this view."""
if self._tab_group is None:
self._tab_group = self.tab_group_class(request, **kwargs)
return self._tab_group | python | def get_tabs(self, request, **kwargs):
"""Returns the initialized tab group for this view."""
if self._tab_group is None:
self._tab_group = self.tab_group_class(request, **kwargs)
return self._tab_group | [
"def",
"get_tabs",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_tab_group",
"is",
"None",
":",
"self",
".",
"_tab_group",
"=",
"self",
".",
"tab_group_class",
"(",
"request",
",",
"*",
"*",
"kwargs",
")",
"retu... | Returns the initialized tab group for this view. | [
"Returns",
"the",
"initialized",
"tab",
"group",
"for",
"this",
"view",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tabs/views.py#L40-L44 | train | 215,887 |
openstack/horizon | horizon/tabs/views.py | TabView.get_context_data | def get_context_data(self, **kwargs):
"""Adds the ``tab_group`` variable to the context data."""
context = super(TabView, self).get_context_data(**kwargs)
try:
tab_group = self.get_tabs(self.request, **kwargs)
context["tab_group"] = tab_group
# Make sure our data is pre-loaded to capture errors.
context["tab_group"].load_tab_data()
except Exception:
exceptions.handle(self.request)
return context | python | def get_context_data(self, **kwargs):
"""Adds the ``tab_group`` variable to the context data."""
context = super(TabView, self).get_context_data(**kwargs)
try:
tab_group = self.get_tabs(self.request, **kwargs)
context["tab_group"] = tab_group
# Make sure our data is pre-loaded to capture errors.
context["tab_group"].load_tab_data()
except Exception:
exceptions.handle(self.request)
return context | [
"def",
"get_context_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"super",
"(",
"TabView",
",",
"self",
")",
".",
"get_context_data",
"(",
"*",
"*",
"kwargs",
")",
"try",
":",
"tab_group",
"=",
"self",
".",
"get_tabs",
"(",
"... | Adds the ``tab_group`` variable to the context data. | [
"Adds",
"the",
"tab_group",
"variable",
"to",
"the",
"context",
"data",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tabs/views.py#L46-L56 | train | 215,888 |
openstack/horizon | horizon/tabs/views.py | TabView.handle_tabbed_response | def handle_tabbed_response(self, tab_group, context):
"""Sends back an AJAX-appropriate response for the tab group if needed.
Otherwise renders the response as normal.
"""
if self.request.is_ajax():
if tab_group.selected:
return http.HttpResponse(tab_group.selected.render())
else:
return http.HttpResponse(tab_group.render())
return self.render_to_response(context) | python | def handle_tabbed_response(self, tab_group, context):
"""Sends back an AJAX-appropriate response for the tab group if needed.
Otherwise renders the response as normal.
"""
if self.request.is_ajax():
if tab_group.selected:
return http.HttpResponse(tab_group.selected.render())
else:
return http.HttpResponse(tab_group.render())
return self.render_to_response(context) | [
"def",
"handle_tabbed_response",
"(",
"self",
",",
"tab_group",
",",
"context",
")",
":",
"if",
"self",
".",
"request",
".",
"is_ajax",
"(",
")",
":",
"if",
"tab_group",
".",
"selected",
":",
"return",
"http",
".",
"HttpResponse",
"(",
"tab_group",
".",
... | Sends back an AJAX-appropriate response for the tab group if needed.
Otherwise renders the response as normal. | [
"Sends",
"back",
"an",
"AJAX",
"-",
"appropriate",
"response",
"for",
"the",
"tab",
"group",
"if",
"needed",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tabs/views.py#L58-L68 | train | 215,889 |
openstack/horizon | horizon/tabs/views.py | TabbedTableView.load_tabs | def load_tabs(self):
"""Loads the tab group.
It compiles the table instances for each table attached to
any :class:`horizon.tabs.TableTab` instances on the tab group.
This step is necessary before processing any tab or table actions.
"""
tab_group = self.get_tabs(self.request, **self.kwargs)
tabs = tab_group.get_tabs()
for tab in [t for t in tabs if issubclass(t.__class__, TableTab)]:
self.table_classes.extend(tab.table_classes)
for table in tab._tables.values():
self._table_dict[table._meta.name] = {'table': table,
'tab': tab} | python | def load_tabs(self):
"""Loads the tab group.
It compiles the table instances for each table attached to
any :class:`horizon.tabs.TableTab` instances on the tab group.
This step is necessary before processing any tab or table actions.
"""
tab_group = self.get_tabs(self.request, **self.kwargs)
tabs = tab_group.get_tabs()
for tab in [t for t in tabs if issubclass(t.__class__, TableTab)]:
self.table_classes.extend(tab.table_classes)
for table in tab._tables.values():
self._table_dict[table._meta.name] = {'table': table,
'tab': tab} | [
"def",
"load_tabs",
"(",
"self",
")",
":",
"tab_group",
"=",
"self",
".",
"get_tabs",
"(",
"self",
".",
"request",
",",
"*",
"*",
"self",
".",
"kwargs",
")",
"tabs",
"=",
"tab_group",
".",
"get_tabs",
"(",
")",
"for",
"tab",
"in",
"[",
"t",
"for",
... | Loads the tab group.
It compiles the table instances for each table attached to
any :class:`horizon.tabs.TableTab` instances on the tab group.
This step is necessary before processing any tab or table actions. | [
"Loads",
"the",
"tab",
"group",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tabs/views.py#L81-L94 | train | 215,890 |
openstack/horizon | horizon/tabs/views.py | TabbedTableView.handle_table | def handle_table(self, table_dict):
"""Loads the table data based on a given table_dict and handles them.
For the given dict containing a ``DataTable`` and a ``TableTab``
instance, it loads the table data for that tab and calls the
table's :meth:`~horizon.tables.DataTable.maybe_handle` method.
The return value will be the result of ``maybe_handle``.
"""
table = table_dict['table']
tab = table_dict['tab']
tab.load_table_data()
table_name = table._meta.name
tab._tables[table_name]._meta.has_prev_data = self.has_prev_data(table)
tab._tables[table_name]._meta.has_more_data = self.has_more_data(table)
handled = tab._tables[table_name].maybe_handle()
return handled | python | def handle_table(self, table_dict):
"""Loads the table data based on a given table_dict and handles them.
For the given dict containing a ``DataTable`` and a ``TableTab``
instance, it loads the table data for that tab and calls the
table's :meth:`~horizon.tables.DataTable.maybe_handle` method.
The return value will be the result of ``maybe_handle``.
"""
table = table_dict['table']
tab = table_dict['tab']
tab.load_table_data()
table_name = table._meta.name
tab._tables[table_name]._meta.has_prev_data = self.has_prev_data(table)
tab._tables[table_name]._meta.has_more_data = self.has_more_data(table)
handled = tab._tables[table_name].maybe_handle()
return handled | [
"def",
"handle_table",
"(",
"self",
",",
"table_dict",
")",
":",
"table",
"=",
"table_dict",
"[",
"'table'",
"]",
"tab",
"=",
"table_dict",
"[",
"'tab'",
"]",
"tab",
".",
"load_table_data",
"(",
")",
"table_name",
"=",
"table",
".",
"_meta",
".",
"name",... | Loads the table data based on a given table_dict and handles them.
For the given dict containing a ``DataTable`` and a ``TableTab``
instance, it loads the table data for that tab and calls the
table's :meth:`~horizon.tables.DataTable.maybe_handle` method.
The return value will be the result of ``maybe_handle``. | [
"Loads",
"the",
"table",
"data",
"based",
"on",
"a",
"given",
"table_dict",
"and",
"handles",
"them",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tabs/views.py#L102-L117 | train | 215,891 |
openstack/horizon | openstack_dashboard/context_processors.py | openstack | def openstack(request):
"""Context processor necessary for OpenStack Dashboard functionality.
The following variables are added to the request context:
``authorized_tenants``
A list of tenant objects which the current user has access to.
``regions``
A dictionary containing information about region support, the current
region, and available regions.
"""
context = {}
# Auth/Keystone context
context.setdefault('authorized_tenants', [])
if request.user.is_authenticated:
context['authorized_tenants'] = [
tenant for tenant in
request.user.authorized_tenants if tenant.enabled]
# Region context/support
available_regions = getattr(settings, 'AVAILABLE_REGIONS', [])
regions = {'support': len(available_regions) > 1,
'current': {'endpoint': request.session.get('region_endpoint'),
'name': request.session.get('region_name')},
'available': [{'endpoint': region[0], 'name':region[1]} for
region in available_regions]}
# K2K Federation Service Providers context/support
available_providers = request.session.get('keystone_providers', [])
if available_providers:
provider_id = request.session.get('keystone_provider_id', None)
provider_name = None
for provider in available_providers:
if provider['id'] == provider_id:
provider_name = provider.get('name')
keystone_providers = {
'support': len(available_providers) > 1,
'current': {
'name': provider_name,
'id': provider_id
},
'available': [
{'name': keystone_provider['name'],
'id': keystone_provider['id']}
for keystone_provider in available_providers]
}
else:
keystone_providers = {'support': False}
context['keystone_providers'] = keystone_providers
context['regions'] = regions
# Adding webroot access
context['WEBROOT'] = getattr(settings, "WEBROOT", "/")
user_menu_links = getattr(settings, "USER_MENU_LINKS", [])
if not getattr(settings, "SHOW_KEYSTONE_V2_RC", False):
user_menu_links = [
link for link in user_menu_links
if link['url'] != 'horizon:project:api_access:openrcv2']
context['USER_MENU_LINKS'] = user_menu_links
# Adding profiler support flag
profiler_settings = getattr(settings, 'OPENSTACK_PROFILER', {})
profiler_enabled = profiler_settings.get('enabled', False)
context['profiler_enabled'] = profiler_enabled
if profiler_enabled and 'profile_page' in request.COOKIES:
index_view_id = request.META.get(profiler.ROOT_HEADER, '')
hmac_keys = profiler_settings.get('keys', [])
context['x_trace_info'] = profiler.update_trace_headers(
hmac_keys, parent_id=index_view_id)
context['JS_CATALOG'] = get_js_catalog(conf)
return context | python | def openstack(request):
"""Context processor necessary for OpenStack Dashboard functionality.
The following variables are added to the request context:
``authorized_tenants``
A list of tenant objects which the current user has access to.
``regions``
A dictionary containing information about region support, the current
region, and available regions.
"""
context = {}
# Auth/Keystone context
context.setdefault('authorized_tenants', [])
if request.user.is_authenticated:
context['authorized_tenants'] = [
tenant for tenant in
request.user.authorized_tenants if tenant.enabled]
# Region context/support
available_regions = getattr(settings, 'AVAILABLE_REGIONS', [])
regions = {'support': len(available_regions) > 1,
'current': {'endpoint': request.session.get('region_endpoint'),
'name': request.session.get('region_name')},
'available': [{'endpoint': region[0], 'name':region[1]} for
region in available_regions]}
# K2K Federation Service Providers context/support
available_providers = request.session.get('keystone_providers', [])
if available_providers:
provider_id = request.session.get('keystone_provider_id', None)
provider_name = None
for provider in available_providers:
if provider['id'] == provider_id:
provider_name = provider.get('name')
keystone_providers = {
'support': len(available_providers) > 1,
'current': {
'name': provider_name,
'id': provider_id
},
'available': [
{'name': keystone_provider['name'],
'id': keystone_provider['id']}
for keystone_provider in available_providers]
}
else:
keystone_providers = {'support': False}
context['keystone_providers'] = keystone_providers
context['regions'] = regions
# Adding webroot access
context['WEBROOT'] = getattr(settings, "WEBROOT", "/")
user_menu_links = getattr(settings, "USER_MENU_LINKS", [])
if not getattr(settings, "SHOW_KEYSTONE_V2_RC", False):
user_menu_links = [
link for link in user_menu_links
if link['url'] != 'horizon:project:api_access:openrcv2']
context['USER_MENU_LINKS'] = user_menu_links
# Adding profiler support flag
profiler_settings = getattr(settings, 'OPENSTACK_PROFILER', {})
profiler_enabled = profiler_settings.get('enabled', False)
context['profiler_enabled'] = profiler_enabled
if profiler_enabled and 'profile_page' in request.COOKIES:
index_view_id = request.META.get(profiler.ROOT_HEADER, '')
hmac_keys = profiler_settings.get('keys', [])
context['x_trace_info'] = profiler.update_trace_headers(
hmac_keys, parent_id=index_view_id)
context['JS_CATALOG'] = get_js_catalog(conf)
return context | [
"def",
"openstack",
"(",
"request",
")",
":",
"context",
"=",
"{",
"}",
"# Auth/Keystone context",
"context",
".",
"setdefault",
"(",
"'authorized_tenants'",
",",
"[",
"]",
")",
"if",
"request",
".",
"user",
".",
"is_authenticated",
":",
"context",
"[",
"'au... | Context processor necessary for OpenStack Dashboard functionality.
The following variables are added to the request context:
``authorized_tenants``
A list of tenant objects which the current user has access to.
``regions``
A dictionary containing information about region support, the current
region, and available regions. | [
"Context",
"processor",
"necessary",
"for",
"OpenStack",
"Dashboard",
"functionality",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/context_processors.py#L30-L110 | train | 215,892 |
openstack/horizon | openstack_auth/utils.py | is_token_valid | def is_token_valid(token, margin=None):
"""Timezone-aware checking of the auth token's expiration timestamp.
Returns ``True`` if the token has not yet expired, otherwise ``False``.
:param token: The openstack_auth.user.Token instance to check
:param margin:
A time margin in seconds to subtract from the real token's validity.
An example usage is that the token can be valid once the middleware
passed, and invalid (timed-out) during a view rendering and this
generates authorization errors during the view rendering.
A default margin can be set by the TOKEN_TIMEOUT_MARGIN in the
django settings.
"""
expiration = token.expires
# In case we get an unparseable expiration timestamp, return False
# so you can't have a "forever" token just by breaking the expires param.
if expiration is None:
return False
if margin is None:
margin = getattr(settings, 'TOKEN_TIMEOUT_MARGIN', 0)
expiration = expiration - datetime.timedelta(seconds=margin)
if settings.USE_TZ and timezone.is_naive(expiration):
# Presumes that the Keystone is using UTC.
expiration = timezone.make_aware(expiration, timezone.utc)
return expiration > timezone.now() | python | def is_token_valid(token, margin=None):
"""Timezone-aware checking of the auth token's expiration timestamp.
Returns ``True`` if the token has not yet expired, otherwise ``False``.
:param token: The openstack_auth.user.Token instance to check
:param margin:
A time margin in seconds to subtract from the real token's validity.
An example usage is that the token can be valid once the middleware
passed, and invalid (timed-out) during a view rendering and this
generates authorization errors during the view rendering.
A default margin can be set by the TOKEN_TIMEOUT_MARGIN in the
django settings.
"""
expiration = token.expires
# In case we get an unparseable expiration timestamp, return False
# so you can't have a "forever" token just by breaking the expires param.
if expiration is None:
return False
if margin is None:
margin = getattr(settings, 'TOKEN_TIMEOUT_MARGIN', 0)
expiration = expiration - datetime.timedelta(seconds=margin)
if settings.USE_TZ and timezone.is_naive(expiration):
# Presumes that the Keystone is using UTC.
expiration = timezone.make_aware(expiration, timezone.utc)
return expiration > timezone.now() | [
"def",
"is_token_valid",
"(",
"token",
",",
"margin",
"=",
"None",
")",
":",
"expiration",
"=",
"token",
".",
"expires",
"# In case we get an unparseable expiration timestamp, return False",
"# so you can't have a \"forever\" token just by breaking the expires param.",
"if",
"exp... | Timezone-aware checking of the auth token's expiration timestamp.
Returns ``True`` if the token has not yet expired, otherwise ``False``.
:param token: The openstack_auth.user.Token instance to check
:param margin:
A time margin in seconds to subtract from the real token's validity.
An example usage is that the token can be valid once the middleware
passed, and invalid (timed-out) during a view rendering and this
generates authorization errors during the view rendering.
A default margin can be set by the TOKEN_TIMEOUT_MARGIN in the
django settings. | [
"Timezone",
"-",
"aware",
"checking",
"of",
"the",
"auth",
"token",
"s",
"expiration",
"timestamp",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L75-L101 | train | 215,893 |
openstack/horizon | openstack_auth/utils.py | is_safe_url | def is_safe_url(url, host=None):
"""Return ``True`` if the url is a safe redirection.
The safe redirection means that it doesn't point to a different host.
Always returns ``False`` on an empty url.
"""
if not url:
return False
netloc = urlparse.urlparse(url)[1]
return not netloc or netloc == host | python | def is_safe_url(url, host=None):
"""Return ``True`` if the url is a safe redirection.
The safe redirection means that it doesn't point to a different host.
Always returns ``False`` on an empty url.
"""
if not url:
return False
netloc = urlparse.urlparse(url)[1]
return not netloc or netloc == host | [
"def",
"is_safe_url",
"(",
"url",
",",
"host",
"=",
"None",
")",
":",
"if",
"not",
"url",
":",
"return",
"False",
"netloc",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"[",
"1",
"]",
"return",
"not",
"netloc",
"or",
"netloc",
"==",
"host"
] | Return ``True`` if the url is a safe redirection.
The safe redirection means that it doesn't point to a different host.
Always returns ``False`` on an empty url. | [
"Return",
"True",
"if",
"the",
"url",
"is",
"a",
"safe",
"redirection",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L107-L116 | train | 215,894 |
openstack/horizon | openstack_auth/utils.py | build_absolute_uri | def build_absolute_uri(request, relative_url):
"""Ensure absolute_uri are relative to WEBROOT."""
webroot = getattr(settings, 'WEBROOT', '')
if webroot.endswith("/") and relative_url.startswith("/"):
webroot = webroot[:-1]
return request.build_absolute_uri(webroot + relative_url) | python | def build_absolute_uri(request, relative_url):
"""Ensure absolute_uri are relative to WEBROOT."""
webroot = getattr(settings, 'WEBROOT', '')
if webroot.endswith("/") and relative_url.startswith("/"):
webroot = webroot[:-1]
return request.build_absolute_uri(webroot + relative_url) | [
"def",
"build_absolute_uri",
"(",
"request",
",",
"relative_url",
")",
":",
"webroot",
"=",
"getattr",
"(",
"settings",
",",
"'WEBROOT'",
",",
"''",
")",
"if",
"webroot",
".",
"endswith",
"(",
"\"/\"",
")",
"and",
"relative_url",
".",
"startswith",
"(",
"\... | Ensure absolute_uri are relative to WEBROOT. | [
"Ensure",
"absolute_uri",
"are",
"relative",
"to",
"WEBROOT",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L173-L179 | train | 215,895 |
openstack/horizon | openstack_auth/utils.py | get_websso_url | def get_websso_url(request, auth_url, websso_auth):
"""Return the keystone endpoint for initiating WebSSO.
Generate the keystone WebSSO endpoint that will redirect the user
to the login page of the federated identity provider.
Based on the authentication type selected by the user in the login
form, it will construct the keystone WebSSO endpoint.
:param request: Django http request object.
:type request: django.http.HttpRequest
:param auth_url: Keystone endpoint configured in the horizon setting.
If WEBSSO_KEYSTONE_URL is defined, its value will be
used. Otherwise, the value is derived from:
- OPENSTACK_KEYSTONE_URL
- AVAILABLE_REGIONS
:type auth_url: string
:param websso_auth: Authentication type selected by the user from the
login form. The value is derived from the horizon
setting WEBSSO_CHOICES.
:type websso_auth: string
Example of horizon WebSSO setting::
WEBSSO_CHOICES = (
("credentials", "Keystone Credentials"),
("oidc", "OpenID Connect"),
("saml2", "Security Assertion Markup Language"),
("acme_oidc", "ACME - OpenID Connect"),
("acme_saml2", "ACME - SAML2")
)
WEBSSO_IDP_MAPPING = {
"acme_oidc": ("acme", "oidc"),
"acme_saml2": ("acme", "saml2")
}
}
The value of websso_auth will be looked up in the WEBSSO_IDP_MAPPING
dictionary, if a match is found it will return a IdP specific WebSSO
endpoint using the values found in the mapping.
The value in WEBSSO_IDP_MAPPING is expected to be a tuple formatted as
(<idp_id>, <protocol_id>). Using the values found, a IdP/protocol
specific URL will be constructed::
/auth/OS-FEDERATION/identity_providers/<idp_id>
/protocols/<protocol_id>/websso
If no value is found from the WEBSSO_IDP_MAPPING dictionary, it will
treat the value as the global WebSSO protocol <protocol_id> and
construct the WebSSO URL by::
/auth/OS-FEDERATION/websso/<protocol_id>
:returns: Keystone WebSSO endpoint.
:rtype: string
"""
origin = build_absolute_uri(request, '/auth/websso/')
idp_mapping = getattr(settings, 'WEBSSO_IDP_MAPPING', {})
idp_id, protocol_id = idp_mapping.get(websso_auth,
(None, websso_auth))
if idp_id:
# Use the IDP specific WebSSO endpoint
url = ('%s/auth/OS-FEDERATION/identity_providers/%s'
'/protocols/%s/websso?origin=%s' %
(auth_url, idp_id, protocol_id, origin))
else:
# If no IDP mapping found for the identifier,
# perform WebSSO by protocol.
url = ('%s/auth/OS-FEDERATION/websso/%s?origin=%s' %
(auth_url, protocol_id, origin))
return url | python | def get_websso_url(request, auth_url, websso_auth):
"""Return the keystone endpoint for initiating WebSSO.
Generate the keystone WebSSO endpoint that will redirect the user
to the login page of the federated identity provider.
Based on the authentication type selected by the user in the login
form, it will construct the keystone WebSSO endpoint.
:param request: Django http request object.
:type request: django.http.HttpRequest
:param auth_url: Keystone endpoint configured in the horizon setting.
If WEBSSO_KEYSTONE_URL is defined, its value will be
used. Otherwise, the value is derived from:
- OPENSTACK_KEYSTONE_URL
- AVAILABLE_REGIONS
:type auth_url: string
:param websso_auth: Authentication type selected by the user from the
login form. The value is derived from the horizon
setting WEBSSO_CHOICES.
:type websso_auth: string
Example of horizon WebSSO setting::
WEBSSO_CHOICES = (
("credentials", "Keystone Credentials"),
("oidc", "OpenID Connect"),
("saml2", "Security Assertion Markup Language"),
("acme_oidc", "ACME - OpenID Connect"),
("acme_saml2", "ACME - SAML2")
)
WEBSSO_IDP_MAPPING = {
"acme_oidc": ("acme", "oidc"),
"acme_saml2": ("acme", "saml2")
}
}
The value of websso_auth will be looked up in the WEBSSO_IDP_MAPPING
dictionary, if a match is found it will return a IdP specific WebSSO
endpoint using the values found in the mapping.
The value in WEBSSO_IDP_MAPPING is expected to be a tuple formatted as
(<idp_id>, <protocol_id>). Using the values found, a IdP/protocol
specific URL will be constructed::
/auth/OS-FEDERATION/identity_providers/<idp_id>
/protocols/<protocol_id>/websso
If no value is found from the WEBSSO_IDP_MAPPING dictionary, it will
treat the value as the global WebSSO protocol <protocol_id> and
construct the WebSSO URL by::
/auth/OS-FEDERATION/websso/<protocol_id>
:returns: Keystone WebSSO endpoint.
:rtype: string
"""
origin = build_absolute_uri(request, '/auth/websso/')
idp_mapping = getattr(settings, 'WEBSSO_IDP_MAPPING', {})
idp_id, protocol_id = idp_mapping.get(websso_auth,
(None, websso_auth))
if idp_id:
# Use the IDP specific WebSSO endpoint
url = ('%s/auth/OS-FEDERATION/identity_providers/%s'
'/protocols/%s/websso?origin=%s' %
(auth_url, idp_id, protocol_id, origin))
else:
# If no IDP mapping found for the identifier,
# perform WebSSO by protocol.
url = ('%s/auth/OS-FEDERATION/websso/%s?origin=%s' %
(auth_url, protocol_id, origin))
return url | [
"def",
"get_websso_url",
"(",
"request",
",",
"auth_url",
",",
"websso_auth",
")",
":",
"origin",
"=",
"build_absolute_uri",
"(",
"request",
",",
"'/auth/websso/'",
")",
"idp_mapping",
"=",
"getattr",
"(",
"settings",
",",
"'WEBSSO_IDP_MAPPING'",
",",
"{",
"}",
... | Return the keystone endpoint for initiating WebSSO.
Generate the keystone WebSSO endpoint that will redirect the user
to the login page of the federated identity provider.
Based on the authentication type selected by the user in the login
form, it will construct the keystone WebSSO endpoint.
:param request: Django http request object.
:type request: django.http.HttpRequest
:param auth_url: Keystone endpoint configured in the horizon setting.
If WEBSSO_KEYSTONE_URL is defined, its value will be
used. Otherwise, the value is derived from:
- OPENSTACK_KEYSTONE_URL
- AVAILABLE_REGIONS
:type auth_url: string
:param websso_auth: Authentication type selected by the user from the
login form. The value is derived from the horizon
setting WEBSSO_CHOICES.
:type websso_auth: string
Example of horizon WebSSO setting::
WEBSSO_CHOICES = (
("credentials", "Keystone Credentials"),
("oidc", "OpenID Connect"),
("saml2", "Security Assertion Markup Language"),
("acme_oidc", "ACME - OpenID Connect"),
("acme_saml2", "ACME - SAML2")
)
WEBSSO_IDP_MAPPING = {
"acme_oidc": ("acme", "oidc"),
"acme_saml2": ("acme", "saml2")
}
}
The value of websso_auth will be looked up in the WEBSSO_IDP_MAPPING
dictionary, if a match is found it will return a IdP specific WebSSO
endpoint using the values found in the mapping.
The value in WEBSSO_IDP_MAPPING is expected to be a tuple formatted as
(<idp_id>, <protocol_id>). Using the values found, a IdP/protocol
specific URL will be constructed::
/auth/OS-FEDERATION/identity_providers/<idp_id>
/protocols/<protocol_id>/websso
If no value is found from the WEBSSO_IDP_MAPPING dictionary, it will
treat the value as the global WebSSO protocol <protocol_id> and
construct the WebSSO URL by::
/auth/OS-FEDERATION/websso/<protocol_id>
:returns: Keystone WebSSO endpoint.
:rtype: string | [
"Return",
"the",
"keystone",
"endpoint",
"for",
"initiating",
"WebSSO",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L182-L257 | train | 215,896 |
openstack/horizon | openstack_auth/utils.py | has_in_url_path | def has_in_url_path(url, subs):
"""Test if any of `subs` strings is present in the `url` path."""
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
return any([sub in path for sub in subs]) | python | def has_in_url_path(url, subs):
"""Test if any of `subs` strings is present in the `url` path."""
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
return any([sub in path for sub in subs]) | [
"def",
"has_in_url_path",
"(",
"url",
",",
"subs",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"=",
"urlparse",
".",
"urlsplit",
"(",
"url",
")",
"return",
"any",
"(",
"[",
"sub",
"in",
"path",
"for",
"sub",
"in",
... | Test if any of `subs` strings is present in the `url` path. | [
"Test",
"if",
"any",
"of",
"subs",
"strings",
"is",
"present",
"in",
"the",
"url",
"path",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L260-L263 | train | 215,897 |
openstack/horizon | openstack_auth/utils.py | url_path_replace | def url_path_replace(url, old, new, count=None):
"""Return a copy of url with replaced path.
Return a copy of url with all occurrences of old replaced by new in the url
path. If the optional argument count is given, only the first count
occurrences are replaced.
"""
args = []
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if count is not None:
args.append(count)
return urlparse.urlunsplit((
scheme, netloc, path.replace(old, new, *args), query, fragment)) | python | def url_path_replace(url, old, new, count=None):
"""Return a copy of url with replaced path.
Return a copy of url with all occurrences of old replaced by new in the url
path. If the optional argument count is given, only the first count
occurrences are replaced.
"""
args = []
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if count is not None:
args.append(count)
return urlparse.urlunsplit((
scheme, netloc, path.replace(old, new, *args), query, fragment)) | [
"def",
"url_path_replace",
"(",
"url",
",",
"old",
",",
"new",
",",
"count",
"=",
"None",
")",
":",
"args",
"=",
"[",
"]",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"=",
"urlparse",
".",
"urlsplit",
"(",
"url",
")",
"if",... | Return a copy of url with replaced path.
Return a copy of url with all occurrences of old replaced by new in the url
path. If the optional argument count is given, only the first count
occurrences are replaced. | [
"Return",
"a",
"copy",
"of",
"url",
"with",
"replaced",
"path",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L266-L278 | train | 215,898 |
openstack/horizon | openstack_auth/utils.py | _augment_url_with_version | def _augment_url_with_version(auth_url):
"""Optionally augment auth_url path with version suffix.
Check if path component already contains version suffix and if it does
not, append version suffix to the end of path, not erasing the previous
path contents, since keystone web endpoint (like /identity) could be
there. Keystone version needs to be added to endpoint because as of Kilo,
the identity URLs returned by Keystone might no longer contain API
versions, leaving the version choice up to the user.
"""
if has_in_url_path(auth_url, ["/v2.0", "/v3"]):
return auth_url
if get_keystone_version() >= 3:
return url_path_append(auth_url, "/v3")
else:
return url_path_append(auth_url, "/v2.0") | python | def _augment_url_with_version(auth_url):
"""Optionally augment auth_url path with version suffix.
Check if path component already contains version suffix and if it does
not, append version suffix to the end of path, not erasing the previous
path contents, since keystone web endpoint (like /identity) could be
there. Keystone version needs to be added to endpoint because as of Kilo,
the identity URLs returned by Keystone might no longer contain API
versions, leaving the version choice up to the user.
"""
if has_in_url_path(auth_url, ["/v2.0", "/v3"]):
return auth_url
if get_keystone_version() >= 3:
return url_path_append(auth_url, "/v3")
else:
return url_path_append(auth_url, "/v2.0") | [
"def",
"_augment_url_with_version",
"(",
"auth_url",
")",
":",
"if",
"has_in_url_path",
"(",
"auth_url",
",",
"[",
"\"/v2.0\"",
",",
"\"/v3\"",
"]",
")",
":",
"return",
"auth_url",
"if",
"get_keystone_version",
"(",
")",
">=",
"3",
":",
"return",
"url_path_app... | Optionally augment auth_url path with version suffix.
Check if path component already contains version suffix and if it does
not, append version suffix to the end of path, not erasing the previous
path contents, since keystone web endpoint (like /identity) could be
there. Keystone version needs to be added to endpoint because as of Kilo,
the identity URLs returned by Keystone might no longer contain API
versions, leaving the version choice up to the user. | [
"Optionally",
"augment",
"auth_url",
"path",
"with",
"version",
"suffix",
"."
] | 5601ea9477323e599d9b766fcac1f8be742935b2 | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L287-L303 | train | 215,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.