| repo (stringlengths 7-55) | path (stringlengths 4-223) | func_name (stringlengths 1-134) | original_string (stringlengths 75-104k) | language (stringclasses 1 value) | code (stringlengths 75-104k) | code_tokens (listlengths 19-28.4k) | docstring (stringlengths 1-46.9k) | docstring_tokens (listlengths 1-1.97k) | sha (stringlengths 40) | url (stringlengths 87-315) | partition (stringclasses 1 value) |
|---|---|---|---|---|---|---|---|---|---|---|---|
tensorflow/datasets
|
tensorflow_datasets/image/sun.py
|
_decode_image
|
def _decode_image(fobj, session, filename):
"""Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels).
"""
buf = fobj.read()
image = tfds.core.lazy_imports.cv2.imdecode(
np.fromstring(buf, dtype=np.uint8), flags=3) # Note: Converts to RGB.
if image is None:
logging.warning(
"Image %s could not be decoded by OpenCV, falling back to TF", filename)
try:
image = tf.image.decode_image(buf, channels=3)
image = session.run(image)
except tf.errors.InvalidArgumentError:
logging.fatal("Image %s could not be decoded by Tensorflow", filename)
# The GIF images contain a single frame.
if len(image.shape) == 4: # rank=4 -> rank=3
image = image.reshape(image.shape[1:])
return image
|
python
|
def _decode_image(fobj, session, filename):
"""Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels).
"""
buf = fobj.read()
image = tfds.core.lazy_imports.cv2.imdecode(
np.fromstring(buf, dtype=np.uint8), flags=3) # Note: Converts to RGB.
if image is None:
logging.warning(
"Image %s could not be decoded by OpenCV, falling back to TF", filename)
try:
image = tf.image.decode_image(buf, channels=3)
image = session.run(image)
except tf.errors.InvalidArgumentError:
logging.fatal("Image %s could not be decoded by Tensorflow", filename)
# The GIF images contain a single frame.
if len(image.shape) == 4: # rank=4 -> rank=3
image = image.reshape(image.shape[1:])
return image
|
[
"def",
"_decode_image",
"(",
"fobj",
",",
"session",
",",
"filename",
")",
":",
"buf",
"=",
"fobj",
".",
"read",
"(",
")",
"image",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
".",
"imdecode",
"(",
"np",
".",
"fromstring",
"(",
"buf",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
",",
"flags",
"=",
"3",
")",
"# Note: Converts to RGB.",
"if",
"image",
"is",
"None",
":",
"logging",
".",
"warning",
"(",
"\"Image %s could not be decoded by OpenCV, falling back to TF\"",
",",
"filename",
")",
"try",
":",
"image",
"=",
"tf",
".",
"image",
".",
"decode_image",
"(",
"buf",
",",
"channels",
"=",
"3",
")",
"image",
"=",
"session",
".",
"run",
"(",
"image",
")",
"except",
"tf",
".",
"errors",
".",
"InvalidArgumentError",
":",
"logging",
".",
"fatal",
"(",
"\"Image %s could not be decoded by Tensorflow\"",
",",
"filename",
")",
"# The GIF images contain a single frame.",
"if",
"len",
"(",
"image",
".",
"shape",
")",
"==",
"4",
":",
"# rank=4 -> rank=3",
"image",
"=",
"image",
".",
"reshape",
"(",
"image",
".",
"shape",
"[",
"1",
":",
"]",
")",
"return",
"image"
] |
Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels).
|
[
"Reads",
"and",
"decodes",
"an",
"image",
"from",
"a",
"file",
"object",
"as",
"a",
"Numpy",
"array",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/sun.py#L65-L102
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/sun.py
|
_process_image_file
|
def _process_image_file(fobj, session, filename):
"""Process image files from the dataset."""
# We need to read the image files and convert them to JPEG, since some files
# actually contain GIF, PNG or BMP data (despite having a .jpg extension) and
# some encoding options that will make TF crash in general.
image = _decode_image(fobj, session, filename=filename)
return _encode_jpeg(image)
|
python
|
def _process_image_file(fobj, session, filename):
"""Process image files from the dataset."""
# We need to read the image files and convert them to JPEG, since some files
# actually contain GIF, PNG or BMP data (despite having a .jpg extension) and
# some encoding options that will make TF crash in general.
image = _decode_image(fobj, session, filename=filename)
return _encode_jpeg(image)
|
[
"def",
"_process_image_file",
"(",
"fobj",
",",
"session",
",",
"filename",
")",
":",
"# We need to read the image files and convert them to JPEG, since some files",
"# actually contain GIF, PNG or BMP data (despite having a .jpg extension) and",
"# some encoding options that will make TF crash in general.",
"image",
"=",
"_decode_image",
"(",
"fobj",
",",
"session",
",",
"filename",
"=",
"filename",
")",
"return",
"_encode_jpeg",
"(",
"image",
")"
] |
Process image files from the dataset.
|
[
"Process",
"image",
"files",
"from",
"the",
"dataset",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/sun.py#L113-L119
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/sun.py
|
Sun397._generate_examples
|
def _generate_examples(self, archive):
"""Yields examples."""
prefix_len = len("SUN397")
with tf.Graph().as_default():
with utils.nogpu_session() as sess:
for filepath, fobj in archive:
if (filepath.endswith(".jpg") and
filepath not in _SUN397_IGNORE_IMAGES):
# Note: all files in the tar.gz are in SUN397/...
filename = filepath[prefix_len:]
# Example:
# From filename: /c/car_interior/backseat/sun_aenygxwhhmjtisnf.jpg
# To class: /c/car_interior/backseat
label = "/".join(filename.split("/")[:-1])
image = _process_image_file(fobj, sess, filepath)
yield {
"file_name": filename,
"image": image,
"label": label,
}
|
python
|
def _generate_examples(self, archive):
"""Yields examples."""
prefix_len = len("SUN397")
with tf.Graph().as_default():
with utils.nogpu_session() as sess:
for filepath, fobj in archive:
if (filepath.endswith(".jpg") and
filepath not in _SUN397_IGNORE_IMAGES):
# Note: all files in the tar.gz are in SUN397/...
filename = filepath[prefix_len:]
# Example:
# From filename: /c/car_interior/backseat/sun_aenygxwhhmjtisnf.jpg
# To class: /c/car_interior/backseat
label = "/".join(filename.split("/")[:-1])
image = _process_image_file(fobj, sess, filepath)
yield {
"file_name": filename,
"image": image,
"label": label,
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"archive",
")",
":",
"prefix_len",
"=",
"len",
"(",
"\"SUN397\"",
")",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"with",
"utils",
".",
"nogpu_session",
"(",
")",
"as",
"sess",
":",
"for",
"filepath",
",",
"fobj",
"in",
"archive",
":",
"if",
"(",
"filepath",
".",
"endswith",
"(",
"\".jpg\"",
")",
"and",
"filepath",
"not",
"in",
"_SUN397_IGNORE_IMAGES",
")",
":",
"# Note: all files in the tar.gz are in SUN397/...",
"filename",
"=",
"filepath",
"[",
"prefix_len",
":",
"]",
"# Example:",
"# From filename: /c/car_interior/backseat/sun_aenygxwhhmjtisnf.jpg",
"# To class: /c/car_interior/backseat",
"label",
"=",
"\"/\"",
".",
"join",
"(",
"filename",
".",
"split",
"(",
"\"/\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"image",
"=",
"_process_image_file",
"(",
"fobj",
",",
"sess",
",",
"filepath",
")",
"yield",
"{",
"\"file_name\"",
":",
"filename",
",",
"\"image\"",
":",
"image",
",",
"\"label\"",
":",
"label",
",",
"}"
] |
Yields examples.
|
[
"Yields",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/sun.py#L157-L176
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/wmt.py
|
_parse_parallel_sentences
|
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with tf.io.gfile.GFile(path) as f, gzip.GzipFile(fileobj=f) as g:
return g.read().split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with tf.io.gfile.GFile(path) as f:
return f.read().split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with tf.io.gfile.GFile(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = tf.io.gfile.glob(f1)
f2_files = tf.io.gfile.glob(f2)
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), (
"Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files), len(f2_files), f1, f2))
for f1_i, f2_i in zip(sorted(f1_files), sorted(f2_files)):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), (
"Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences), len(l2_sentences), f1_i, f2_i))
for s1, s2 in zip(l1_sentences, l2_sentences):
yield {
l1: s1,
l2: s2
}
|
python
|
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with tf.io.gfile.GFile(path) as f, gzip.GzipFile(fileobj=f) as g:
return g.read().split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with tf.io.gfile.GFile(path) as f:
return f.read().split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with tf.io.gfile.GFile(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = tf.io.gfile.glob(f1)
f2_files = tf.io.gfile.glob(f2)
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), (
"Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files), len(f2_files), f1, f2))
for f1_i, f2_i in zip(sorted(f1_files), sorted(f2_files)):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), (
"Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences), len(l2_sentences), f1_i, f2_i))
for s1, s2 in zip(l1_sentences, l2_sentences):
yield {
l1: s1,
l2: s2
}
|
[
"def",
"_parse_parallel_sentences",
"(",
"f1",
",",
"f2",
")",
":",
"def",
"_parse_text",
"(",
"path",
")",
":",
"\"\"\"Returns the sentences from a single text file, which may be gzipped.\"\"\"",
"split_path",
"=",
"path",
".",
"split",
"(",
"\".\"",
")",
"if",
"split_path",
"[",
"-",
"1",
"]",
"==",
"\"gz\"",
":",
"lang",
"=",
"split_path",
"[",
"-",
"2",
"]",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
",",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"f",
")",
"as",
"g",
":",
"return",
"g",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
",",
"lang",
"if",
"split_path",
"[",
"-",
"1",
"]",
"==",
"\"txt\"",
":",
"# CWMT",
"lang",
"=",
"split_path",
"[",
"-",
"2",
"]",
".",
"split",
"(",
"\"_\"",
")",
"[",
"-",
"1",
"]",
"lang",
"=",
"\"zh\"",
"if",
"lang",
"in",
"(",
"\"ch\"",
",",
"\"cn\"",
")",
"else",
"lang",
"else",
":",
"lang",
"=",
"split_path",
"[",
"-",
"1",
"]",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
",",
"lang",
"def",
"_parse_sgm",
"(",
"path",
")",
":",
"\"\"\"Returns sentences from a single SGML file.\"\"\"",
"lang",
"=",
"path",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"2",
"]",
"sentences",
"=",
"[",
"]",
"# Note: We can't use the XML parser since some of the files are badly",
"# formatted.",
"seg_re",
"=",
"re",
".",
"compile",
"(",
"r\"<seg id=\\\"\\d+\\\">(.*)</seg>\"",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"seg_match",
"=",
"re",
".",
"match",
"(",
"seg_re",
",",
"line",
")",
"if",
"seg_match",
":",
"assert",
"len",
"(",
"seg_match",
".",
"groups",
"(",
")",
")",
"==",
"1",
"sentences",
".",
"append",
"(",
"seg_match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"return",
"sentences",
",",
"lang",
"parse_file",
"=",
"_parse_sgm",
"if",
"f1",
".",
"endswith",
"(",
"\".sgm\"",
")",
"else",
"_parse_text",
"# Some datasets (e.g., CWMT) contain multiple parallel files specified with",
"# a wildcard. We sort both sets to align them and parse them one by one.",
"f1_files",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"glob",
"(",
"f1",
")",
"f2_files",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"glob",
"(",
"f2",
")",
"assert",
"f1_files",
"and",
"f2_files",
",",
"\"No matching files found: %s, %s.\"",
"%",
"(",
"f1",
",",
"f2",
")",
"assert",
"len",
"(",
"f1_files",
")",
"==",
"len",
"(",
"f2_files",
")",
",",
"(",
"\"Number of files do not match: %d vs %d for %s vs %s.\"",
"%",
"(",
"len",
"(",
"f1_files",
")",
",",
"len",
"(",
"f2_files",
")",
",",
"f1",
",",
"f2",
")",
")",
"for",
"f1_i",
",",
"f2_i",
"in",
"zip",
"(",
"sorted",
"(",
"f1_files",
")",
",",
"sorted",
"(",
"f2_files",
")",
")",
":",
"l1_sentences",
",",
"l1",
"=",
"parse_file",
"(",
"f1_i",
")",
"l2_sentences",
",",
"l2",
"=",
"parse_file",
"(",
"f2_i",
")",
"assert",
"len",
"(",
"l1_sentences",
")",
"==",
"len",
"(",
"l2_sentences",
")",
",",
"(",
"\"Sizes do not match: %d vs %d for %s vs %s.\"",
"%",
"(",
"len",
"(",
"l1_sentences",
")",
",",
"len",
"(",
"l2_sentences",
")",
",",
"f1_i",
",",
"f2_i",
")",
")",
"for",
"s1",
",",
"s2",
"in",
"zip",
"(",
"l1_sentences",
",",
"l2_sentences",
")",
":",
"yield",
"{",
"l1",
":",
"s1",
",",
"l2",
":",
"s2",
"}"
] |
Returns examples from parallel SGML or text files, which may be gzipped.
|
[
"Returns",
"examples",
"from",
"parallel",
"SGML",
"or",
"text",
"files",
"which",
"may",
"be",
"gzipped",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L761-L820
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/wmt.py
|
_parse_tmx
|
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with tf.io.gfile.GFile(path) as f:
for _, elem in ElementTree.iterparse(f):
if elem.tag == "tu":
yield {
_get_tuv_lang(tuv):
_get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")
}
elem.clear()
|
python
|
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with tf.io.gfile.GFile(path) as f:
for _, elem in ElementTree.iterparse(f):
if elem.tag == "tu":
yield {
_get_tuv_lang(tuv):
_get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")
}
elem.clear()
|
[
"def",
"_parse_tmx",
"(",
"path",
")",
":",
"def",
"_get_tuv_lang",
"(",
"tuv",
")",
":",
"for",
"k",
",",
"v",
"in",
"tuv",
".",
"items",
"(",
")",
":",
"if",
"k",
".",
"endswith",
"(",
"\"}lang\"",
")",
":",
"return",
"v",
"raise",
"AssertionError",
"(",
"\"Language not found in `tuv` attributes.\"",
")",
"def",
"_get_tuv_seg",
"(",
"tuv",
")",
":",
"segs",
"=",
"tuv",
".",
"findall",
"(",
"\"seg\"",
")",
"assert",
"len",
"(",
"segs",
")",
"==",
"1",
",",
"\"Invalid number of segments: %d\"",
"%",
"len",
"(",
"segs",
")",
"return",
"segs",
"[",
"0",
"]",
".",
"text",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"for",
"_",
",",
"elem",
"in",
"ElementTree",
".",
"iterparse",
"(",
"f",
")",
":",
"if",
"elem",
".",
"tag",
"==",
"\"tu\"",
":",
"yield",
"{",
"_get_tuv_lang",
"(",
"tuv",
")",
":",
"_get_tuv_seg",
"(",
"tuv",
")",
"for",
"tuv",
"in",
"elem",
".",
"iterfind",
"(",
"\"tuv\"",
")",
"}",
"elem",
".",
"clear",
"(",
")"
] |
Generates examples from TMX file.
|
[
"Generates",
"examples",
"from",
"TMX",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L838-L858
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/wmt.py
|
_parse_tsv
|
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with tf.io.gfile.GFile(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning(
"Skipping line %d in TSV (%s) with %d != 2 columns.",
j, path, len(cols))
continue
s1, s2 = cols
yield {
l1: s1.strip(),
l2: s2.strip()
}
|
python
|
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with tf.io.gfile.GFile(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning(
"Skipping line %d in TSV (%s) with %d != 2 columns.",
j, path, len(cols))
continue
s1, s2 = cols
yield {
l1: s1.strip(),
l2: s2.strip()
}
|
[
"def",
"_parse_tsv",
"(",
"path",
",",
"language_pair",
"=",
"None",
")",
":",
"if",
"language_pair",
"is",
"None",
":",
"lang_match",
"=",
"re",
".",
"match",
"(",
"r\".*\\.([a-z][a-z])-([a-z][a-z])\\.tsv\"",
",",
"path",
")",
"assert",
"lang_match",
"is",
"not",
"None",
",",
"\"Invalid TSV filename: %s\"",
"%",
"path",
"l1",
",",
"l2",
"=",
"lang_match",
".",
"groups",
"(",
")",
"else",
":",
"l1",
",",
"l2",
"=",
"language_pair",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"for",
"j",
",",
"line",
"in",
"enumerate",
"(",
"f",
")",
":",
"cols",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"if",
"len",
"(",
"cols",
")",
"!=",
"2",
":",
"logging",
".",
"warning",
"(",
"\"Skipping line %d in TSV (%s) with %d != 2 columns.\"",
",",
"j",
",",
"path",
",",
"len",
"(",
"cols",
")",
")",
"continue",
"s1",
",",
"s2",
"=",
"cols",
"yield",
"{",
"l1",
":",
"s1",
".",
"strip",
"(",
")",
",",
"l2",
":",
"s2",
".",
"strip",
"(",
")",
"}"
] |
Generates examples from TSV file.
|
[
"Generates",
"examples",
"from",
"TSV",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L861-L881
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/wmt.py
|
_parse_wikiheadlines
|
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with tf.io.gfile.GFile(path) as f:
for line in f:
s1, s2 = line.split("|||")
yield {
l1: s1.strip(),
l2: s2.strip()
}
|
python
|
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with tf.io.gfile.GFile(path) as f:
for line in f:
s1, s2 = line.split("|||")
yield {
l1: s1.strip(),
l2: s2.strip()
}
|
[
"def",
"_parse_wikiheadlines",
"(",
"path",
")",
":",
"lang_match",
"=",
"re",
".",
"match",
"(",
"r\".*\\.([a-z][a-z])-([a-z][a-z])$\"",
",",
"path",
")",
"assert",
"lang_match",
"is",
"not",
"None",
",",
"\"Invalid Wikiheadlines filename: %s\"",
"%",
"path",
"l1",
",",
"l2",
"=",
"lang_match",
".",
"groups",
"(",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"s1",
",",
"s2",
"=",
"line",
".",
"split",
"(",
"\"|||\"",
")",
"yield",
"{",
"l1",
":",
"s1",
".",
"strip",
"(",
")",
",",
"l2",
":",
"s2",
".",
"strip",
"(",
")",
"}"
] |
Generates examples from Wikiheadlines dataset file.
|
[
"Generates",
"examples",
"from",
"Wikiheadlines",
"dataset",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L884-L895
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/wmt.py
|
_parse_czeng
|
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with tf.io.gfile.GFile(filter_path) as f:
bad_blocks = {
blk for blk in re.search(
r"qw{([\s\d]*)}", f.read()).groups()[0].split()
}
logging.info(
"Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.",
len(bad_blocks))
for path in paths:
for gz_path in tf.io.gfile.glob(path):
with tf.io.gfile.GFile(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
for line in f:
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
yield {
"cs": cs.strip(),
"en": en.strip(),
}
|
python
|
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with tf.io.gfile.GFile(filter_path) as f:
bad_blocks = {
blk for blk in re.search(
r"qw{([\s\d]*)}", f.read()).groups()[0].split()
}
logging.info(
"Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.",
len(bad_blocks))
for path in paths:
for gz_path in tf.io.gfile.glob(path):
with tf.io.gfile.GFile(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
for line in f:
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
yield {
"cs": cs.strip(),
"en": en.strip(),
}
|
[
"def",
"_parse_czeng",
"(",
"*",
"paths",
",",
"*",
"*",
"kwargs",
")",
":",
"filter_path",
"=",
"kwargs",
".",
"get",
"(",
"\"filter_path\"",
",",
"None",
")",
"if",
"filter_path",
":",
"re_block",
"=",
"re",
".",
"compile",
"(",
"r\"^[^-]+-b(\\d+)-\\d\\d[tde]\"",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filter_path",
")",
"as",
"f",
":",
"bad_blocks",
"=",
"{",
"blk",
"for",
"blk",
"in",
"re",
".",
"search",
"(",
"r\"qw{([\\s\\d]*)}\"",
",",
"f",
".",
"read",
"(",
")",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
")",
"}",
"logging",
".",
"info",
"(",
"\"Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.\"",
",",
"len",
"(",
"bad_blocks",
")",
")",
"for",
"path",
"in",
"paths",
":",
"for",
"gz_path",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"glob",
"(",
"path",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"gz_path",
",",
"\"rb\"",
")",
"as",
"g",
",",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"g",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"decode",
"(",
"\"utf-8\"",
")",
"# required for py3",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"id_",
",",
"unused_score",
",",
"cs",
",",
"en",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"if",
"filter_path",
":",
"block_match",
"=",
"re",
".",
"match",
"(",
"re_block",
",",
"id_",
")",
"if",
"block_match",
"and",
"block_match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"in",
"bad_blocks",
":",
"continue",
"yield",
"{",
"\"cs\"",
":",
"cs",
".",
"strip",
"(",
")",
",",
"\"en\"",
":",
"en",
".",
"strip",
"(",
")",
",",
"}"
] |
Generates examples from CzEng v1.6, with optional filtering for v1.7.
|
[
"Generates",
"examples",
"from",
"CzEng",
"v1",
".",
"6",
"with",
"optional",
"filtering",
"for",
"v1",
".",
"7",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L898-L927
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/wmt.py
|
SubDataset._inject_language
|
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
|
python
|
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
|
[
"def",
"_inject_language",
"(",
"self",
",",
"src",
",",
"strings",
")",
":",
"if",
"src",
"not",
"in",
"self",
".",
"sources",
":",
"raise",
"ValueError",
"(",
"\"Invalid source for '{0}': {1}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"src",
")",
")",
"def",
"_format_string",
"(",
"s",
")",
":",
"if",
"\"{0}\"",
"in",
"s",
"and",
"\"{1}\"",
"and",
"\"{src}\"",
"in",
"s",
":",
"return",
"s",
".",
"format",
"(",
"*",
"sorted",
"(",
"[",
"src",
",",
"self",
".",
"target",
"]",
")",
",",
"src",
"=",
"src",
")",
"elif",
"\"{0}\"",
"in",
"s",
"and",
"\"{1}\"",
"in",
"s",
":",
"return",
"s",
".",
"format",
"(",
"*",
"sorted",
"(",
"[",
"src",
",",
"self",
".",
"target",
"]",
")",
")",
"elif",
"\"{src}\"",
"in",
"s",
":",
"return",
"s",
".",
"format",
"(",
"src",
"=",
"src",
")",
"else",
":",
"return",
"s",
"return",
"[",
"_format_string",
"(",
"s",
")",
"for",
"s",
"in",
"strings",
"]"
] |
Injects languages into (potentially) template strings.
|
[
"Injects",
"languages",
"into",
"(",
"potentially",
")",
"template",
"strings",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L97-L110
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/wmt.py
|
WmtTranslate.subsets
|
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.builder_config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
ds = DATASET_MAP[ss_name]
if ds.target != target or source not in ds.sources:
logging.info(
"Skipping sub-dataset that does not include language pair: %s",
ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
|
python
|
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.builder_config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
ds = DATASET_MAP[ss_name]
if ds.target != target or source not in ds.sources:
logging.info(
"Skipping sub-dataset that does not include language pair: %s",
ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
|
[
"def",
"subsets",
"(",
"self",
")",
":",
"source",
",",
"target",
"=",
"self",
".",
"builder_config",
".",
"language_pair",
"filtered_subsets",
"=",
"{",
"}",
"for",
"split",
",",
"ss_names",
"in",
"self",
".",
"_subsets",
".",
"items",
"(",
")",
":",
"filtered_subsets",
"[",
"split",
"]",
"=",
"[",
"]",
"for",
"ss_name",
"in",
"ss_names",
":",
"ds",
"=",
"DATASET_MAP",
"[",
"ss_name",
"]",
"if",
"ds",
".",
"target",
"!=",
"target",
"or",
"source",
"not",
"in",
"ds",
".",
"sources",
":",
"logging",
".",
"info",
"(",
"\"Skipping sub-dataset that does not include language pair: %s\"",
",",
"ss_name",
")",
"else",
":",
"filtered_subsets",
"[",
"split",
"]",
".",
"append",
"(",
"ss_name",
")",
"logging",
".",
"info",
"(",
"\"Using sub-datasets: %s\"",
",",
"filtered_subsets",
")",
"return",
"filtered_subsets"
] |
Subsets that make up each split of the dataset for the language pair.
|
[
"Subsets",
"that",
"make",
"up",
"each",
"split",
"of",
"the",
"dataset",
"for",
"the",
"language",
"pair",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L615-L630
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/wmt.py
|
WmtTranslate._generate_examples
|
def _generate_examples(self, split_subsets, extraction_map):
"""Returns the examples in the raw (text) form."""
source, _ = self.builder_config.language_pair
def _get_local_paths(ds, extract_dirs):
rel_paths = ds.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)]
for ss_name in split_subsets:
logging.info("Generating examples from: %s", ss_name)
ds = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(ds, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(
_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(
_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(
_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif ss_name.startswith("newscommentary_v14"):
sub_generator = functools.partial(
_parse_tsv, language_pair=self.builder_config.language_pair)
elif "tmx" in fname:
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError("Unsupported file format: %s" % fname)
else:
raise ValueError("Invalid number of files: %d" % len(files))
for ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
yield ex
|
python
|
def _generate_examples(self, split_subsets, extraction_map):
"""Returns the examples in the raw (text) form."""
source, _ = self.builder_config.language_pair
def _get_local_paths(ds, extract_dirs):
rel_paths = ds.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)]
for ss_name in split_subsets:
logging.info("Generating examples from: %s", ss_name)
ds = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(ds, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(
_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(
_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(
_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif ss_name.startswith("newscommentary_v14"):
sub_generator = functools.partial(
_parse_tsv, language_pair=self.builder_config.language_pair)
elif "tmx" in fname:
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError("Unsupported file format: %s" % fname)
else:
raise ValueError("Invalid number of files: %d" % len(files))
for ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
yield ex
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"split_subsets",
",",
"extraction_map",
")",
":",
"source",
",",
"_",
"=",
"self",
".",
"builder_config",
".",
"language_pair",
"def",
"_get_local_paths",
"(",
"ds",
",",
"extract_dirs",
")",
":",
"rel_paths",
"=",
"ds",
".",
"get_path",
"(",
"source",
")",
"if",
"len",
"(",
"extract_dirs",
")",
"==",
"1",
":",
"extract_dirs",
"=",
"extract_dirs",
"*",
"len",
"(",
"rel_paths",
")",
"return",
"[",
"os",
".",
"path",
".",
"join",
"(",
"ex_dir",
",",
"rel_path",
")",
"if",
"rel_path",
"else",
"ex_dir",
"for",
"ex_dir",
",",
"rel_path",
"in",
"zip",
"(",
"extract_dirs",
",",
"rel_paths",
")",
"]",
"for",
"ss_name",
"in",
"split_subsets",
":",
"logging",
".",
"info",
"(",
"\"Generating examples from: %s\"",
",",
"ss_name",
")",
"ds",
"=",
"DATASET_MAP",
"[",
"ss_name",
"]",
"extract_dirs",
"=",
"extraction_map",
"[",
"ss_name",
"]",
"files",
"=",
"_get_local_paths",
"(",
"ds",
",",
"extract_dirs",
")",
"if",
"ss_name",
".",
"startswith",
"(",
"\"czeng\"",
")",
":",
"if",
"ss_name",
".",
"endswith",
"(",
"\"16pre\"",
")",
":",
"sub_generator",
"=",
"functools",
".",
"partial",
"(",
"_parse_tsv",
",",
"language_pair",
"=",
"(",
"\"en\"",
",",
"\"cs\"",
")",
")",
"elif",
"ss_name",
".",
"endswith",
"(",
"\"17\"",
")",
":",
"filter_path",
"=",
"_get_local_paths",
"(",
"_CZENG17_FILTER",
",",
"extraction_map",
"[",
"_CZENG17_FILTER",
".",
"name",
"]",
")",
"[",
"0",
"]",
"sub_generator",
"=",
"functools",
".",
"partial",
"(",
"_parse_czeng",
",",
"filter_path",
"=",
"filter_path",
")",
"else",
":",
"sub_generator",
"=",
"_parse_czeng",
"elif",
"len",
"(",
"files",
")",
"==",
"2",
":",
"if",
"ss_name",
".",
"endswith",
"(",
"\"_frde\"",
")",
":",
"sub_generator",
"=",
"_parse_frde_bitext",
"else",
":",
"sub_generator",
"=",
"_parse_parallel_sentences",
"elif",
"len",
"(",
"files",
")",
"==",
"1",
":",
"fname",
"=",
"files",
"[",
"0",
"]",
"# Note: Due to formatting used by `download_manager`, the file",
"# extension may not be at the end of the file path.",
"if",
"\".tsv\"",
"in",
"fname",
":",
"sub_generator",
"=",
"_parse_tsv",
"elif",
"ss_name",
".",
"startswith",
"(",
"\"newscommentary_v14\"",
")",
":",
"sub_generator",
"=",
"functools",
".",
"partial",
"(",
"_parse_tsv",
",",
"language_pair",
"=",
"self",
".",
"builder_config",
".",
"language_pair",
")",
"elif",
"\"tmx\"",
"in",
"fname",
":",
"sub_generator",
"=",
"_parse_tmx",
"elif",
"ss_name",
".",
"startswith",
"(",
"\"wikiheadlines\"",
")",
":",
"sub_generator",
"=",
"_parse_wikiheadlines",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported file format: %s\"",
"%",
"fname",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid number of files: %d\"",
"%",
"len",
"(",
"files",
")",
")",
"for",
"ex",
"in",
"sub_generator",
"(",
"*",
"files",
")",
":",
"if",
"not",
"all",
"(",
"ex",
".",
"values",
"(",
")",
")",
":",
"continue",
"# TODO(adarob): Add subset feature.",
"# ex[\"subset\"] = subset",
"yield",
"ex"
] |
Returns the examples in the raw (text) form.
|
[
"Returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L703-L758
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/registered.py
|
builder
|
def builder(name, **builder_init_kwargs):
"""Fetches a `tfds.core.DatasetBuilder` by string name.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
**builder_init_kwargs: `dict` of keyword arguments passed to the
`DatasetBuilder`. These will override keyword arguments passed in `name`,
if any.
Returns:
A `tfds.core.DatasetBuilder`.
Raises:
DatasetNotFoundError: if `name` is unrecognized.
"""
name, builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
builder_kwargs.update(builder_init_kwargs)
if name in _ABSTRACT_DATASET_REGISTRY:
raise DatasetNotFoundError(name, is_abstract=True)
if name in _IN_DEVELOPMENT_REGISTRY:
raise DatasetNotFoundError(name, in_development=True)
if name not in _DATASET_REGISTRY:
raise DatasetNotFoundError(name)
try:
return _DATASET_REGISTRY[name](**builder_kwargs)
except BaseException:
logging.error("Failed to construct dataset %s", name)
raise
|
python
|
def builder(name, **builder_init_kwargs):
"""Fetches a `tfds.core.DatasetBuilder` by string name.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
**builder_init_kwargs: `dict` of keyword arguments passed to the
`DatasetBuilder`. These will override keyword arguments passed in `name`,
if any.
Returns:
A `tfds.core.DatasetBuilder`.
Raises:
DatasetNotFoundError: if `name` is unrecognized.
"""
name, builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
builder_kwargs.update(builder_init_kwargs)
if name in _ABSTRACT_DATASET_REGISTRY:
raise DatasetNotFoundError(name, is_abstract=True)
if name in _IN_DEVELOPMENT_REGISTRY:
raise DatasetNotFoundError(name, in_development=True)
if name not in _DATASET_REGISTRY:
raise DatasetNotFoundError(name)
try:
return _DATASET_REGISTRY[name](**builder_kwargs)
except BaseException:
logging.error("Failed to construct dataset %s", name)
raise
|
[
"def",
"builder",
"(",
"name",
",",
"*",
"*",
"builder_init_kwargs",
")",
":",
"name",
",",
"builder_kwargs",
"=",
"_dataset_name_and_kwargs_from_name_str",
"(",
"name",
")",
"builder_kwargs",
".",
"update",
"(",
"builder_init_kwargs",
")",
"if",
"name",
"in",
"_ABSTRACT_DATASET_REGISTRY",
":",
"raise",
"DatasetNotFoundError",
"(",
"name",
",",
"is_abstract",
"=",
"True",
")",
"if",
"name",
"in",
"_IN_DEVELOPMENT_REGISTRY",
":",
"raise",
"DatasetNotFoundError",
"(",
"name",
",",
"in_development",
"=",
"True",
")",
"if",
"name",
"not",
"in",
"_DATASET_REGISTRY",
":",
"raise",
"DatasetNotFoundError",
"(",
"name",
")",
"try",
":",
"return",
"_DATASET_REGISTRY",
"[",
"name",
"]",
"(",
"*",
"*",
"builder_kwargs",
")",
"except",
"BaseException",
":",
"logging",
".",
"error",
"(",
"\"Failed to construct dataset %s\"",
",",
"name",
")",
"raise"
] |
Fetches a `tfds.core.DatasetBuilder` by string name.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
**builder_init_kwargs: `dict` of keyword arguments passed to the
`DatasetBuilder`. These will override keyword arguments passed in `name`,
if any.
Returns:
A `tfds.core.DatasetBuilder`.
Raises:
DatasetNotFoundError: if `name` is unrecognized.
|
[
"Fetches",
"a",
"tfds",
".",
"core",
".",
"DatasetBuilder",
"by",
"string",
"name",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L137-L172
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/registered.py
|
load
|
def load(name,
split=None,
data_dir=None,
batch_size=1,
download=True,
as_supervised=False,
with_info=False,
builder_kwargs=None,
download_and_prepare_kwargs=None,
as_dataset_kwargs=None,
try_gcs=False):
"""Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow
to control where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`.
"""
name, name_builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
name_builder_kwargs.update(builder_kwargs or {})
builder_kwargs = name_builder_kwargs
# Set data_dir
if try_gcs and gcs_utils.is_dataset_on_gcs(name):
data_dir = constants.GCS_DATA_DIR
elif data_dir is None:
data_dir = constants.DATA_DIR
dbuilder = builder(name, data_dir=data_dir, **builder_kwargs)
if download:
download_and_prepare_kwargs = download_and_prepare_kwargs or {}
dbuilder.download_and_prepare(**download_and_prepare_kwargs)
if as_dataset_kwargs is None:
as_dataset_kwargs = {}
as_dataset_kwargs = dict(as_dataset_kwargs)
as_dataset_kwargs["split"] = split
as_dataset_kwargs["as_supervised"] = as_supervised
as_dataset_kwargs["batch_size"] = batch_size
ds = dbuilder.as_dataset(**as_dataset_kwargs)
if with_info:
return ds, dbuilder.info
return ds
|
python
|
def load(name,
split=None,
data_dir=None,
batch_size=1,
download=True,
as_supervised=False,
with_info=False,
builder_kwargs=None,
download_and_prepare_kwargs=None,
as_dataset_kwargs=None,
try_gcs=False):
"""Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow
to control where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`.
"""
name, name_builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
name_builder_kwargs.update(builder_kwargs or {})
builder_kwargs = name_builder_kwargs
# Set data_dir
if try_gcs and gcs_utils.is_dataset_on_gcs(name):
data_dir = constants.GCS_DATA_DIR
elif data_dir is None:
data_dir = constants.DATA_DIR
dbuilder = builder(name, data_dir=data_dir, **builder_kwargs)
if download:
download_and_prepare_kwargs = download_and_prepare_kwargs or {}
dbuilder.download_and_prepare(**download_and_prepare_kwargs)
if as_dataset_kwargs is None:
as_dataset_kwargs = {}
as_dataset_kwargs = dict(as_dataset_kwargs)
as_dataset_kwargs["split"] = split
as_dataset_kwargs["as_supervised"] = as_supervised
as_dataset_kwargs["batch_size"] = batch_size
ds = dbuilder.as_dataset(**as_dataset_kwargs)
if with_info:
return ds, dbuilder.info
return ds
|
[
"def",
"load",
"(",
"name",
",",
"split",
"=",
"None",
",",
"data_dir",
"=",
"None",
",",
"batch_size",
"=",
"1",
",",
"download",
"=",
"True",
",",
"as_supervised",
"=",
"False",
",",
"with_info",
"=",
"False",
",",
"builder_kwargs",
"=",
"None",
",",
"download_and_prepare_kwargs",
"=",
"None",
",",
"as_dataset_kwargs",
"=",
"None",
",",
"try_gcs",
"=",
"False",
")",
":",
"name",
",",
"name_builder_kwargs",
"=",
"_dataset_name_and_kwargs_from_name_str",
"(",
"name",
")",
"name_builder_kwargs",
".",
"update",
"(",
"builder_kwargs",
"or",
"{",
"}",
")",
"builder_kwargs",
"=",
"name_builder_kwargs",
"# Set data_dir",
"if",
"try_gcs",
"and",
"gcs_utils",
".",
"is_dataset_on_gcs",
"(",
"name",
")",
":",
"data_dir",
"=",
"constants",
".",
"GCS_DATA_DIR",
"elif",
"data_dir",
"is",
"None",
":",
"data_dir",
"=",
"constants",
".",
"DATA_DIR",
"dbuilder",
"=",
"builder",
"(",
"name",
",",
"data_dir",
"=",
"data_dir",
",",
"*",
"*",
"builder_kwargs",
")",
"if",
"download",
":",
"download_and_prepare_kwargs",
"=",
"download_and_prepare_kwargs",
"or",
"{",
"}",
"dbuilder",
".",
"download_and_prepare",
"(",
"*",
"*",
"download_and_prepare_kwargs",
")",
"if",
"as_dataset_kwargs",
"is",
"None",
":",
"as_dataset_kwargs",
"=",
"{",
"}",
"as_dataset_kwargs",
"=",
"dict",
"(",
"as_dataset_kwargs",
")",
"as_dataset_kwargs",
"[",
"\"split\"",
"]",
"=",
"split",
"as_dataset_kwargs",
"[",
"\"as_supervised\"",
"]",
"=",
"as_supervised",
"as_dataset_kwargs",
"[",
"\"batch_size\"",
"]",
"=",
"batch_size",
"ds",
"=",
"dbuilder",
".",
"as_dataset",
"(",
"*",
"*",
"as_dataset_kwargs",
")",
"if",
"with_info",
":",
"return",
"ds",
",",
"dbuilder",
".",
"info",
"return",
"ds"
] |
Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable-length features will be 0-padded. If `batch_size=-1`, the full
dataset is returned as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tfds.core.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. These
allow control over where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tf.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`.
|
[
"Loads",
"the",
"named",
"dataset",
"into",
"a",
"tf",
".",
"data",
".",
"Dataset",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L176-L297
|
train
|
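A minimal usage sketch for the `load` entry point documented above. The dataset name `"mnist"`, the batch size, and the `take(1)` call are illustrative choices only; the first run may trigger a download into `data_dir`.
```
import tensorflow_datasets as tfds

# Load one split as (image, label) tuples together with the DatasetInfo.
ds, info = tfds.load(
    "mnist",                     # illustrative dataset name
    split=tfds.Split.TRAIN,
    batch_size=32,
    as_supervised=True,
    with_info=True,
)
print(info.full_name)            # e.g. "mnist/<version>"

# Iterate as NumPy arrays; take(1) keeps the example cheap.
for images, labels in tfds.as_numpy(ds.take(1)):
    print(images.shape, labels.shape)
```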
tensorflow/datasets
|
tensorflow_datasets/core/registered.py
|
_dataset_name_and_kwargs_from_name_str
|
def _dataset_name_and_kwargs_from_name_str(name_str):
"""Extract kwargs from name str."""
res = _NAME_REG.match(name_str)
if not res:
raise ValueError(_NAME_STR_ERR.format(name_str))
name = res.group("dataset_name")
kwargs = _kwargs_str_to_kwargs(res.group("kwargs"))
try:
for attr in ["config", "version"]:
val = res.group(attr)
if val is None:
continue
if attr in kwargs:
raise ValueError("Dataset %s: cannot pass %s twice." % (name, attr))
kwargs[attr] = val
return name, kwargs
except:
logging.error(_NAME_STR_ERR.format(name_str)) # pylint: disable=logging-format-interpolation
raise
|
python
|
def _dataset_name_and_kwargs_from_name_str(name_str):
"""Extract kwargs from name str."""
res = _NAME_REG.match(name_str)
if not res:
raise ValueError(_NAME_STR_ERR.format(name_str))
name = res.group("dataset_name")
kwargs = _kwargs_str_to_kwargs(res.group("kwargs"))
try:
for attr in ["config", "version"]:
val = res.group(attr)
if val is None:
continue
if attr in kwargs:
raise ValueError("Dataset %s: cannot pass %s twice." % (name, attr))
kwargs[attr] = val
return name, kwargs
except:
logging.error(_NAME_STR_ERR.format(name_str)) # pylint: disable=logging-format-interpolation
raise
|
[
"def",
"_dataset_name_and_kwargs_from_name_str",
"(",
"name_str",
")",
":",
"res",
"=",
"_NAME_REG",
".",
"match",
"(",
"name_str",
")",
"if",
"not",
"res",
":",
"raise",
"ValueError",
"(",
"_NAME_STR_ERR",
".",
"format",
"(",
"name_str",
")",
")",
"name",
"=",
"res",
".",
"group",
"(",
"\"dataset_name\"",
")",
"kwargs",
"=",
"_kwargs_str_to_kwargs",
"(",
"res",
".",
"group",
"(",
"\"kwargs\"",
")",
")",
"try",
":",
"for",
"attr",
"in",
"[",
"\"config\"",
",",
"\"version\"",
"]",
":",
"val",
"=",
"res",
".",
"group",
"(",
"attr",
")",
"if",
"val",
"is",
"None",
":",
"continue",
"if",
"attr",
"in",
"kwargs",
":",
"raise",
"ValueError",
"(",
"\"Dataset %s: cannot pass %s twice.\"",
"%",
"(",
"name",
",",
"attr",
")",
")",
"kwargs",
"[",
"attr",
"]",
"=",
"val",
"return",
"name",
",",
"kwargs",
"except",
":",
"logging",
".",
"error",
"(",
"_NAME_STR_ERR",
".",
"format",
"(",
"name_str",
")",
")",
"# pylint: disable=logging-format-interpolation",
"raise"
] |
Extract kwargs from name str.
|
[
"Extract",
"kwargs",
"from",
"name",
"str",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L311-L329
|
train
|
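The name-string convention handled by `_dataset_name_and_kwargs_from_name_str` can be illustrated with a simplified, standalone parser. The regex below is a hypothetical stand-in for the private `_NAME_REG` and ignores the config and version segments that the real pattern also captures; in the real helper the values are additionally cast to Python types via `_cast_to_pod`.
```
import re

_SIMPLE_NAME_RE = re.compile(
    r"^(?P<dataset_name>\w+)(/(?P<kwargs>[^/]+=.+))?$")

def parse_name(name_str):
  """Parse 'foo_bar/a=True,b=3' into ('foo_bar', {'a': 'True', 'b': '3'})."""
  res = _SIMPLE_NAME_RE.match(name_str)
  if not res:
    raise ValueError("Unrecognized name string: %s" % name_str)
  kwargs = {}
  if res.group("kwargs"):
    for item in res.group("kwargs").split(","):
      key, val = item.split("=", 1)
      kwargs[key] = val
  return res.group("dataset_name"), kwargs

print(parse_name("foo_bar/a=True,b=3"))  # ('foo_bar', {'a': 'True', 'b': '3'})
```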
tensorflow/datasets
|
tensorflow_datasets/core/registered.py
|
_cast_to_pod
|
def _cast_to_pod(val):
"""Try cast to int, float, bool, str, in that order."""
bools = {"True": True, "False": False}
if val in bools:
return bools[val]
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return tf.compat.as_text(val)
|
python
|
def _cast_to_pod(val):
"""Try cast to int, float, bool, str, in that order."""
bools = {"True": True, "False": False}
if val in bools:
return bools[val]
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return tf.compat.as_text(val)
|
[
"def",
"_cast_to_pod",
"(",
"val",
")",
":",
"bools",
"=",
"{",
"\"True\"",
":",
"True",
",",
"\"False\"",
":",
"False",
"}",
"if",
"val",
"in",
"bools",
":",
"return",
"bools",
"[",
"val",
"]",
"try",
":",
"return",
"int",
"(",
"val",
")",
"except",
"ValueError",
":",
"try",
":",
"return",
"float",
"(",
"val",
")",
"except",
"ValueError",
":",
"return",
"tf",
".",
"compat",
".",
"as_text",
"(",
"val",
")"
] |
Try cast to int, float, bool, str, in that order.
|
[
"Try",
"cast",
"to",
"int",
"float",
"bool",
"str",
"in",
"that",
"order",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L343-L354
|
train
|
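A standalone sketch of the casting order shown above; `tf.compat.as_text` is replaced with plain `str`, an assumption made only to keep the snippet dependency-free.
```
def cast_to_pod(val):
  """Try cast to bool, int, float, then fall back to text."""
  bools = {"True": True, "False": False}
  if val in bools:
    return bools[val]
  try:
    return int(val)
  except ValueError:
    try:
      return float(val)
    except ValueError:
      return str(val)

print([cast_to_pod(v) for v in ["True", "3", "2.5", "zoo"]])
# [True, 3, 2.5, 'zoo']
```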
tensorflow/datasets
|
tensorflow_datasets/core/lazy_imports.py
|
_try_import
|
def _try_import(module_name):
"""Try importing a module, with an informative error message on failure."""
try:
mod = importlib.import_module(module_name)
return mod
except ImportError:
err_msg = ("Tried importing %s but failed. See setup.py extras_require. "
"The dataset you are trying to use may have additional "
"dependencies.")
utils.reraise(err_msg)
|
python
|
def _try_import(module_name):
"""Try importing a module, with an informative error message on failure."""
try:
mod = importlib.import_module(module_name)
return mod
except ImportError:
err_msg = ("Tried importing %s but failed. See setup.py extras_require. "
"The dataset you are trying to use may have additional "
"dependencies.")
utils.reraise(err_msg)
|
[
"def",
"_try_import",
"(",
"module_name",
")",
":",
"try",
":",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
"return",
"mod",
"except",
"ImportError",
":",
"err_msg",
"=",
"(",
"\"Tried importing %s but failed. See setup.py extras_require. \"",
"\"The dataset you are trying to use may have additional \"",
"\"dependencies.\"",
")",
"utils",
".",
"reraise",
"(",
"err_msg",
")"
] |
Try importing a module, with an informative error message on failure.
|
[
"Try",
"importing",
"a",
"module",
"with",
"an",
"informative",
"error",
"message",
"on",
"failure",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/lazy_imports.py#L27-L36
|
train
|
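The helper above backs the lazy-import pattern: optional dependencies are imported only when first accessed, with an informative error otherwise. A minimal self-contained sketch of that pattern follows; the `LazyImports` class and its error message are illustrative, not the library's actual `lazy_imports` API.
```
import importlib

class LazyImports(object):
  """Illustrative lazy-import container: modules load on first access."""

  def _try_import(self, module_name):
    try:
      return importlib.import_module(module_name)
    except ImportError:
      raise ImportError(
          "Tried importing %s but failed. The dataset you are trying to use "
          "may have additional dependencies." % module_name)

  @property
  def scipy(self):
    return self._try_import("scipy")

lazy = LazyImports()
# Nothing is imported until the attribute is touched, e.g.:
# print(lazy.scipy.__version__)  # requires scipy to be installed
```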
tensorflow/datasets
|
tensorflow_datasets/core/features/sequence_feature.py
|
np_to_list
|
def np_to_list(elem):
"""Returns list from list, tuple or ndarray."""
if isinstance(elem, list):
return elem
elif isinstance(elem, tuple):
return list(elem)
elif isinstance(elem, np.ndarray):
return list(elem)
else:
raise ValueError(
'Input elements of a sequence should be either a numpy array, a '
'python list or tuple. Got {}'.format(type(elem)))
|
python
|
def np_to_list(elem):
"""Returns list from list, tuple or ndarray."""
if isinstance(elem, list):
return elem
elif isinstance(elem, tuple):
return list(elem)
elif isinstance(elem, np.ndarray):
return list(elem)
else:
raise ValueError(
'Input elements of a sequence should be either a numpy array, a '
'python list or tuple. Got {}'.format(type(elem)))
|
[
"def",
"np_to_list",
"(",
"elem",
")",
":",
"if",
"isinstance",
"(",
"elem",
",",
"list",
")",
":",
"return",
"elem",
"elif",
"isinstance",
"(",
"elem",
",",
"tuple",
")",
":",
"return",
"list",
"(",
"elem",
")",
"elif",
"isinstance",
"(",
"elem",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"list",
"(",
"elem",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Input elements of a sequence should be either a numpy array, a '",
"'python list or tuple. Got {}'",
".",
"format",
"(",
"type",
"(",
"elem",
")",
")",
")"
] |
Returns list from list, tuple or ndarray.
|
[
"Returns",
"list",
"from",
"list",
"tuple",
"or",
"ndarray",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/sequence_feature.py#L257-L268
|
train
|
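A quick demonstration of the normalization the helper performs, re-implemented standalone so the snippet runs without TFDS.
```
import numpy as np

def np_to_list(elem):
  """Return a plain list from a list, tuple or ndarray."""
  if isinstance(elem, list):
    return elem
  if isinstance(elem, (tuple, np.ndarray)):
    return list(elem)
  raise ValueError("Expected list, tuple or ndarray, got %s" % type(elem))

print(np_to_list([1, 2]), np_to_list((1, 2)), np_to_list(np.arange(2)))
# [1, 2] [1, 2] [0, 1]
```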
tensorflow/datasets
|
tensorflow_datasets/core/features/sequence_feature.py
|
_transpose_dict_list
|
def _transpose_dict_list(dict_list):
"""Transpose a nested dict[list] into a list[nested dict]."""
# 1. Unstack numpy arrays into list
dict_list = utils.map_nested(np_to_list, dict_list, dict_only=True)
# 2. Extract the sequence length (and ensure the length is constant for all
# elements)
length = {'value': None} # dict because `nonlocal` is Python3 only
def update_length(elem):
if length['value'] is None:
length['value'] = len(elem)
elif length['value'] != len(elem):
raise ValueError(
'The length of all elements of one sequence should be the same. '
'Got {} != {}'.format(length['value'], len(elem)))
return elem
utils.map_nested(update_length, dict_list, dict_only=True)
# 3. Extract each individual elements
return [
utils.map_nested(lambda elem: elem[i], dict_list, dict_only=True) # pylint: disable=cell-var-from-loop
for i in range(length['value'])
]
|
python
|
def _transpose_dict_list(dict_list):
"""Transpose a nested dict[list] into a list[nested dict]."""
# 1. Unstack numpy arrays into list
dict_list = utils.map_nested(np_to_list, dict_list, dict_only=True)
# 2. Extract the sequence length (and ensure the length is constant for all
# elements)
length = {'value': None} # dict because `nonlocal` is Python3 only
def update_length(elem):
if length['value'] is None:
length['value'] = len(elem)
elif length['value'] != len(elem):
raise ValueError(
'The length of all elements of one sequence should be the same. '
'Got {} != {}'.format(length['value'], len(elem)))
return elem
utils.map_nested(update_length, dict_list, dict_only=True)
# 3. Extract each individual elements
return [
utils.map_nested(lambda elem: elem[i], dict_list, dict_only=True) # pylint: disable=cell-var-from-loop
for i in range(length['value'])
]
|
[
"def",
"_transpose_dict_list",
"(",
"dict_list",
")",
":",
"# 1. Unstack numpy arrays into list",
"dict_list",
"=",
"utils",
".",
"map_nested",
"(",
"np_to_list",
",",
"dict_list",
",",
"dict_only",
"=",
"True",
")",
"# 2. Extract the sequence length (and ensure the length is constant for all",
"# elements)",
"length",
"=",
"{",
"'value'",
":",
"None",
"}",
"# dict because `nonlocal` is Python3 only",
"def",
"update_length",
"(",
"elem",
")",
":",
"if",
"length",
"[",
"'value'",
"]",
"is",
"None",
":",
"length",
"[",
"'value'",
"]",
"=",
"len",
"(",
"elem",
")",
"elif",
"length",
"[",
"'value'",
"]",
"!=",
"len",
"(",
"elem",
")",
":",
"raise",
"ValueError",
"(",
"'The length of all elements of one sequence should be the same. '",
"'Got {} != {}'",
".",
"format",
"(",
"length",
"[",
"'value'",
"]",
",",
"len",
"(",
"elem",
")",
")",
")",
"return",
"elem",
"utils",
".",
"map_nested",
"(",
"update_length",
",",
"dict_list",
",",
"dict_only",
"=",
"True",
")",
"# 3. Extract each individual elements",
"return",
"[",
"utils",
".",
"map_nested",
"(",
"lambda",
"elem",
":",
"elem",
"[",
"i",
"]",
",",
"dict_list",
",",
"dict_only",
"=",
"True",
")",
"# pylint: disable=cell-var-from-loop",
"for",
"i",
"in",
"range",
"(",
"length",
"[",
"'value'",
"]",
")",
"]"
] |
Transpose a nested dict[list] into a list[nested dict].
|
[
"Transpose",
"a",
"nested",
"dict",
"[",
"list",
"]",
"into",
"a",
"list",
"[",
"nested",
"dict",
"]",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/sequence_feature.py#L271-L293
|
train
|
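The dict-of-lists to list-of-dicts transposition can be shown on a flat dictionary; the real helper additionally recurses into nested dicts through `utils.map_nested`.
```
def transpose_dict_list(dict_list):
  """Transpose a flat dict of equal-length lists into a list of dicts."""
  lengths = {len(v) for v in dict_list.values()}
  if len(lengths) != 1:
    raise ValueError("All elements of one sequence must have the same length.")
  length = lengths.pop()
  return [{k: v[i] for k, v in dict_list.items()} for i in range(length)]

print(transpose_dict_list({"frame": [1, 2, 3], "label": ["a", "b", "c"]}))
# [{'frame': 1, 'label': 'a'}, {'frame': 2, 'label': 'b'},
#  {'frame': 3, 'label': 'c'}]
```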
tensorflow/datasets
|
tensorflow_datasets/core/features/sequence_feature.py
|
SequenceDict.get_tensor_info
|
def get_tensor_info(self):
"""See base class for details."""
# Add the additional length dimension to every shape
def add_length_dim(tensor_info):
return feature_lib.TensorInfo(
shape=(self._length,) + tensor_info.shape,
dtype=tensor_info.dtype,
)
tensor_info = super(SequenceDict, self).get_tensor_info()
return utils.map_nested(add_length_dim, tensor_info)
|
python
|
def get_tensor_info(self):
"""See base class for details."""
# Add the additional length dimension to every shape
def add_length_dim(tensor_info):
return feature_lib.TensorInfo(
shape=(self._length,) + tensor_info.shape,
dtype=tensor_info.dtype,
)
tensor_info = super(SequenceDict, self).get_tensor_info()
return utils.map_nested(add_length_dim, tensor_info)
|
[
"def",
"get_tensor_info",
"(",
"self",
")",
":",
"# Add the additional length dimension to every shape",
"def",
"add_length_dim",
"(",
"tensor_info",
")",
":",
"return",
"feature_lib",
".",
"TensorInfo",
"(",
"shape",
"=",
"(",
"self",
".",
"_length",
",",
")",
"+",
"tensor_info",
".",
"shape",
",",
"dtype",
"=",
"tensor_info",
".",
"dtype",
",",
")",
"tensor_info",
"=",
"super",
"(",
"SequenceDict",
",",
"self",
")",
".",
"get_tensor_info",
"(",
")",
"return",
"utils",
".",
"map_nested",
"(",
"add_length_dim",
",",
"tensor_info",
")"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/sequence_feature.py#L90-L101
|
train
|
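The effect of `add_length_dim` is simply to prepend the (possibly unknown) sequence length to every feature shape. A toy sketch with a namedtuple standing in for `feature_lib.TensorInfo`, which has more fields than shown here:
```
import collections

TensorInfo = collections.namedtuple("TensorInfo", ["shape", "dtype"])

def add_length_dim(tensor_info, length):
  return TensorInfo(shape=(length,) + tensor_info.shape,
                    dtype=tensor_info.dtype)

image_info = TensorInfo(shape=(28, 28, 1), dtype="uint8")
print(add_length_dim(image_info, length=None).shape)  # (None, 28, 28, 1)
print(add_length_dim(image_info, length=5).shape)     # (5, 28, 28, 1)
```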
tensorflow/datasets
|
tensorflow_datasets/core/features/sequence_feature.py
|
SequenceDict.get_serialized_info
|
def get_serialized_info(self):
"""See base class for details."""
# Add the additional length dimension to every serialized features
def add_length_dim(serialized_info):
"""Add the length dimension to the serialized_info.
Args:
serialized_info: One of tf.io.FixedLenFeature, tf.io.VarLenFeature,...
Returns:
new_serialized_info: serialized_info with extended first dimension
"""
if isinstance(serialized_info, tf.io.FixedLenFeature):
if self._length is not None:
return tf.io.FixedLenFeature(
shape=(self._length,) + serialized_info.shape,
dtype=serialized_info.dtype,
)
else:
return tf.io.FixedLenSequenceFeature(
shape=serialized_info.shape,
dtype=serialized_info.dtype,
allow_missing=True,
)
elif isinstance(serialized_info, tf.io.VarLenFeature):
return serialized_info
else:
raise ValueError(
'FixedLenSequenceFeature not supported inside SequenceDict'
)
return serialized_info
tensor_info = super(SequenceDict, self).get_serialized_info()
return utils.map_nested(add_length_dim, tensor_info)
|
python
|
def get_serialized_info(self):
"""See base class for details."""
# Add the additional length dimension to every serialized features
def add_length_dim(serialized_info):
"""Add the length dimension to the serialized_info.
Args:
serialized_info: One of tf.io.FixedLenFeature, tf.io.VarLenFeature,...
Returns:
new_serialized_info: serialized_info with extended first dimension
"""
if isinstance(serialized_info, tf.io.FixedLenFeature):
if self._length is not None:
return tf.io.FixedLenFeature(
shape=(self._length,) + serialized_info.shape,
dtype=serialized_info.dtype,
)
else:
return tf.io.FixedLenSequenceFeature(
shape=serialized_info.shape,
dtype=serialized_info.dtype,
allow_missing=True,
)
elif isinstance(serialized_info, tf.io.VarLenFeature):
return serialized_info
else:
raise ValueError(
'FixedLenSequenceFeature not supported inside SequenceDict'
)
return serialized_info
tensor_info = super(SequenceDict, self).get_serialized_info()
return utils.map_nested(add_length_dim, tensor_info)
|
[
"def",
"get_serialized_info",
"(",
"self",
")",
":",
"# Add the additional length dimension to every serialized features",
"def",
"add_length_dim",
"(",
"serialized_info",
")",
":",
"\"\"\"Add the length dimension to the serialized_info.\n\n Args:\n serialized_info: One of tf.io.FixedLenFeature, tf.io.VarLenFeature,...\n\n Returns:\n new_serialized_info: serialized_info with extended first dimension\n \"\"\"",
"if",
"isinstance",
"(",
"serialized_info",
",",
"tf",
".",
"io",
".",
"FixedLenFeature",
")",
":",
"if",
"self",
".",
"_length",
"is",
"not",
"None",
":",
"return",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"shape",
"=",
"(",
"self",
".",
"_length",
",",
")",
"+",
"serialized_info",
".",
"shape",
",",
"dtype",
"=",
"serialized_info",
".",
"dtype",
",",
")",
"else",
":",
"return",
"tf",
".",
"io",
".",
"FixedLenSequenceFeature",
"(",
"shape",
"=",
"serialized_info",
".",
"shape",
",",
"dtype",
"=",
"serialized_info",
".",
"dtype",
",",
"allow_missing",
"=",
"True",
",",
")",
"elif",
"isinstance",
"(",
"serialized_info",
",",
"tf",
".",
"io",
".",
"VarLenFeature",
")",
":",
"return",
"serialized_info",
"else",
":",
"raise",
"ValueError",
"(",
"'FixedLenSequenceFeature not supported inside SequenceDict'",
")",
"return",
"serialized_info",
"tensor_info",
"=",
"super",
"(",
"SequenceDict",
",",
"self",
")",
".",
"get_serialized_info",
"(",
")",
"return",
"utils",
".",
"map_nested",
"(",
"add_length_dim",
",",
"tensor_info",
")"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/sequence_feature.py#L103-L137
|
train
|
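The branching in `add_length_dim` above decides how each feature is parsed back from serialized examples. A small sketch of that decision using the public `tf.io` feature specs; the `length` argument stands in for `self._length`.
```
import tensorflow as tf

def add_length_dim(serialized_info, length):
  if isinstance(serialized_info, tf.io.FixedLenFeature):
    if length is not None:
      # Known sequence length: keep a fixed-length spec with an extra dim.
      return tf.io.FixedLenFeature(
          shape=(length,) + tuple(serialized_info.shape),
          dtype=serialized_info.dtype)
    # Unknown length: parse as a variable-length sequence feature.
    return tf.io.FixedLenSequenceFeature(
        shape=serialized_info.shape,
        dtype=serialized_info.dtype,
        allow_missing=True)
  return serialized_info

label_spec = tf.io.FixedLenFeature(shape=(), dtype=tf.int64)
print(add_length_dim(label_spec, length=None))
print(add_length_dim(label_spec, length=4))
```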
tensorflow/datasets
|
tensorflow_datasets/image/mnist.py
|
MNIST._split_generators
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Download the full MNIST Database
filenames = {
"train_data": _MNIST_TRAIN_DATA_FILENAME,
"train_labels": _MNIST_TRAIN_LABELS_FILENAME,
"test_data": _MNIST_TEST_DATA_FILENAME,
"test_labels": _MNIST_TEST_LABELS_FILENAME,
}
mnist_files = dl_manager.download_and_extract(
{k: urllib.parse.urljoin(self.URL, v) for k, v in filenames.items()})
# MNIST provides TRAIN and TEST splits, not a VALIDATION split, so we only
# write the TRAIN and TEST splits to disk.
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=10,
gen_kwargs=dict(
num_examples=_TRAIN_EXAMPLES,
data_path=mnist_files["train_data"],
label_path=mnist_files["train_labels"],
)),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs=dict(
num_examples=_TEST_EXAMPLES,
data_path=mnist_files["test_data"],
label_path=mnist_files["test_labels"],
)),
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Download the full MNIST Database
filenames = {
"train_data": _MNIST_TRAIN_DATA_FILENAME,
"train_labels": _MNIST_TRAIN_LABELS_FILENAME,
"test_data": _MNIST_TEST_DATA_FILENAME,
"test_labels": _MNIST_TEST_LABELS_FILENAME,
}
mnist_files = dl_manager.download_and_extract(
{k: urllib.parse.urljoin(self.URL, v) for k, v in filenames.items()})
# MNIST provides TRAIN and TEST splits, not a VALIDATION split, so we only
# write the TRAIN and TEST splits to disk.
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=10,
gen_kwargs=dict(
num_examples=_TRAIN_EXAMPLES,
data_path=mnist_files["train_data"],
label_path=mnist_files["train_labels"],
)),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs=dict(
num_examples=_TEST_EXAMPLES,
data_path=mnist_files["test_data"],
label_path=mnist_files["test_labels"],
)),
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"# Download the full MNIST Database",
"filenames",
"=",
"{",
"\"train_data\"",
":",
"_MNIST_TRAIN_DATA_FILENAME",
",",
"\"train_labels\"",
":",
"_MNIST_TRAIN_LABELS_FILENAME",
",",
"\"test_data\"",
":",
"_MNIST_TEST_DATA_FILENAME",
",",
"\"test_labels\"",
":",
"_MNIST_TEST_LABELS_FILENAME",
",",
"}",
"mnist_files",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"{",
"k",
":",
"urllib",
".",
"parse",
".",
"urljoin",
"(",
"self",
".",
"URL",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"filenames",
".",
"items",
"(",
")",
"}",
")",
"# MNIST provides TRAIN and TEST splits, not a VALIDATION split, so we only",
"# write the TRAIN and TEST splits to disk.",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
",",
"num_shards",
"=",
"10",
",",
"gen_kwargs",
"=",
"dict",
"(",
"num_examples",
"=",
"_TRAIN_EXAMPLES",
",",
"data_path",
"=",
"mnist_files",
"[",
"\"train_data\"",
"]",
",",
"label_path",
"=",
"mnist_files",
"[",
"\"train_labels\"",
"]",
",",
")",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TEST",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"dict",
"(",
"num_examples",
"=",
"_TEST_EXAMPLES",
",",
"data_path",
"=",
"mnist_files",
"[",
"\"test_data\"",
"]",
",",
"label_path",
"=",
"mnist_files",
"[",
"\"test_labels\"",
"]",
",",
")",
")",
",",
"]"
] |
Returns SplitGenerators.
|
[
"Returns",
"SplitGenerators",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/mnist.py#L113-L144
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/mnist.py
|
MNIST._generate_examples
|
def _generate_examples(self, num_examples, data_path, label_path):
"""Generate MNIST examples as dicts.
Args:
num_examples (int): The number of examples.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
"""
images = _extract_mnist_images(data_path, num_examples)
labels = _extract_mnist_labels(label_path, num_examples)
data = list(zip(images, labels))
# Data is shuffled automatically to distribute classes uniformly.
for image, label in data:
yield {
"image": image,
"label": label,
}
|
python
|
def _generate_examples(self, num_examples, data_path, label_path):
"""Generate MNIST examples as dicts.
Args:
num_examples (int): The number of examples.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
"""
images = _extract_mnist_images(data_path, num_examples)
labels = _extract_mnist_labels(label_path, num_examples)
data = list(zip(images, labels))
# Data is shuffled automatically to distribute classes uniformly.
for image, label in data:
yield {
"image": image,
"label": label,
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"num_examples",
",",
"data_path",
",",
"label_path",
")",
":",
"images",
"=",
"_extract_mnist_images",
"(",
"data_path",
",",
"num_examples",
")",
"labels",
"=",
"_extract_mnist_labels",
"(",
"label_path",
",",
"num_examples",
")",
"data",
"=",
"list",
"(",
"zip",
"(",
"images",
",",
"labels",
")",
")",
"# Data is shuffled automatically to distribute classes uniformly.",
"for",
"image",
",",
"label",
"in",
"data",
":",
"yield",
"{",
"\"image\"",
":",
"image",
",",
"\"label\"",
":",
"label",
",",
"}"
] |
Generate MNIST examples as dicts.
Args:
num_examples (int): The number of examples.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
|
[
"Generate",
"MNIST",
"examples",
"as",
"dicts",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/mnist.py#L146-L166
|
train
|
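The generator body above is just a zip-and-yield over decoded arrays. A toy version with stand-in arrays; the shapes mirror MNIST but the values are placeholders only.
```
import numpy as np

images = np.zeros((3, 28, 28, 1), dtype=np.uint8)   # stand-in decoded images
labels = np.array([5, 0, 4], dtype=np.int64)        # stand-in decoded labels

def generate_examples(images, labels):
  for image, label in zip(images, labels):
    yield {"image": image, "label": label}

print(sum(1 for _ in generate_examples(images, labels)))  # 3 examples
```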
tensorflow/datasets
|
tensorflow_datasets/image/oxford_flowers102.py
|
OxfordFlowers102._split_generators
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Download images and annotations that come in separate archives.
# Note, that the extension of archives is .tar.gz even though the actual
# archives format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({
"images": tfds.download.Resource(
url=os.path.join(_BASE_URL, "102flowers.tgz"),
extract_method=tfds.download.ExtractMethod.TAR),
"labels": os.path.join(_BASE_URL, "imagelabels.mat"),
"setid": os.path.join(_BASE_URL, "setid.mat"),
})
gen_kwargs = dict(
images_dir_path=os.path.join(dl_paths["images"], "jpg"),
labels_path=dl_paths["labels"],
setid_path=dl_paths["setid"],
)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=1,
gen_kwargs=dict(split_name="trnid", **gen_kwargs)),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs=dict(split_name="tstid", **gen_kwargs)),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=1,
gen_kwargs=dict(split_name="valid", **gen_kwargs)),
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Download images and annotations that come in separate archives.
# Note, that the extension of archives is .tar.gz even though the actual
# archives format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({
"images": tfds.download.Resource(
url=os.path.join(_BASE_URL, "102flowers.tgz"),
extract_method=tfds.download.ExtractMethod.TAR),
"labels": os.path.join(_BASE_URL, "imagelabels.mat"),
"setid": os.path.join(_BASE_URL, "setid.mat"),
})
gen_kwargs = dict(
images_dir_path=os.path.join(dl_paths["images"], "jpg"),
labels_path=dl_paths["labels"],
setid_path=dl_paths["setid"],
)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=1,
gen_kwargs=dict(split_name="trnid", **gen_kwargs)),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs=dict(split_name="tstid", **gen_kwargs)),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=1,
gen_kwargs=dict(split_name="valid", **gen_kwargs)),
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"# Download images and annotations that come in separate archives.",
"# Note, that the extension of archives is .tar.gz even though the actual",
"# archives format is uncompressed tar.",
"dl_paths",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"{",
"\"images\"",
":",
"tfds",
".",
"download",
".",
"Resource",
"(",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_BASE_URL",
",",
"\"102flowers.tgz\"",
")",
",",
"extract_method",
"=",
"tfds",
".",
"download",
".",
"ExtractMethod",
".",
"TAR",
")",
",",
"\"labels\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"_BASE_URL",
",",
"\"imagelabels.mat\"",
")",
",",
"\"setid\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"_BASE_URL",
",",
"\"setid.mat\"",
")",
",",
"}",
")",
"gen_kwargs",
"=",
"dict",
"(",
"images_dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_paths",
"[",
"\"images\"",
"]",
",",
"\"jpg\"",
")",
",",
"labels_path",
"=",
"dl_paths",
"[",
"\"labels\"",
"]",
",",
"setid_path",
"=",
"dl_paths",
"[",
"\"setid\"",
"]",
",",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"dict",
"(",
"split_name",
"=",
"\"trnid\"",
",",
"*",
"*",
"gen_kwargs",
")",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TEST",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"dict",
"(",
"split_name",
"=",
"\"tstid\"",
",",
"*",
"*",
"gen_kwargs",
")",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"VALIDATION",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"dict",
"(",
"split_name",
"=",
"\"valid\"",
",",
"*",
"*",
"gen_kwargs",
")",
")",
",",
"]"
] |
Returns SplitGenerators.
|
[
"Returns",
"SplitGenerators",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/oxford_flowers102.py#L70-L102
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/oxford_flowers102.py
|
OxfordFlowers102._generate_examples
|
def _generate_examples(self, images_dir_path, labels_path, setid_path,
split_name):
"""Yields examples."""
with tf.io.gfile.GFile(labels_path, "rb") as f:
labels = tfds.core.lazy_imports.scipy.io.loadmat(f)["labels"][0]
with tf.io.gfile.GFile(setid_path, "rb") as f:
examples = tfds.core.lazy_imports.scipy.io.loadmat(f)[split_name][0]
for image_id in examples:
file_name = "image_%05d.jpg" % image_id
yield {
"image": os.path.join(images_dir_path, file_name),
"label": labels[image_id - 1] - 1,
"file_name": file_name,
}
|
python
|
def _generate_examples(self, images_dir_path, labels_path, setid_path,
split_name):
"""Yields examples."""
with tf.io.gfile.GFile(labels_path, "rb") as f:
labels = tfds.core.lazy_imports.scipy.io.loadmat(f)["labels"][0]
with tf.io.gfile.GFile(setid_path, "rb") as f:
examples = tfds.core.lazy_imports.scipy.io.loadmat(f)[split_name][0]
for image_id in examples:
file_name = "image_%05d.jpg" % image_id
yield {
"image": os.path.join(images_dir_path, file_name),
"label": labels[image_id - 1] - 1,
"file_name": file_name,
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"images_dir_path",
",",
"labels_path",
",",
"setid_path",
",",
"split_name",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"labels_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"labels",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"scipy",
".",
"io",
".",
"loadmat",
"(",
"f",
")",
"[",
"\"labels\"",
"]",
"[",
"0",
"]",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"setid_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"examples",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"scipy",
".",
"io",
".",
"loadmat",
"(",
"f",
")",
"[",
"split_name",
"]",
"[",
"0",
"]",
"for",
"image_id",
"in",
"examples",
":",
"file_name",
"=",
"\"image_%05d.jpg\"",
"%",
"image_id",
"yield",
"{",
"\"image\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"images_dir_path",
",",
"file_name",
")",
",",
"\"label\"",
":",
"labels",
"[",
"image_id",
"-",
"1",
"]",
"-",
"1",
",",
"\"file_name\"",
":",
"file_name",
",",
"}"
] |
Yields examples.
|
[
"Yields",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/oxford_flowers102.py#L104-L118
|
train
|
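One detail worth calling out in the flowers generator is the index arithmetic: the `.mat` files use MATLAB's 1-based image ids and 1-based class labels, so both are shifted by one. A toy illustration with made-up label values:
```
import numpy as np

labels = np.array([77, 3, 51])   # stand-in for loadmat(labels_path)["labels"][0]
image_id = 2                     # 1-based id as stored in setid.mat

# "image_%05d.jpg" keeps the 1-based id; the class index becomes 0-based.
print("image_%05d.jpg" % image_id)     # image_00002.jpg
print(labels[image_id - 1] - 1)        # 2
```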
tensorflow/datasets
|
tensorflow_datasets/core/dataset_info.py
|
get_dataset_feature_statistics
|
def get_dataset_feature_statistics(builder, split):
"""Calculate statistics for the specified split."""
statistics = statistics_pb2.DatasetFeatureStatistics()
# Make this to the best of our abilities.
schema = schema_pb2.Schema()
dataset = builder.as_dataset(split=split)
# Just computing the number of examples for now.
statistics.num_examples = 0
# Feature dictionaries.
feature_to_num_examples = collections.defaultdict(int)
feature_to_min = {}
feature_to_max = {}
np_dataset = dataset_utils.as_numpy(dataset)
for example in utils.tqdm(np_dataset, unit=" examples", leave=False):
statistics.num_examples += 1
assert isinstance(example, dict)
feature_names = sorted(example.keys())
for feature_name in feature_names:
# Update the number of examples this feature appears in.
feature_to_num_examples[feature_name] += 1
feature_np = example[feature_name]
# For compatibility in graph and eager mode, we can get PODs here and
# everything may not be neatly wrapped up in numpy's ndarray.
feature_dtype = type(feature_np)
if isinstance(feature_np, np.ndarray):
# If we have an empty array, then don't proceed further with computing
# statistics on it.
if feature_np.size == 0:
continue
feature_dtype = feature_np.dtype.type
feature_min, feature_max = None, None
is_numeric = (np.issubdtype(feature_dtype, np.number) or
feature_dtype == np.bool_)
if is_numeric:
feature_min = np.min(feature_np)
feature_max = np.max(feature_np)
# TODO(afrozm): What if shapes don't match? Populate ValueCount? Add
# logic for that.
# Set or update the min, max.
if is_numeric:
if ((feature_name not in feature_to_min) or
(feature_to_min[feature_name] > feature_min)):
feature_to_min[feature_name] = feature_min
if ((feature_name not in feature_to_max) or
(feature_to_max[feature_name] < feature_max)):
feature_to_max[feature_name] = feature_max
# Start here, we've processed all examples.
output_shapes_dict = dataset.output_shapes
output_types_dict = dataset.output_types
for feature_name in sorted(feature_to_num_examples.keys()):
# Try to fill in the schema.
feature = schema.feature.add()
feature.name = feature_name
# TODO(afrozm): Make this work with nested structures, currently the Schema
# proto has no support for it.
maybe_feature_shape = output_shapes_dict[feature_name]
if not isinstance(maybe_feature_shape, tf.TensorShape):
logging.error(
"Statistics generation doesn't work for nested structures yet")
continue
for dim in maybe_feature_shape.as_list():
# We denote `None`s as -1 in the shape proto.
feature.shape.dim.add().size = dim if dim else -1
feature_type = output_types_dict[feature_name]
feature.type = _FEATURE_TYPE_MAP.get(feature_type, schema_pb2.BYTES)
common_statistics = statistics_pb2.CommonStatistics()
common_statistics.num_non_missing = feature_to_num_examples[feature_name]
common_statistics.num_missing = (
statistics.num_examples - common_statistics.num_non_missing)
feature_name_statistics = statistics.features.add()
feature_name_statistics.name = feature_name
# TODO(afrozm): This can be skipped, since type information was added to
# the Schema.
feature_name_statistics.type = _SCHEMA_TYPE_MAP.get(
feature.type, statistics_pb2.FeatureNameStatistics.BYTES)
if feature.type == schema_pb2.INT or feature.type == schema_pb2.FLOAT:
numeric_statistics = statistics_pb2.NumericStatistics()
numeric_statistics.min = feature_to_min[feature_name]
numeric_statistics.max = feature_to_max[feature_name]
numeric_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.num_stats.CopyFrom(numeric_statistics)
else:
# Let's shove it into BytesStatistics for now.
bytes_statistics = statistics_pb2.BytesStatistics()
bytes_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.bytes_stats.CopyFrom(bytes_statistics)
return statistics, schema
|
python
|
def get_dataset_feature_statistics(builder, split):
"""Calculate statistics for the specified split."""
statistics = statistics_pb2.DatasetFeatureStatistics()
# Make this to the best of our abilities.
schema = schema_pb2.Schema()
dataset = builder.as_dataset(split=split)
# Just computing the number of examples for now.
statistics.num_examples = 0
# Feature dictionaries.
feature_to_num_examples = collections.defaultdict(int)
feature_to_min = {}
feature_to_max = {}
np_dataset = dataset_utils.as_numpy(dataset)
for example in utils.tqdm(np_dataset, unit=" examples", leave=False):
statistics.num_examples += 1
assert isinstance(example, dict)
feature_names = sorted(example.keys())
for feature_name in feature_names:
# Update the number of examples this feature appears in.
feature_to_num_examples[feature_name] += 1
feature_np = example[feature_name]
# For compatibility in graph and eager mode, we can get PODs here and
# everything may not be neatly wrapped up in numpy's ndarray.
feature_dtype = type(feature_np)
if isinstance(feature_np, np.ndarray):
# If we have an empty array, then don't proceed further with computing
# statistics on it.
if feature_np.size == 0:
continue
feature_dtype = feature_np.dtype.type
feature_min, feature_max = None, None
is_numeric = (np.issubdtype(feature_dtype, np.number) or
feature_dtype == np.bool_)
if is_numeric:
feature_min = np.min(feature_np)
feature_max = np.max(feature_np)
# TODO(afrozm): What if shapes don't match? Populate ValueCount? Add
# logic for that.
# Set or update the min, max.
if is_numeric:
if ((feature_name not in feature_to_min) or
(feature_to_min[feature_name] > feature_min)):
feature_to_min[feature_name] = feature_min
if ((feature_name not in feature_to_max) or
(feature_to_max[feature_name] < feature_max)):
feature_to_max[feature_name] = feature_max
# Start here, we've processed all examples.
output_shapes_dict = dataset.output_shapes
output_types_dict = dataset.output_types
for feature_name in sorted(feature_to_num_examples.keys()):
# Try to fill in the schema.
feature = schema.feature.add()
feature.name = feature_name
# TODO(afrozm): Make this work with nested structures, currently the Schema
# proto has no support for it.
maybe_feature_shape = output_shapes_dict[feature_name]
if not isinstance(maybe_feature_shape, tf.TensorShape):
logging.error(
"Statistics generation doesn't work for nested structures yet")
continue
for dim in maybe_feature_shape.as_list():
# We denote `None`s as -1 in the shape proto.
feature.shape.dim.add().size = dim if dim else -1
feature_type = output_types_dict[feature_name]
feature.type = _FEATURE_TYPE_MAP.get(feature_type, schema_pb2.BYTES)
common_statistics = statistics_pb2.CommonStatistics()
common_statistics.num_non_missing = feature_to_num_examples[feature_name]
common_statistics.num_missing = (
statistics.num_examples - common_statistics.num_non_missing)
feature_name_statistics = statistics.features.add()
feature_name_statistics.name = feature_name
# TODO(afrozm): This can be skipped, since type information was added to
# the Schema.
feature_name_statistics.type = _SCHEMA_TYPE_MAP.get(
feature.type, statistics_pb2.FeatureNameStatistics.BYTES)
if feature.type == schema_pb2.INT or feature.type == schema_pb2.FLOAT:
numeric_statistics = statistics_pb2.NumericStatistics()
numeric_statistics.min = feature_to_min[feature_name]
numeric_statistics.max = feature_to_max[feature_name]
numeric_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.num_stats.CopyFrom(numeric_statistics)
else:
# Let's shove it into BytesStatistics for now.
bytes_statistics = statistics_pb2.BytesStatistics()
bytes_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.bytes_stats.CopyFrom(bytes_statistics)
return statistics, schema
|
[
"def",
"get_dataset_feature_statistics",
"(",
"builder",
",",
"split",
")",
":",
"statistics",
"=",
"statistics_pb2",
".",
"DatasetFeatureStatistics",
"(",
")",
"# Make this to the best of our abilities.",
"schema",
"=",
"schema_pb2",
".",
"Schema",
"(",
")",
"dataset",
"=",
"builder",
".",
"as_dataset",
"(",
"split",
"=",
"split",
")",
"# Just computing the number of examples for now.",
"statistics",
".",
"num_examples",
"=",
"0",
"# Feature dictionaries.",
"feature_to_num_examples",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"feature_to_min",
"=",
"{",
"}",
"feature_to_max",
"=",
"{",
"}",
"np_dataset",
"=",
"dataset_utils",
".",
"as_numpy",
"(",
"dataset",
")",
"for",
"example",
"in",
"utils",
".",
"tqdm",
"(",
"np_dataset",
",",
"unit",
"=",
"\" examples\"",
",",
"leave",
"=",
"False",
")",
":",
"statistics",
".",
"num_examples",
"+=",
"1",
"assert",
"isinstance",
"(",
"example",
",",
"dict",
")",
"feature_names",
"=",
"sorted",
"(",
"example",
".",
"keys",
"(",
")",
")",
"for",
"feature_name",
"in",
"feature_names",
":",
"# Update the number of examples this feature appears in.",
"feature_to_num_examples",
"[",
"feature_name",
"]",
"+=",
"1",
"feature_np",
"=",
"example",
"[",
"feature_name",
"]",
"# For compatibility in graph and eager mode, we can get PODs here and",
"# everything may not be neatly wrapped up in numpy's ndarray.",
"feature_dtype",
"=",
"type",
"(",
"feature_np",
")",
"if",
"isinstance",
"(",
"feature_np",
",",
"np",
".",
"ndarray",
")",
":",
"# If we have an empty array, then don't proceed further with computing",
"# statistics on it.",
"if",
"feature_np",
".",
"size",
"==",
"0",
":",
"continue",
"feature_dtype",
"=",
"feature_np",
".",
"dtype",
".",
"type",
"feature_min",
",",
"feature_max",
"=",
"None",
",",
"None",
"is_numeric",
"=",
"(",
"np",
".",
"issubdtype",
"(",
"feature_dtype",
",",
"np",
".",
"number",
")",
"or",
"feature_dtype",
"==",
"np",
".",
"bool_",
")",
"if",
"is_numeric",
":",
"feature_min",
"=",
"np",
".",
"min",
"(",
"feature_np",
")",
"feature_max",
"=",
"np",
".",
"max",
"(",
"feature_np",
")",
"# TODO(afrozm): What if shapes don't match? Populate ValueCount? Add",
"# logic for that.",
"# Set or update the min, max.",
"if",
"is_numeric",
":",
"if",
"(",
"(",
"feature_name",
"not",
"in",
"feature_to_min",
")",
"or",
"(",
"feature_to_min",
"[",
"feature_name",
"]",
">",
"feature_min",
")",
")",
":",
"feature_to_min",
"[",
"feature_name",
"]",
"=",
"feature_min",
"if",
"(",
"(",
"feature_name",
"not",
"in",
"feature_to_max",
")",
"or",
"(",
"feature_to_max",
"[",
"feature_name",
"]",
"<",
"feature_max",
")",
")",
":",
"feature_to_max",
"[",
"feature_name",
"]",
"=",
"feature_max",
"# Start here, we've processed all examples.",
"output_shapes_dict",
"=",
"dataset",
".",
"output_shapes",
"output_types_dict",
"=",
"dataset",
".",
"output_types",
"for",
"feature_name",
"in",
"sorted",
"(",
"feature_to_num_examples",
".",
"keys",
"(",
")",
")",
":",
"# Try to fill in the schema.",
"feature",
"=",
"schema",
".",
"feature",
".",
"add",
"(",
")",
"feature",
".",
"name",
"=",
"feature_name",
"# TODO(afrozm): Make this work with nested structures, currently the Schema",
"# proto has no support for it.",
"maybe_feature_shape",
"=",
"output_shapes_dict",
"[",
"feature_name",
"]",
"if",
"not",
"isinstance",
"(",
"maybe_feature_shape",
",",
"tf",
".",
"TensorShape",
")",
":",
"logging",
".",
"error",
"(",
"\"Statistics generation doesn't work for nested structures yet\"",
")",
"continue",
"for",
"dim",
"in",
"maybe_feature_shape",
".",
"as_list",
"(",
")",
":",
"# We denote `None`s as -1 in the shape proto.",
"feature",
".",
"shape",
".",
"dim",
".",
"add",
"(",
")",
".",
"size",
"=",
"dim",
"if",
"dim",
"else",
"-",
"1",
"feature_type",
"=",
"output_types_dict",
"[",
"feature_name",
"]",
"feature",
".",
"type",
"=",
"_FEATURE_TYPE_MAP",
".",
"get",
"(",
"feature_type",
",",
"schema_pb2",
".",
"BYTES",
")",
"common_statistics",
"=",
"statistics_pb2",
".",
"CommonStatistics",
"(",
")",
"common_statistics",
".",
"num_non_missing",
"=",
"feature_to_num_examples",
"[",
"feature_name",
"]",
"common_statistics",
".",
"num_missing",
"=",
"(",
"statistics",
".",
"num_examples",
"-",
"common_statistics",
".",
"num_non_missing",
")",
"feature_name_statistics",
"=",
"statistics",
".",
"features",
".",
"add",
"(",
")",
"feature_name_statistics",
".",
"name",
"=",
"feature_name",
"# TODO(afrozm): This can be skipped, since type information was added to",
"# the Schema.",
"feature_name_statistics",
".",
"type",
"=",
"_SCHEMA_TYPE_MAP",
".",
"get",
"(",
"feature",
".",
"type",
",",
"statistics_pb2",
".",
"FeatureNameStatistics",
".",
"BYTES",
")",
"if",
"feature",
".",
"type",
"==",
"schema_pb2",
".",
"INT",
"or",
"feature",
".",
"type",
"==",
"schema_pb2",
".",
"FLOAT",
":",
"numeric_statistics",
"=",
"statistics_pb2",
".",
"NumericStatistics",
"(",
")",
"numeric_statistics",
".",
"min",
"=",
"feature_to_min",
"[",
"feature_name",
"]",
"numeric_statistics",
".",
"max",
"=",
"feature_to_max",
"[",
"feature_name",
"]",
"numeric_statistics",
".",
"common_stats",
".",
"CopyFrom",
"(",
"common_statistics",
")",
"feature_name_statistics",
".",
"num_stats",
".",
"CopyFrom",
"(",
"numeric_statistics",
")",
"else",
":",
"# Let's shove it into BytesStatistics for now.",
"bytes_statistics",
"=",
"statistics_pb2",
".",
"BytesStatistics",
"(",
")",
"bytes_statistics",
".",
"common_stats",
".",
"CopyFrom",
"(",
"common_statistics",
")",
"feature_name_statistics",
".",
"bytes_stats",
".",
"CopyFrom",
"(",
"bytes_statistics",
")",
"return",
"statistics",
",",
"schema"
] |
Calculate statistics for the specified split.
|
[
"Calculate",
"statistics",
"for",
"the",
"specified",
"split",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L443-L556
|
train
|
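The core of the statistics pass is a simple accumulation over example dicts: count how often each feature appears and track min/max for numeric ones. A standalone sketch of that inner loop with toy examples and no protos:
```
import collections
import numpy as np

feature_to_num_examples = collections.defaultdict(int)
feature_to_min, feature_to_max = {}, {}

examples = [{"label": np.int64(3)}, {"label": np.int64(7)}]  # toy stream
for example in examples:
  for name, value in example.items():
    feature_to_num_examples[name] += 1
    if np.issubdtype(type(value), np.number):
      feature_to_min[name] = min(feature_to_min.get(name, value), value)
      feature_to_max[name] = max(feature_to_max.get(name, value), value)

print(feature_to_num_examples["label"],
      feature_to_min["label"], feature_to_max["label"])  # 2 3 7
```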
tensorflow/datasets
|
tensorflow_datasets/core/dataset_info.py
|
read_from_json
|
def read_from_json(json_filename):
"""Read JSON-formatted proto into DatasetInfo proto."""
with tf.io.gfile.GFile(json_filename) as f:
dataset_info_json_str = f.read()
# Parse it back into a proto.
parsed_proto = json_format.Parse(dataset_info_json_str,
dataset_info_pb2.DatasetInfo())
return parsed_proto
|
python
|
def read_from_json(json_filename):
"""Read JSON-formatted proto into DatasetInfo proto."""
with tf.io.gfile.GFile(json_filename) as f:
dataset_info_json_str = f.read()
# Parse it back into a proto.
parsed_proto = json_format.Parse(dataset_info_json_str,
dataset_info_pb2.DatasetInfo())
return parsed_proto
|
[
"def",
"read_from_json",
"(",
"json_filename",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"json_filename",
")",
"as",
"f",
":",
"dataset_info_json_str",
"=",
"f",
".",
"read",
"(",
")",
"# Parse it back into a proto.",
"parsed_proto",
"=",
"json_format",
".",
"Parse",
"(",
"dataset_info_json_str",
",",
"dataset_info_pb2",
".",
"DatasetInfo",
"(",
")",
")",
"return",
"parsed_proto"
] |
Read JSON-formatted proto into DatasetInfo proto.
|
[
"Read",
"JSON",
"-",
"formatted",
"proto",
"into",
"DatasetInfo",
"proto",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L559-L566
|
train
|
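The `json_format.Parse` pattern used above works for any protobuf message. A round-trip sketch on the generic `Struct` proto, used here only so the snippet does not depend on `dataset_info_pb2`:
```
from google.protobuf import json_format
from google.protobuf import struct_pb2

json_str = '{"name": "mnist", "version": "1.0.0"}'   # illustrative payload
msg = json_format.Parse(json_str, struct_pb2.Struct())
print(json_format.MessageToJson(msg))
```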
tensorflow/datasets
|
tensorflow_datasets/core/dataset_info.py
|
DatasetInfo.full_name
|
def full_name(self):
"""Full canonical name: (<dataset_name>/<config_name>/<version>)."""
names = [self._builder.name]
if self._builder.builder_config:
names.append(self._builder.builder_config.name)
names.append(str(self.version))
return posixpath.join(*names)
|
python
|
def full_name(self):
"""Full canonical name: (<dataset_name>/<config_name>/<version>)."""
names = [self._builder.name]
if self._builder.builder_config:
names.append(self._builder.builder_config.name)
names.append(str(self.version))
return posixpath.join(*names)
|
[
"def",
"full_name",
"(",
"self",
")",
":",
"names",
"=",
"[",
"self",
".",
"_builder",
".",
"name",
"]",
"if",
"self",
".",
"_builder",
".",
"builder_config",
":",
"names",
".",
"append",
"(",
"self",
".",
"_builder",
".",
"builder_config",
".",
"name",
")",
"names",
".",
"append",
"(",
"str",
"(",
"self",
".",
"version",
")",
")",
"return",
"posixpath",
".",
"join",
"(",
"*",
"names",
")"
] |
Full canonical name: (<dataset_name>/<config_name>/<version>).
|
[
"Full",
"canonical",
"name",
":",
"(",
"<dataset_name",
">",
"/",
"<config_name",
">",
"/",
"<version",
">",
")",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L150-L156
|
train
|
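The canonical name is just a POSIX-style join of the builder name, optional config name, and version; the names below are illustrative only.
```
import posixpath

print(posixpath.join("my_dataset", "1.0.0"))               # no builder config
print(posixpath.join("my_dataset", "my_config", "1.0.0"))  # with a config
```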
tensorflow/datasets
|
tensorflow_datasets/core/dataset_info.py
|
DatasetInfo.update_splits_if_different
|
def update_splits_if_different(self, split_dict):
"""Overwrite the splits if they are different from the current ones.
* If splits aren't already defined, or are different (e.g. a different number of
shards), then the new split dict is used. This will trigger stats
computation during download_and_prepare.
* If splits are already defined in DatasetInfo and similar (same names and
shards): keep the restored split which contains the statistics (restored
from GCS or file)
Args:
split_dict: `tfds.core.SplitDict`, the new split
"""
assert isinstance(split_dict, splits_lib.SplitDict)
# If splits are already defined and identical, then we do not update
if self._splits and splits_lib.check_splits_equals(
self._splits, split_dict):
return
self._set_splits(split_dict)
|
python
|
def update_splits_if_different(self, split_dict):
"""Overwrite the splits if they are different from the current ones.
* If splits aren't already defined, or are different (e.g. a different number of
shards), then the new split dict is used. This will trigger stats
computation during download_and_prepare.
* If splits are already defined in DatasetInfo and similar (same names and
shards): keep the restored split which contains the statistics (restored
from GCS or file)
Args:
split_dict: `tfds.core.SplitDict`, the new split
"""
assert isinstance(split_dict, splits_lib.SplitDict)
# If splits are already defined and identical, then we do not update
if self._splits and splits_lib.check_splits_equals(
self._splits, split_dict):
return
self._set_splits(split_dict)
|
[
"def",
"update_splits_if_different",
"(",
"self",
",",
"split_dict",
")",
":",
"assert",
"isinstance",
"(",
"split_dict",
",",
"splits_lib",
".",
"SplitDict",
")",
"# If splits are already defined and identical, then we do not update",
"if",
"self",
".",
"_splits",
"and",
"splits_lib",
".",
"check_splits_equals",
"(",
"self",
".",
"_splits",
",",
"split_dict",
")",
":",
"return",
"self",
".",
"_set_splits",
"(",
"split_dict",
")"
] |
Overwrite the splits if they are different from the current ones.
* If splits aren't already defined, or are different (e.g. a different number of
shards), then the new split dict is used. This will trigger stats
computation during download_and_prepare.
* If splits are already defined in DatasetInfo and similar (same names and
shards): keep the restored split which contains the statistics (restored
from GCS or file)
Args:
split_dict: `tfds.core.SplitDict`, the new split
|
[
"Overwrite",
"the",
"splits",
"if",
"they",
"are",
"different",
"from",
"the",
"current",
"ones",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L197-L217
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/dataset_info.py
|
DatasetInfo._set_splits
|
def _set_splits(self, split_dict):
"""Split setter (private method)."""
# Update the dictionary representation.
# Use from/to proto for a clean copy
self._splits = split_dict.copy()
# Update the proto
del self.as_proto.splits[:] # Clear previous
for split_info in split_dict.to_proto():
self.as_proto.splits.add().CopyFrom(split_info)
|
python
|
def _set_splits(self, split_dict):
"""Split setter (private method)."""
# Update the dictionary representation.
# Use from/to proto for a clean copy
self._splits = split_dict.copy()
# Update the proto
del self.as_proto.splits[:] # Clear previous
for split_info in split_dict.to_proto():
self.as_proto.splits.add().CopyFrom(split_info)
|
[
"def",
"_set_splits",
"(",
"self",
",",
"split_dict",
")",
":",
"# Update the dictionary representation.",
"# Use from/to proto for a clean copy",
"self",
".",
"_splits",
"=",
"split_dict",
".",
"copy",
"(",
")",
"# Update the proto",
"del",
"self",
".",
"as_proto",
".",
"splits",
"[",
":",
"]",
"# Clear previous",
"for",
"split_info",
"in",
"split_dict",
".",
"to_proto",
"(",
")",
":",
"self",
".",
"as_proto",
".",
"splits",
".",
"add",
"(",
")",
".",
"CopyFrom",
"(",
"split_info",
")"
] |
Split setter (private method).
|
[
"Split",
"setter",
"(",
"private",
"method",
")",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L219-L228
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/dataset_info.py
|
DatasetInfo._compute_dynamic_properties
|
def _compute_dynamic_properties(self, builder):
"""Update from the DatasetBuilder."""
# Fill other things by going over the dataset.
splits = self.splits
for split_info in utils.tqdm(
splits.values(), desc="Computing statistics...", unit=" split"):
try:
split_name = split_info.name
# Fill DatasetFeatureStatistics.
dataset_feature_statistics, schema = get_dataset_feature_statistics(
builder, split_name)
# Add the statistics to this split.
split_info.statistics.CopyFrom(dataset_feature_statistics)
# Set the schema at the top-level since this is independent of the
# split.
self.as_proto.schema.CopyFrom(schema)
except tf.errors.InvalidArgumentError:
# This means there is no such split, even though it was specified in the
# info, the least we can do is to log this.
logging.error(("%s's info() property specifies split %s, but it "
"doesn't seem to have been generated. Please ensure "
"that the data was downloaded for this split and re-run "
"download_and_prepare."), self.name, split_name)
raise
# Set splits to trigger proto update in setter
self._set_splits(splits)
|
python
|
def _compute_dynamic_properties(self, builder):
"""Update from the DatasetBuilder."""
# Fill other things by going over the dataset.
splits = self.splits
for split_info in utils.tqdm(
splits.values(), desc="Computing statistics...", unit=" split"):
try:
split_name = split_info.name
# Fill DatasetFeatureStatistics.
dataset_feature_statistics, schema = get_dataset_feature_statistics(
builder, split_name)
# Add the statistics to this split.
split_info.statistics.CopyFrom(dataset_feature_statistics)
# Set the schema at the top-level since this is independent of the
# split.
self.as_proto.schema.CopyFrom(schema)
except tf.errors.InvalidArgumentError:
# This means there is no such split, even though it was specified in the
# info, the least we can do is to log this.
logging.error(("%s's info() property specifies split %s, but it "
"doesn't seem to have been generated. Please ensure "
"that the data was downloaded for this split and re-run "
"download_and_prepare."), self.name, split_name)
raise
# Set splits to trigger proto update in setter
self._set_splits(splits)
|
[
"def",
"_compute_dynamic_properties",
"(",
"self",
",",
"builder",
")",
":",
"# Fill other things by going over the dataset.",
"splits",
"=",
"self",
".",
"splits",
"for",
"split_info",
"in",
"utils",
".",
"tqdm",
"(",
"splits",
".",
"values",
"(",
")",
",",
"desc",
"=",
"\"Computing statistics...\"",
",",
"unit",
"=",
"\" split\"",
")",
":",
"try",
":",
"split_name",
"=",
"split_info",
".",
"name",
"# Fill DatasetFeatureStatistics.",
"dataset_feature_statistics",
",",
"schema",
"=",
"get_dataset_feature_statistics",
"(",
"builder",
",",
"split_name",
")",
"# Add the statistics to this split.",
"split_info",
".",
"statistics",
".",
"CopyFrom",
"(",
"dataset_feature_statistics",
")",
"# Set the schema at the top-level since this is independent of the",
"# split.",
"self",
".",
"as_proto",
".",
"schema",
".",
"CopyFrom",
"(",
"schema",
")",
"except",
"tf",
".",
"errors",
".",
"InvalidArgumentError",
":",
"# This means there is no such split, even though it was specified in the",
"# info, the least we can do is to log this.",
"logging",
".",
"error",
"(",
"(",
"\"%s's info() property specifies split %s, but it \"",
"\"doesn't seem to have been generated. Please ensure \"",
"\"that the data was downloaded for this split and re-run \"",
"\"download_and_prepare.\"",
")",
",",
"self",
".",
"name",
",",
"split_name",
")",
"raise",
"# Set splits to trigger proto update in setter",
"self",
".",
"_set_splits",
"(",
"splits",
")"
] |
Update from the DatasetBuilder.
|
[
"Update",
"from",
"the",
"DatasetBuilder",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L249-L278
|
train
|
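The `_compute_dynamic_properties` record above iterates every split once to fill per-split statistics and a top-level schema. A toy stand-alone sketch of that single pass, using a plain dict in place of the DatasetFeatureStatistics proto (the split contents below are placeholders, not real TFDS output):

splits = {"train": range(60000), "test": range(10000)}  # stand-in examples per split
statistics = {}
for split_name, examples in splits.items():
  # One pass over the split, recording a simple count instead of full feature stats.
  statistics[split_name] = {"num_examples": sum(1 for _ in examples)}
print(statistics)  # {'train': {'num_examples': 60000}, 'test': {'num_examples': 10000}}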
tensorflow/datasets
|
tensorflow_datasets/core/dataset_info.py
|
DatasetInfo.write_to_directory
|
def write_to_directory(self, dataset_info_dir):
"""Write `DatasetInfo` as JSON to `dataset_info_dir`."""
# Save the metadata from the features (vocabulary, labels,...)
if self.features:
self.features.save_metadata(dataset_info_dir)
if self.redistribution_info.license:
with tf.io.gfile.GFile(self._license_filename(dataset_info_dir),
"w") as f:
f.write(self.redistribution_info.license)
with tf.io.gfile.GFile(self._dataset_info_filename(dataset_info_dir),
"w") as f:
f.write(self.as_json)
|
python
|
def write_to_directory(self, dataset_info_dir):
"""Write `DatasetInfo` as JSON to `dataset_info_dir`."""
# Save the metadata from the features (vocabulary, labels,...)
if self.features:
self.features.save_metadata(dataset_info_dir)
if self.redistribution_info.license:
with tf.io.gfile.GFile(self._license_filename(dataset_info_dir),
"w") as f:
f.write(self.redistribution_info.license)
with tf.io.gfile.GFile(self._dataset_info_filename(dataset_info_dir),
"w") as f:
f.write(self.as_json)
|
[
"def",
"write_to_directory",
"(",
"self",
",",
"dataset_info_dir",
")",
":",
"# Save the metadata from the features (vocabulary, labels,...)",
"if",
"self",
".",
"features",
":",
"self",
".",
"features",
".",
"save_metadata",
"(",
"dataset_info_dir",
")",
"if",
"self",
".",
"redistribution_info",
".",
"license",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"self",
".",
"_license_filename",
"(",
"dataset_info_dir",
")",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"redistribution_info",
".",
"license",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"self",
".",
"_dataset_info_filename",
"(",
"dataset_info_dir",
")",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"as_json",
")"
] |
Write `DatasetInfo` as JSON to `dataset_info_dir`.
|
[
"Write",
"DatasetInfo",
"as",
"JSON",
"to",
"dataset_info_dir",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L284-L297
|
train
|
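A minimal sketch of the same persistence pattern as `write_to_directory`, using plain `json`/`open` instead of `tf.io.gfile`; the metadata dict and license text are invented for illustration:

import json
import os
import tempfile

info = {"name": "my_dataset", "version": "1.0.0", "splits": {"train": 60000}}
license_text = "CC BY 4.0"  # only written when non-empty, as in the real method

out_dir = tempfile.mkdtemp()
with open(os.path.join(out_dir, "dataset_info.json"), "w") as f:
  json.dump(info, f, indent=2)
if license_text:
  with open(os.path.join(out_dir, "LICENSE"), "w") as f:
    f.write(license_text)
print(sorted(os.listdir(out_dir)))  # ['LICENSE', 'dataset_info.json']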
tensorflow/datasets
|
tensorflow_datasets/core/dataset_info.py
|
DatasetInfo.read_from_directory
|
def read_from_directory(self, dataset_info_dir):
"""Update DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version.
"""
if not dataset_info_dir:
raise ValueError(
"Calling read_from_directory with undefined dataset_info_dir.")
json_filename = self._dataset_info_filename(dataset_info_dir)
# Load the metadata from disk
parsed_proto = read_from_json(json_filename)
# Update splits
self._set_splits(splits_lib.SplitDict.from_proto(parsed_proto.splits))
# Restore the feature metadata (vocabulary, labels names,...)
if self.features:
self.features.load_metadata(dataset_info_dir)
# Update fields which are not defined in the code. This means that
# the code will overwrite fields which are present in
# dataset_info.json.
for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():
field_value = getattr(self._info_proto, field_name)
field_value_restored = getattr(parsed_proto, field_name)
try:
is_defined = self._info_proto.HasField(field_name)
except ValueError:
is_defined = bool(field_value)
try:
is_defined_in_restored = parsed_proto.HasField(field_name)
except ValueError:
is_defined_in_restored = bool(field_value_restored)
# If field is defined in code, we ignore the value
if is_defined:
if field_value != field_value_restored:
logging.info(
"Field info.%s from disk and from code do not match. Keeping "
"the one from code.", field_name)
continue
# If the field is also not defined in JSON file, we do nothing
if not is_defined_in_restored:
continue
# Otherwise, we restore the dataset_info.json value
if field.type == field.TYPE_MESSAGE:
field_value.MergeFrom(field_value_restored)
else:
setattr(self._info_proto, field_name, field_value_restored)
if self._builder._version != self.version: # pylint: disable=protected-access
raise AssertionError(
"The constructed DatasetInfo instance and the restored proto version "
"do not match. Builder version: {}. Proto version: {}".format(
self._builder._version, self.version)) # pylint: disable=protected-access
# Mark as fully initialized.
self._fully_initialized = True
|
python
|
def read_from_directory(self, dataset_info_dir):
"""Update DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version.
"""
if not dataset_info_dir:
raise ValueError(
"Calling read_from_directory with undefined dataset_info_dir.")
json_filename = self._dataset_info_filename(dataset_info_dir)
# Load the metadata from disk
parsed_proto = read_from_json(json_filename)
# Update splits
self._set_splits(splits_lib.SplitDict.from_proto(parsed_proto.splits))
# Restore the feature metadata (vocabulary, labels names,...)
if self.features:
self.features.load_metadata(dataset_info_dir)
# Update fields which are not defined in the code. This means that
# the code will overwrite fields which are present in
# dataset_info.json.
for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():
field_value = getattr(self._info_proto, field_name)
field_value_restored = getattr(parsed_proto, field_name)
try:
is_defined = self._info_proto.HasField(field_name)
except ValueError:
is_defined = bool(field_value)
try:
is_defined_in_restored = parsed_proto.HasField(field_name)
except ValueError:
is_defined_in_restored = bool(field_value_restored)
# If field is defined in code, we ignore the value
if is_defined:
if field_value != field_value_restored:
logging.info(
"Field info.%s from disk and from code do not match. Keeping "
"the one from code.", field_name)
continue
# If the field is also not defined in JSON file, we do nothing
if not is_defined_in_restored:
continue
# Otherwise, we restore the dataset_info.json value
if field.type == field.TYPE_MESSAGE:
field_value.MergeFrom(field_value_restored)
else:
setattr(self._info_proto, field_name, field_value_restored)
if self._builder._version != self.version: # pylint: disable=protected-access
raise AssertionError(
"The constructed DatasetInfo instance and the restored proto version "
"do not match. Builder version: {}. Proto version: {}".format(
self._builder._version, self.version)) # pylint: disable=protected-access
# Mark as fully initialized.
self._fully_initialized = True
|
[
"def",
"read_from_directory",
"(",
"self",
",",
"dataset_info_dir",
")",
":",
"if",
"not",
"dataset_info_dir",
":",
"raise",
"ValueError",
"(",
"\"Calling read_from_directory with undefined dataset_info_dir.\"",
")",
"json_filename",
"=",
"self",
".",
"_dataset_info_filename",
"(",
"dataset_info_dir",
")",
"# Load the metadata from disk",
"parsed_proto",
"=",
"read_from_json",
"(",
"json_filename",
")",
"# Update splits",
"self",
".",
"_set_splits",
"(",
"splits_lib",
".",
"SplitDict",
".",
"from_proto",
"(",
"parsed_proto",
".",
"splits",
")",
")",
"# Restore the feature metadata (vocabulary, labels names,...)",
"if",
"self",
".",
"features",
":",
"self",
".",
"features",
".",
"load_metadata",
"(",
"dataset_info_dir",
")",
"# Update fields which are not defined in the code. This means that",
"# the code will overwrite fields which are present in",
"# dataset_info.json.",
"for",
"field_name",
",",
"field",
"in",
"self",
".",
"as_proto",
".",
"DESCRIPTOR",
".",
"fields_by_name",
".",
"items",
"(",
")",
":",
"field_value",
"=",
"getattr",
"(",
"self",
".",
"_info_proto",
",",
"field_name",
")",
"field_value_restored",
"=",
"getattr",
"(",
"parsed_proto",
",",
"field_name",
")",
"try",
":",
"is_defined",
"=",
"self",
".",
"_info_proto",
".",
"HasField",
"(",
"field_name",
")",
"except",
"ValueError",
":",
"is_defined",
"=",
"bool",
"(",
"field_value",
")",
"try",
":",
"is_defined_in_restored",
"=",
"parsed_proto",
".",
"HasField",
"(",
"field_name",
")",
"except",
"ValueError",
":",
"is_defined_in_restored",
"=",
"bool",
"(",
"field_value_restored",
")",
"# If field is defined in code, we ignore the value",
"if",
"is_defined",
":",
"if",
"field_value",
"!=",
"field_value_restored",
":",
"logging",
".",
"info",
"(",
"\"Field info.%s from disk and from code do not match. Keeping \"",
"\"the one from code.\"",
",",
"field_name",
")",
"continue",
"# If the field is also not defined in JSON file, we do nothing",
"if",
"not",
"is_defined_in_restored",
":",
"continue",
"# Otherwise, we restore the dataset_info.json value",
"if",
"field",
".",
"type",
"==",
"field",
".",
"TYPE_MESSAGE",
":",
"field_value",
".",
"MergeFrom",
"(",
"field_value_restored",
")",
"else",
":",
"setattr",
"(",
"self",
".",
"_info_proto",
",",
"field_name",
",",
"field_value_restored",
")",
"if",
"self",
".",
"_builder",
".",
"_version",
"!=",
"self",
".",
"version",
":",
"# pylint: disable=protected-access",
"raise",
"AssertionError",
"(",
"\"The constructed DatasetInfo instance and the restored proto version \"",
"\"do not match. Builder version: {}. Proto version: {}\"",
".",
"format",
"(",
"self",
".",
"_builder",
".",
"_version",
",",
"self",
".",
"version",
")",
")",
"# pylint: disable=protected-access",
"# Mark as fully initialized.",
"self",
".",
"_fully_initialized",
"=",
"True"
] |
Update DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version.
|
[
"Update",
"DatasetInfo",
"from",
"the",
"JSON",
"file",
"in",
"dataset_info_dir",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L299-L367
|
train
|
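The core of `read_from_directory` is a merge policy: a field defined in code wins over the value restored from dataset_info.json, and the restored value is only used when the code leaves the field empty. A toy sketch of that policy with plain dicts (the field names and values are invented, and truthiness stands in for the proto HasField check):

code_info = {"description": "Defined in code", "size_in_bytes": 0, "citation": ""}
restored = {"description": "From JSON", "size_in_bytes": 1234, "citation": "@article{...}"}

merged = {}
for field in sorted(set(code_info) | set(restored)):
  if code_info.get(field):        # defined in code -> keep the code value
    merged[field] = code_info[field]
  elif restored.get(field):       # otherwise fall back to the restored value
    merged[field] = restored[field]
print(merged)
# {'citation': '@article{...}', 'description': 'Defined in code', 'size_in_bytes': 1234}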
tensorflow/datasets
|
tensorflow_datasets/core/dataset_info.py
|
DatasetInfo.initialize_from_bucket
|
def initialize_from_bucket(self):
"""Initialize DatasetInfo from GCS bucket info files."""
# In order to support Colab, we use the HTTP GCS API to access the metadata
# files. They are copied locally and then loaded.
tmp_dir = tempfile.mkdtemp("tfds")
data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
if not data_files:
return
logging.info("Loading info from GCS for %s", self.full_name)
for fname in data_files:
out_fname = os.path.join(tmp_dir, os.path.basename(fname))
gcs_utils.download_gcs_file(fname, out_fname)
self.read_from_directory(tmp_dir)
|
python
|
def initialize_from_bucket(self):
"""Initialize DatasetInfo from GCS bucket info files."""
# In order to support Colab, we use the HTTP GCS API to access the metadata
# files. They are copied locally and then loaded.
tmp_dir = tempfile.mkdtemp("tfds")
data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
if not data_files:
return
logging.info("Loading info from GCS for %s", self.full_name)
for fname in data_files:
out_fname = os.path.join(tmp_dir, os.path.basename(fname))
gcs_utils.download_gcs_file(fname, out_fname)
self.read_from_directory(tmp_dir)
|
[
"def",
"initialize_from_bucket",
"(",
"self",
")",
":",
"# In order to support Colab, we use the HTTP GCS API to access the metadata",
"# files. They are copied locally and then loaded.",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"\"tfds\"",
")",
"data_files",
"=",
"gcs_utils",
".",
"gcs_dataset_info_files",
"(",
"self",
".",
"full_name",
")",
"if",
"not",
"data_files",
":",
"return",
"logging",
".",
"info",
"(",
"\"Loading info from GCS for %s\"",
",",
"self",
".",
"full_name",
")",
"for",
"fname",
"in",
"data_files",
":",
"out_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
")",
"gcs_utils",
".",
"download_gcs_file",
"(",
"fname",
",",
"out_fname",
")",
"self",
".",
"read_from_directory",
"(",
"tmp_dir",
")"
] |
Initialize DatasetInfo from GCS bucket info files.
|
[
"Initialize",
"DatasetInfo",
"from",
"GCS",
"bucket",
"info",
"files",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L369-L381
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/cycle_gan.py
|
CycleGAN._split_generators
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
url = _DL_URLS[self.builder_config.name]
data_dirs = dl_manager.download_and_extract(url)
path_to_dataset = os.path.join(data_dirs, tf.io.gfile.listdir(data_dirs)[0])
train_a_path = os.path.join(path_to_dataset, "trainA")
train_b_path = os.path.join(path_to_dataset, "trainB")
test_a_path = os.path.join(path_to_dataset, "testA")
test_b_path = os.path.join(path_to_dataset, "testB")
return [
tfds.core.SplitGenerator(
name="trainA",
num_shards=10,
gen_kwargs={
"path": train_a_path,
"label": "A",
}),
tfds.core.SplitGenerator(
name="trainB",
num_shards=10,
gen_kwargs={
"path": train_b_path,
"label": "B",
}),
tfds.core.SplitGenerator(
name="testA",
num_shards=1,
gen_kwargs={
"path": test_a_path,
"label": "A",
}),
tfds.core.SplitGenerator(
name="testB",
num_shards=1,
gen_kwargs={
"path": test_b_path,
"label": "B",
}),
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
url = _DL_URLS[self.builder_config.name]
data_dirs = dl_manager.download_and_extract(url)
path_to_dataset = os.path.join(data_dirs, tf.io.gfile.listdir(data_dirs)[0])
train_a_path = os.path.join(path_to_dataset, "trainA")
train_b_path = os.path.join(path_to_dataset, "trainB")
test_a_path = os.path.join(path_to_dataset, "testA")
test_b_path = os.path.join(path_to_dataset, "testB")
return [
tfds.core.SplitGenerator(
name="trainA",
num_shards=10,
gen_kwargs={
"path": train_a_path,
"label": "A",
}),
tfds.core.SplitGenerator(
name="trainB",
num_shards=10,
gen_kwargs={
"path": train_b_path,
"label": "B",
}),
tfds.core.SplitGenerator(
name="testA",
num_shards=1,
gen_kwargs={
"path": test_a_path,
"label": "A",
}),
tfds.core.SplitGenerator(
name="testB",
num_shards=1,
gen_kwargs={
"path": test_b_path,
"label": "B",
}),
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"url",
"=",
"_DL_URLS",
"[",
"self",
".",
"builder_config",
".",
"name",
"]",
"data_dirs",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"url",
")",
"path_to_dataset",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dirs",
",",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"data_dirs",
")",
"[",
"0",
"]",
")",
"train_a_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_to_dataset",
",",
"\"trainA\"",
")",
"train_b_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_to_dataset",
",",
"\"trainB\"",
")",
"test_a_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_to_dataset",
",",
"\"testA\"",
")",
"test_b_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_to_dataset",
",",
"\"testB\"",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"\"trainA\"",
",",
"num_shards",
"=",
"10",
",",
"gen_kwargs",
"=",
"{",
"\"path\"",
":",
"train_a_path",
",",
"\"label\"",
":",
"\"A\"",
",",
"}",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"\"trainB\"",
",",
"num_shards",
"=",
"10",
",",
"gen_kwargs",
"=",
"{",
"\"path\"",
":",
"train_b_path",
",",
"\"label\"",
":",
"\"B\"",
",",
"}",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"\"testA\"",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"{",
"\"path\"",
":",
"test_a_path",
",",
"\"label\"",
":",
"\"A\"",
",",
"}",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"\"testB\"",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"{",
"\"path\"",
":",
"test_b_path",
",",
"\"label\"",
":",
"\"B\"",
",",
"}",
")",
",",
"]"
] |
Returns SplitGenerators.
|
[
"Returns",
"SplitGenerators",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cycle_gan.py#L108-L149
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
_map_promise
|
def _map_promise(map_fn, all_inputs):
"""Map the function into each element and resolve the promise."""
all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
res = utils.map_nested(_wait_on_promise, all_promises)
return res
|
python
|
def _map_promise(map_fn, all_inputs):
"""Map the function into each element and resolve the promise."""
all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
res = utils.map_nested(_wait_on_promise, all_promises)
return res
|
[
"def",
"_map_promise",
"(",
"map_fn",
",",
"all_inputs",
")",
":",
"all_promises",
"=",
"utils",
".",
"map_nested",
"(",
"map_fn",
",",
"all_inputs",
")",
"# Apply the function",
"res",
"=",
"utils",
".",
"map_nested",
"(",
"_wait_on_promise",
",",
"all_promises",
")",
"return",
"res"
] |
Map the function into each element and resolve the promise.
|
[
"Map",
"the",
"function",
"into",
"each",
"element",
"and",
"resolve",
"the",
"promise",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L392-L396
|
train
|
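A runnable sketch of the same "apply to every leaf, then resolve" pattern as `_map_promise`, substituting `concurrent.futures` for the promise library and a local `map_nested` helper for `utils.map_nested` (both are stand-ins, as is `fake_download`):

from concurrent.futures import ThreadPoolExecutor

def map_nested(fn, data):
  # Recursively apply fn to every leaf of a dict/list structure.
  if isinstance(data, dict):
    return {k: map_nested(fn, v) for k, v in data.items()}
  if isinstance(data, list):
    return [map_nested(fn, v) for v in data]
  return fn(data)

def fake_download(url):
  return "/downloads/" + url.rsplit("/", 1)[-1]  # stand-in for a real fetch

urls = {"train": ["http://x/a.zip", "http://x/b.zip"], "test": "http://x/c.zip"}
with ThreadPoolExecutor(max_workers=4) as executor:
  futures = map_nested(lambda u: executor.submit(fake_download, u), urls)
  paths = map_nested(lambda f: f.result(), futures)
print(paths)  # same nesting as `urls`, with each url replaced by its local path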
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager._handle_download_result
|
def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size):
"""Store dled file to definitive place, write INFO file, return path."""
fnames = tf.io.gfile.listdir(tmp_dir_path)
if len(fnames) > 1:
raise AssertionError('More than one file in %s.' % tmp_dir_path)
original_fname = fnames[0]
tmp_path = os.path.join(tmp_dir_path, original_fname)
self._recorded_sizes_checksums[resource.url] = (dl_size, sha256)
if self._register_checksums:
self._record_sizes_checksums()
elif (dl_size, sha256) != self._sizes_checksums.get(resource.url, None):
raise NonMatchingChecksumError(resource.url, tmp_path)
download_path = self._get_final_dl_path(resource.url, sha256)
resource_lib.write_info_file(resource, download_path, self._dataset_name,
original_fname)
# Unconditionally overwrite because either file doesn't exist or
# FORCE_DOWNLOAD=true
tf.io.gfile.rename(tmp_path, download_path, overwrite=True)
tf.io.gfile.rmtree(tmp_dir_path)
return download_path
|
python
|
def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size):
"""Store dled file to definitive place, write INFO file, return path."""
fnames = tf.io.gfile.listdir(tmp_dir_path)
if len(fnames) > 1:
raise AssertionError('More than one file in %s.' % tmp_dir_path)
original_fname = fnames[0]
tmp_path = os.path.join(tmp_dir_path, original_fname)
self._recorded_sizes_checksums[resource.url] = (dl_size, sha256)
if self._register_checksums:
self._record_sizes_checksums()
elif (dl_size, sha256) != self._sizes_checksums.get(resource.url, None):
raise NonMatchingChecksumError(resource.url, tmp_path)
download_path = self._get_final_dl_path(resource.url, sha256)
resource_lib.write_info_file(resource, download_path, self._dataset_name,
original_fname)
# Unconditionally overwrite because either file doesn't exist or
# FORCE_DOWNLOAD=true
tf.io.gfile.rename(tmp_path, download_path, overwrite=True)
tf.io.gfile.rmtree(tmp_dir_path)
return download_path
|
[
"def",
"_handle_download_result",
"(",
"self",
",",
"resource",
",",
"tmp_dir_path",
",",
"sha256",
",",
"dl_size",
")",
":",
"fnames",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"tmp_dir_path",
")",
"if",
"len",
"(",
"fnames",
")",
">",
"1",
":",
"raise",
"AssertionError",
"(",
"'More than one file in %s.'",
"%",
"tmp_dir_path",
")",
"original_fname",
"=",
"fnames",
"[",
"0",
"]",
"tmp_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir_path",
",",
"original_fname",
")",
"self",
".",
"_recorded_sizes_checksums",
"[",
"resource",
".",
"url",
"]",
"=",
"(",
"dl_size",
",",
"sha256",
")",
"if",
"self",
".",
"_register_checksums",
":",
"self",
".",
"_record_sizes_checksums",
"(",
")",
"elif",
"(",
"dl_size",
",",
"sha256",
")",
"!=",
"self",
".",
"_sizes_checksums",
".",
"get",
"(",
"resource",
".",
"url",
",",
"None",
")",
":",
"raise",
"NonMatchingChecksumError",
"(",
"resource",
".",
"url",
",",
"tmp_path",
")",
"download_path",
"=",
"self",
".",
"_get_final_dl_path",
"(",
"resource",
".",
"url",
",",
"sha256",
")",
"resource_lib",
".",
"write_info_file",
"(",
"resource",
",",
"download_path",
",",
"self",
".",
"_dataset_name",
",",
"original_fname",
")",
"# Unconditionally overwrite because either file doesn't exist or",
"# FORCE_DOWNLOAD=true",
"tf",
".",
"io",
".",
"gfile",
".",
"rename",
"(",
"tmp_path",
",",
"download_path",
",",
"overwrite",
"=",
"True",
")",
"tf",
".",
"io",
".",
"gfile",
".",
"rmtree",
"(",
"tmp_dir_path",
")",
"return",
"download_path"
] |
Store dled file to definitive place, write INFO file, return path.
|
[
"Store",
"dled",
"file",
"to",
"definitive",
"place",
"write",
"INFO",
"file",
"return",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L196-L215
|
train
|
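A self-contained sketch of the checksum step in `_handle_download_result`: hash the freshly downloaded file, compare against the expected value, and only then move it to its final location. It uses `hashlib`/`shutil` rather than `tf.io.gfile`, and the file contents are fabricated for the demo:

import hashlib
import os
import shutil
import tempfile

def sha256_of(path, block_size=8192):
  h = hashlib.sha256()
  with open(path, "rb") as f:
    for block in iter(lambda: f.read(block_size), b""):
      h.update(block)
  return h.hexdigest()

tmp_dir = tempfile.mkdtemp()
tmp_path = os.path.join(tmp_dir, "archive.tar.gz")
with open(tmp_path, "wb") as f:
  f.write(b"fake downloaded bytes")

expected_sha256 = sha256_of(tmp_path)  # in practice, loaded from a checksums file
if sha256_of(tmp_path) != expected_sha256:
  raise ValueError("Checksum mismatch for %s" % tmp_path)

final_path = os.path.join(tmp_dir, "final", "archive.tar.gz")
os.makedirs(os.path.dirname(final_path), exist_ok=True)
shutil.move(tmp_path, final_path)  # promote the verified file to its final place
print(final_path)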
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager._download
|
def _download(self, resource):
"""Download resource, returns Promise->path to downloaded file."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
url = resource.url
if url in self._sizes_checksums:
expected_sha256 = self._sizes_checksums[url][1]
download_path = self._get_final_dl_path(url, expected_sha256)
if not self._force_download and resource.exists_locally(download_path):
logging.info('URL %s already downloaded: reusing %s.',
url, download_path)
self._recorded_sizes_checksums[url] = self._sizes_checksums[url]
return promise.Promise.resolve(download_path)
# There is a slight difference between downloader and extractor here:
# the extractor manages its own temp directory, while the DownloadManager
# manages the temp directory of downloader.
download_dir_path = os.path.join(
self._download_dir,
'%s.tmp.%s' % (resource_lib.get_dl_dirname(url), uuid.uuid4().hex))
tf.io.gfile.makedirs(download_dir_path)
logging.info('Downloading %s into %s...', url, download_dir_path)
def callback(val):
checksum, dl_size = val
return self._handle_download_result(
resource, download_dir_path, checksum, dl_size)
return self._downloader.download(url, download_dir_path).then(callback)
|
python
|
def _download(self, resource):
"""Download resource, returns Promise->path to downloaded file."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
url = resource.url
if url in self._sizes_checksums:
expected_sha256 = self._sizes_checksums[url][1]
download_path = self._get_final_dl_path(url, expected_sha256)
if not self._force_download and resource.exists_locally(download_path):
logging.info('URL %s already downloaded: reusing %s.',
url, download_path)
self._recorded_sizes_checksums[url] = self._sizes_checksums[url]
return promise.Promise.resolve(download_path)
# There is a slight difference between downloader and extractor here:
# the extractor manages its own temp directory, while the DownloadManager
# manages the temp directory of downloader.
download_dir_path = os.path.join(
self._download_dir,
'%s.tmp.%s' % (resource_lib.get_dl_dirname(url), uuid.uuid4().hex))
tf.io.gfile.makedirs(download_dir_path)
logging.info('Downloading %s into %s...', url, download_dir_path)
def callback(val):
checksum, dl_size = val
return self._handle_download_result(
resource, download_dir_path, checksum, dl_size)
return self._downloader.download(url, download_dir_path).then(callback)
|
[
"def",
"_download",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"six",
".",
"string_types",
")",
":",
"resource",
"=",
"resource_lib",
".",
"Resource",
"(",
"url",
"=",
"resource",
")",
"url",
"=",
"resource",
".",
"url",
"if",
"url",
"in",
"self",
".",
"_sizes_checksums",
":",
"expected_sha256",
"=",
"self",
".",
"_sizes_checksums",
"[",
"url",
"]",
"[",
"1",
"]",
"download_path",
"=",
"self",
".",
"_get_final_dl_path",
"(",
"url",
",",
"expected_sha256",
")",
"if",
"not",
"self",
".",
"_force_download",
"and",
"resource",
".",
"exists_locally",
"(",
"download_path",
")",
":",
"logging",
".",
"info",
"(",
"'URL %s already downloaded: reusing %s.'",
",",
"url",
",",
"download_path",
")",
"self",
".",
"_recorded_sizes_checksums",
"[",
"url",
"]",
"=",
"self",
".",
"_sizes_checksums",
"[",
"url",
"]",
"return",
"promise",
".",
"Promise",
".",
"resolve",
"(",
"download_path",
")",
"# There is a slight difference between downloader and extractor here:",
"# the extractor manages its own temp directory, while the DownloadManager",
"# manages the temp directory of downloader.",
"download_dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_download_dir",
",",
"'%s.tmp.%s'",
"%",
"(",
"resource_lib",
".",
"get_dl_dirname",
"(",
"url",
")",
",",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
")",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"(",
"download_dir_path",
")",
"logging",
".",
"info",
"(",
"'Downloading %s into %s...'",
",",
"url",
",",
"download_dir_path",
")",
"def",
"callback",
"(",
"val",
")",
":",
"checksum",
",",
"dl_size",
"=",
"val",
"return",
"self",
".",
"_handle_download_result",
"(",
"resource",
",",
"download_dir_path",
",",
"checksum",
",",
"dl_size",
")",
"return",
"self",
".",
"_downloader",
".",
"download",
"(",
"url",
",",
"download_dir_path",
")",
".",
"then",
"(",
"callback",
")"
] |
Download resource, returns Promise->path to downloaded file.
|
[
"Download",
"resource",
"returns",
"Promise",
"-",
">",
"path",
"to",
"downloaded",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L221-L247
|
train
|
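The first branch of `_download` is a cache check: when the final path already exists and forced downloads are off, the URL is not fetched again. A small stand-alone sketch of that behaviour (the `fetch` callable, paths, and URL are placeholders):

import os
import tempfile

def maybe_download(url, final_path, fetch, force_download=False):
  if not force_download and os.path.exists(final_path):
    return final_path                      # reuse the cached file
  with open(final_path, "wb") as f:
    f.write(fetch(url))                    # stand-in for the real downloader
  return final_path

cache_dir = tempfile.mkdtemp()
target = os.path.join(cache_dir, "data.zip")
fetch = lambda url: b"downloaded bytes"
maybe_download("https://example.com/data.zip", target, fetch)   # downloads
maybe_download("https://example.com/data.zip", target, fetch)   # hits the cache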
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager._extract
|
def _extract(self, resource):
"""Extract a single archive, returns Promise->path to extraction result."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
path = resource.path
extract_method = resource.extract_method
if extract_method == resource_lib.ExtractMethod.NO_EXTRACT:
logging.info('Skipping extraction for %s (method=NO_EXTRACT).', path)
return promise.Promise.resolve(path)
method_name = resource_lib.ExtractMethod(extract_method).name
extract_path = os.path.join(self._extract_dir,
'%s.%s' % (method_name, os.path.basename(path)))
if not self._force_extraction and tf.io.gfile.exists(extract_path):
logging.info('Reusing extraction of %s at %s.', path, extract_path)
return promise.Promise.resolve(extract_path)
return self._extractor.extract(path, extract_method, extract_path)
|
python
|
def _extract(self, resource):
"""Extract a single archive, returns Promise->path to extraction result."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
path = resource.path
extract_method = resource.extract_method
if extract_method == resource_lib.ExtractMethod.NO_EXTRACT:
logging.info('Skipping extraction for %s (method=NO_EXTRACT).', path)
return promise.Promise.resolve(path)
method_name = resource_lib.ExtractMethod(extract_method).name
extract_path = os.path.join(self._extract_dir,
'%s.%s' % (method_name, os.path.basename(path)))
if not self._force_extraction and tf.io.gfile.exists(extract_path):
logging.info('Reusing extraction of %s at %s.', path, extract_path)
return promise.Promise.resolve(extract_path)
return self._extractor.extract(path, extract_method, extract_path)
|
[
"def",
"_extract",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"six",
".",
"string_types",
")",
":",
"resource",
"=",
"resource_lib",
".",
"Resource",
"(",
"path",
"=",
"resource",
")",
"path",
"=",
"resource",
".",
"path",
"extract_method",
"=",
"resource",
".",
"extract_method",
"if",
"extract_method",
"==",
"resource_lib",
".",
"ExtractMethod",
".",
"NO_EXTRACT",
":",
"logging",
".",
"info",
"(",
"'Skipping extraction for %s (method=NO_EXTRACT).'",
",",
"path",
")",
"return",
"promise",
".",
"Promise",
".",
"resolve",
"(",
"path",
")",
"method_name",
"=",
"resource_lib",
".",
"ExtractMethod",
"(",
"extract_method",
")",
".",
"name",
"extract_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_extract_dir",
",",
"'%s.%s'",
"%",
"(",
"method_name",
",",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
")",
")",
"if",
"not",
"self",
".",
"_force_extraction",
"and",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"extract_path",
")",
":",
"logging",
".",
"info",
"(",
"'Reusing extraction of %s at %s.'",
",",
"path",
",",
"extract_path",
")",
"return",
"promise",
".",
"Promise",
".",
"resolve",
"(",
"extract_path",
")",
"return",
"self",
".",
"_extractor",
".",
"extract",
"(",
"path",
",",
"extract_method",
",",
"extract_path",
")"
] |
Extract a single archive, returns Promise->path to extraction result.
|
[
"Extract",
"a",
"single",
"archive",
"returns",
"Promise",
"-",
">",
"path",
"to",
"extraction",
"result",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L251-L266
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager._download_extract
|
def _download_extract(self, resource):
"""Download-extract `Resource` or url, returns Promise->path."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
def callback(path):
resource.path = path
return self._extract(resource)
return self._download(resource).then(callback)
|
python
|
def _download_extract(self, resource):
"""Download-extract `Resource` or url, returns Promise->path."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
def callback(path):
resource.path = path
return self._extract(resource)
return self._download(resource).then(callback)
|
[
"def",
"_download_extract",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"six",
".",
"string_types",
")",
":",
"resource",
"=",
"resource_lib",
".",
"Resource",
"(",
"url",
"=",
"resource",
")",
"def",
"callback",
"(",
"path",
")",
":",
"resource",
".",
"path",
"=",
"path",
"return",
"self",
".",
"_extract",
"(",
"resource",
")",
"return",
"self",
".",
"_download",
"(",
"resource",
")",
".",
"then",
"(",
"callback",
")"
] |
Download-extract `Resource` or url, returns Promise->path.
|
[
"Download",
"-",
"extract",
"Resource",
"or",
"url",
"returns",
"Promise",
"-",
">",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L270-L277
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager.download_kaggle_data
|
def download_kaggle_data(self, competition_name):
"""Download data for a given Kaggle competition."""
with self._downloader.tqdm():
kaggle_downloader = self._downloader.kaggle_downloader(competition_name)
urls = kaggle_downloader.competition_urls
files = kaggle_downloader.competition_files
return _map_promise(self._download,
dict((f, u) for (f, u) in zip(files, urls)))
|
python
|
def download_kaggle_data(self, competition_name):
"""Download data for a given Kaggle competition."""
with self._downloader.tqdm():
kaggle_downloader = self._downloader.kaggle_downloader(competition_name)
urls = kaggle_downloader.competition_urls
files = kaggle_downloader.competition_files
return _map_promise(self._download,
dict((f, u) for (f, u) in zip(files, urls)))
|
[
"def",
"download_kaggle_data",
"(",
"self",
",",
"competition_name",
")",
":",
"with",
"self",
".",
"_downloader",
".",
"tqdm",
"(",
")",
":",
"kaggle_downloader",
"=",
"self",
".",
"_downloader",
".",
"kaggle_downloader",
"(",
"competition_name",
")",
"urls",
"=",
"kaggle_downloader",
".",
"competition_urls",
"files",
"=",
"kaggle_downloader",
".",
"competition_files",
"return",
"_map_promise",
"(",
"self",
".",
"_download",
",",
"dict",
"(",
"(",
"f",
",",
"u",
")",
"for",
"(",
"f",
",",
"u",
")",
"in",
"zip",
"(",
"files",
",",
"urls",
")",
")",
")"
] |
Download data for a given Kaggle competition.
|
[
"Download",
"data",
"for",
"a",
"given",
"Kaggle",
"competition",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L279-L286
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager.download
|
def download(self, url_or_urls):
"""Download given url(s).
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
url_or_urls.
"""
# Add progress bar to follow the download state
with self._downloader.tqdm():
return _map_promise(self._download, url_or_urls)
|
python
|
def download(self, url_or_urls):
"""Download given url(s).
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
url_or_urls.
"""
# Add progress bar to follow the download state
with self._downloader.tqdm():
return _map_promise(self._download, url_or_urls)
|
[
"def",
"download",
"(",
"self",
",",
"url_or_urls",
")",
":",
"# Add progress bar to follow the download state",
"with",
"self",
".",
"_downloader",
".",
"tqdm",
"(",
")",
":",
"return",
"_map_promise",
"(",
"self",
".",
"_download",
",",
"url_or_urls",
")"
] |
Download given url(s).
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
url_or_urls.
|
[
"Download",
"given",
"url",
"(",
"s",
")",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L288-L301
|
train
|
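In a builder's `_split_generators`, `download` is typically handed a dict of URLs and hands back a dict of local paths with the same shape. The sketch below fakes the download manager so it runs without tfds; `FakeDownloadManager` and the URLs are purely illustrative:

class FakeDownloadManager(object):
  """Stand-in that mirrors the dict-in/dict-out behaviour of download()."""

  def download(self, url_or_urls):
    if isinstance(url_or_urls, dict):
      return {k: self.download(v) for k, v in url_or_urls.items()}
    return "/downloads/" + url_or_urls.rsplit("/", 1)[-1]

dl_manager = FakeDownloadManager()
paths = dl_manager.download({
    "train": "https://example.com/train.zip",
    "test": "https://example.com/test.zip",
})
print(paths)  # {'train': '/downloads/train.zip', 'test': '/downloads/test.zip'}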
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager.iter_archive
|
def iter_archive(self, resource):
"""Returns iterator over files within archive.
**Important Note**: caller should read files as they are yielded.
Reading out of order is slow.
Args:
resource: path to archive or `tfds.download.Resource`.
Returns:
Generator yielding tuple (path_within_archive, file_obj).
"""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
return extractor.iter_archive(resource.path, resource.extract_method)
|
python
|
def iter_archive(self, resource):
"""Returns iterator over files within archive.
**Important Note**: caller should read files as they are yielded.
Reading out of order is slow.
Args:
resource: path to archive or `tfds.download.Resource`.
Returns:
Generator yielding tuple (path_within_archive, file_obj).
"""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
return extractor.iter_archive(resource.path, resource.extract_method)
|
[
"def",
"iter_archive",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"six",
".",
"string_types",
")",
":",
"resource",
"=",
"resource_lib",
".",
"Resource",
"(",
"path",
"=",
"resource",
")",
"return",
"extractor",
".",
"iter_archive",
"(",
"resource",
".",
"path",
",",
"resource",
".",
"extract_method",
")"
] |
Returns iterator over files within archive.
**Important Note**: caller should read files as they are yielded.
Reading out of order is slow.
Args:
resource: path to archive or `tfds.download.Resource`.
Returns:
Generator yielding tuple (path_within_archive, file_obj).
|
[
"Returns",
"iterator",
"over",
"files",
"within",
"archive",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L303-L317
|
train
|
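The "read files as they are yielded" note in `iter_archive` is about keeping the archive stream sequential. A runnable illustration of the same access pattern with the standard `tarfile` module (the in-memory archive is constructed just for the demo):

import io
import tarfile

# Build a tiny in-memory tar archive for the demo.
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
  data = b"hello"
  member = tarfile.TarInfo(name="a.txt")
  member.size = len(data)
  tar.addfile(member, io.BytesIO(data))
buf.seek(0)

with tarfile.open(fileobj=buf, mode="r") as tar:
  for member in tar:                  # members are yielded in archive order
    fobj = tar.extractfile(member)
    if fobj is None:                  # directories have no file object
      continue
    print(member.name, fobj.read())   # read now, while the stream is positioned here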
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager.extract
|
def extract(self, path_or_paths):
"""Extract given path(s).
Args:
path_or_paths: path or `list`/`dict` of path of file to extract. Each
path can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method is deduced
from downloaded file name.
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
"""
# Add progress bar to follow the download state
with self._extractor.tqdm():
return _map_promise(self._extract, path_or_paths)
|
python
|
def extract(self, path_or_paths):
"""Extract given path(s).
Args:
path_or_paths: path or `list`/`dict` of path of file to extract. Each
path can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method is deduced
from downloaded file name.
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
"""
# Add progress bar to follow the download state
with self._extractor.tqdm():
return _map_promise(self._extract, path_or_paths)
|
[
"def",
"extract",
"(",
"self",
",",
"path_or_paths",
")",
":",
"# Add progress bar to follow the download state",
"with",
"self",
".",
"_extractor",
".",
"tqdm",
"(",
")",
":",
"return",
"_map_promise",
"(",
"self",
".",
"_extract",
",",
"path_or_paths",
")"
] |
Extract given path(s).
Args:
path_or_paths: path or `list`/`dict` of path of file to extract. Each
path can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method is deduced
from downloaded file name.
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
|
[
"Extract",
"given",
"path",
"(",
"s",
")",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L319-L335
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager.download_and_extract
|
def download_and_extract(self, url_or_urls):
"""Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method will
automatically be deduced from downloaded file name.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
# Add progress bar to follow the download state
with self._downloader.tqdm():
with self._extractor.tqdm():
return _map_promise(self._download_extract, url_or_urls)
|
python
|
def download_and_extract(self, url_or_urls):
"""Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method will
automatically be deduced from downloaded file name.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
# Add progress bar to follow the download state
with self._downloader.tqdm():
with self._extractor.tqdm():
return _map_promise(self._download_extract, url_or_urls)
|
[
"def",
"download_and_extract",
"(",
"self",
",",
"url_or_urls",
")",
":",
"# Add progress bar to follow the download state",
"with",
"self",
".",
"_downloader",
".",
"tqdm",
"(",
")",
":",
"with",
"self",
".",
"_extractor",
".",
"tqdm",
"(",
")",
":",
"return",
"_map_promise",
"(",
"self",
".",
"_download_extract",
",",
"url_or_urls",
")"
] |
Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method will
automatically be deduced from downloaded file name.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
|
[
"Download",
"and",
"extract",
"given",
"url_or_urls",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L337-L359
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/download_manager.py
|
DownloadManager.manual_dir
|
def manual_dir(self):
"""Returns the directory containing the manually extracted data."""
if not tf.io.gfile.exists(self._manual_dir):
raise AssertionError(
'Manual directory {} does not exist. Create it and download/extract '
'dataset artifacts in there.'.format(self._manual_dir))
return self._manual_dir
|
python
|
def manual_dir(self):
"""Returns the directory containing the manually extracted data."""
if not tf.io.gfile.exists(self._manual_dir):
raise AssertionError(
'Manual directory {} does not exist. Create it and download/extract '
'dataset artifacts in there.'.format(self._manual_dir))
return self._manual_dir
|
[
"def",
"manual_dir",
"(",
"self",
")",
":",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"self",
".",
"_manual_dir",
")",
":",
"raise",
"AssertionError",
"(",
"'Manual directory {} does not exist. Create it and download/extract '",
"'dataset artifacts in there.'",
".",
"format",
"(",
"self",
".",
"_manual_dir",
")",
")",
"return",
"self",
".",
"_manual_dir"
] |
Returns the directory containing the manually extracted data.
|
[
"Returns",
"the",
"directory",
"containing",
"the",
"manually",
"extracted",
"data",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L362-L368
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/cifar10_corrupted.py
|
_make_builder_configs
|
def _make_builder_configs():
"""Construct a list of BuilderConfigs.
Construct a list of 75 Cifar10CorruptedConfig objects, corresponding to
the 15 corruption types and 5 severities.
Returns:
A list of 75 Cifar10CorruptedConfig objects.
"""
config_list = []
for corruption in _CORRUPTIONS:
for severity in range(1, 6):
config_list.append(
Cifar10CorruptedConfig(
name=corruption + '_' + str(severity),
version='0.0.1',
description='Corruption method: ' + corruption +
', severity level: ' + str(severity),
corruption_type=corruption,
severity=severity,
))
return config_list
|
python
|
def _make_builder_configs():
"""Construct a list of BuilderConfigs.
Construct a list of 75 Cifar10CorruptedConfig objects, corresponding to
the 15 corruption types and 5 severities.
Returns:
A list of 75 Cifar10CorruptedConfig objects.
"""
config_list = []
for corruption in _CORRUPTIONS:
for severity in range(1, 6):
config_list.append(
Cifar10CorruptedConfig(
name=corruption + '_' + str(severity),
version='0.0.1',
description='Corruption method: ' + corruption +
', severity level: ' + str(severity),
corruption_type=corruption,
severity=severity,
))
return config_list
|
[
"def",
"_make_builder_configs",
"(",
")",
":",
"config_list",
"=",
"[",
"]",
"for",
"corruption",
"in",
"_CORRUPTIONS",
":",
"for",
"severity",
"in",
"range",
"(",
"1",
",",
"6",
")",
":",
"config_list",
".",
"append",
"(",
"Cifar10CorruptedConfig",
"(",
"name",
"=",
"corruption",
"+",
"'_'",
"+",
"str",
"(",
"severity",
")",
",",
"version",
"=",
"'0.0.1'",
",",
"description",
"=",
"'Corruption method: '",
"+",
"corruption",
"+",
"', severity level: '",
"+",
"str",
"(",
"severity",
")",
",",
"corruption_type",
"=",
"corruption",
",",
"severity",
"=",
"severity",
",",
")",
")",
"return",
"config_list"
] |
Construct a list of BuilderConfigs.
Construct a list of 75 Cifar10CorruptedConfig objects, corresponding to
the 15 corruption types and 5 severities.
Returns:
A list of 75 Cifar10CorruptedConfig objects.
|
[
"Construct",
"a",
"list",
"of",
"BuilderConfigs",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar10_corrupted.py#L93-L114
|
train
|
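`_make_builder_configs` is a plain cross product: every corruption type paired with severities 1-5, 15 x 5 = 75 configs. A tiny sketch with `itertools.product`; the `_CORRUPTIONS` list below is a three-item placeholder, not the real constant:

import itertools

_CORRUPTIONS = ["gaussian_noise", "shot_noise", "impulse_noise"]  # 3 of the 15
configs = [
    {"name": "%s_%d" % (corruption, severity),
     "corruption_type": corruption,
     "severity": severity}
    for corruption, severity in itertools.product(_CORRUPTIONS, range(1, 6))
]
print(len(configs))  # 15 here; 75 once all 15 corruption types are listed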
tensorflow/datasets
|
tensorflow_datasets/image/cifar10_corrupted.py
|
Cifar10Corrupted._split_generators
|
def _split_generators(self, dl_manager):
"""Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split.
"""
path = dl_manager.download_and_extract(_DOWNLOAD_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})
]
|
python
|
def _split_generators(self, dl_manager):
"""Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split.
"""
path = dl_manager.download_and_extract(_DOWNLOAD_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"path",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"_DOWNLOAD_URL",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TEST",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"{",
"'data_dir'",
":",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"_DIRNAME",
")",
"}",
")",
"]"
] |
Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split.
|
[
"Return",
"the",
"test",
"split",
"of",
"Cifar10",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar10_corrupted.py#L138-L153
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/cifar10_corrupted.py
|
Cifar10Corrupted._generate_examples
|
def _generate_examples(self, data_dir):
"""Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label.
"""
corruption = self.builder_config.corruption
severity = self.builder_config.severity
images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption])
labels_file = os.path.join(data_dir, _LABELS_FILENAME)
with tf.io.gfile.GFile(labels_file, mode='rb') as f:
labels = np.load(f)
num_images = labels.shape[0] // 5
# Labels are stacked 5 times so we can just read the first iteration
labels = labels[:num_images]
with tf.io.gfile.GFile(images_file, mode='rb') as f:
images = np.load(f)
# Slice images corresponding to correct severity level
images = images[(severity - 1) * num_images:severity * num_images]
for image, label in zip(images, labels):
yield {
'image': image,
'label': label,
}
|
python
|
def _generate_examples(self, data_dir):
"""Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label.
"""
corruption = self.builder_config.corruption
severity = self.builder_config.severity
images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption])
labels_file = os.path.join(data_dir, _LABELS_FILENAME)
with tf.io.gfile.GFile(labels_file, mode='rb') as f:
labels = np.load(f)
num_images = labels.shape[0] // 5
# Labels are stacked 5 times so we can just read the first iteration
labels = labels[:num_images]
with tf.io.gfile.GFile(images_file, mode='rb') as f:
images = np.load(f)
# Slice images corresponding to correct severity level
images = images[(severity - 1) * num_images:severity * num_images]
for image, label in zip(images, labels):
yield {
'image': image,
'label': label,
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"data_dir",
")",
":",
"corruption",
"=",
"self",
".",
"builder_config",
".",
"corruption",
"severity",
"=",
"self",
".",
"builder_config",
".",
"severity",
"images_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"_CORRUPTIONS_TO_FILENAMES",
"[",
"corruption",
"]",
")",
"labels_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"_LABELS_FILENAME",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"labels_file",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"labels",
"=",
"np",
".",
"load",
"(",
"f",
")",
"num_images",
"=",
"labels",
".",
"shape",
"[",
"0",
"]",
"//",
"5",
"# Labels are stacked 5 times so we can just read the first iteration",
"labels",
"=",
"labels",
"[",
":",
"num_images",
"]",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"images_file",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"images",
"=",
"np",
".",
"load",
"(",
"f",
")",
"# Slice images corresponding to correct severity level",
"images",
"=",
"images",
"[",
"(",
"severity",
"-",
"1",
")",
"*",
"num_images",
":",
"severity",
"*",
"num_images",
"]",
"for",
"image",
",",
"label",
"in",
"zip",
"(",
"images",
",",
"labels",
")",
":",
"yield",
"{",
"'image'",
":",
"image",
",",
"'label'",
":",
"label",
",",
"}"
] |
Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label.
|
[
"Generate",
"corrupted",
"Cifar10",
"test",
"data",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar10_corrupted.py#L155-L189
|
train
|
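The slicing in `_generate_examples` relies on the corrupted archive stacking all five severities of the test set back to back, with the label array repeated five times. A small numpy sketch of that layout and of the slice for one severity (array sizes are toy values):

import numpy as np

num_images = 4                                   # stand-in for the 10000 test images
labels = np.tile(np.arange(num_images), 5)       # labels repeated once per severity
images = np.arange(5 * num_images)               # severities stacked back to back

labels = labels[:num_images]                     # the first copy is enough
severity = 3
subset = images[(severity - 1) * num_images: severity * num_images]
print(subset)  # [ 8  9 10 11] -> the block belonging to severity level 3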
tensorflow/datasets
|
tensorflow_datasets/scripts/document_datasets.py
|
document_single_builder
|
def document_single_builder(builder):
"""Doc string for a single builder, with or without configs."""
mod_name = builder.__class__.__module__
cls_name = builder.__class__.__name__
mod_file = sys.modules[mod_name].__file__
if mod_file.endswith("pyc"):
mod_file = mod_file[:-1]
description_prefix = ""
if builder.builder_configs:
# Dataset with configs; document each one
config_docs = []
for config in builder.BUILDER_CONFIGS:
builder = tfds.builder(builder.name, config=config)
info = builder.info
# TODO(rsepassi): document the actual config object
config_doc = SINGLE_CONFIG_ENTRY.format(
builder_name=builder.name,
config_name=config.name,
description=config.description,
version=config.version,
feature_information=make_feature_information(info),
size=tfds.units.size_str(info.size_in_bytes),
)
config_docs.append(config_doc)
out_str = DATASET_WITH_CONFIGS_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
config_names="\n".join([
CONFIG_BULLET.format(name=config.name,
description=config.description,
version=config.version,
size=tfds.units.size_str(tfds.builder(
builder.name, config=config)
.info.size_in_bytes))
for config in builder.BUILDER_CONFIGS]),
config_cls="%s.%s" % (tfds_mod_name(mod_name),
type(builder.builder_config).__name__),
configs="\n".join(config_docs),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
statistics_information=make_statistics_information(info),
description=builder.info.description,
description_prefix=description_prefix,
)
else:
info = builder.info
out_str = DATASET_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
description=info.description,
description_prefix=description_prefix,
version=info.version,
feature_information=make_feature_information(info),
statistics_information=make_statistics_information(info),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
size=tfds.units.size_str(info.size_in_bytes),
)
out_str = schema_org(builder) + "\n" + out_str
return out_str
|
python
|
def document_single_builder(builder):
"""Doc string for a single builder, with or without configs."""
mod_name = builder.__class__.__module__
cls_name = builder.__class__.__name__
mod_file = sys.modules[mod_name].__file__
if mod_file.endswith("pyc"):
mod_file = mod_file[:-1]
description_prefix = ""
if builder.builder_configs:
# Dataset with configs; document each one
config_docs = []
for config in builder.BUILDER_CONFIGS:
builder = tfds.builder(builder.name, config=config)
info = builder.info
# TODO(rsepassi): document the actual config object
config_doc = SINGLE_CONFIG_ENTRY.format(
builder_name=builder.name,
config_name=config.name,
description=config.description,
version=config.version,
feature_information=make_feature_information(info),
size=tfds.units.size_str(info.size_in_bytes),
)
config_docs.append(config_doc)
out_str = DATASET_WITH_CONFIGS_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
config_names="\n".join([
CONFIG_BULLET.format(name=config.name,
description=config.description,
version=config.version,
size=tfds.units.size_str(tfds.builder(
builder.name, config=config)
.info.size_in_bytes))
for config in builder.BUILDER_CONFIGS]),
config_cls="%s.%s" % (tfds_mod_name(mod_name),
type(builder.builder_config).__name__),
configs="\n".join(config_docs),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
statistics_information=make_statistics_information(info),
description=builder.info.description,
description_prefix=description_prefix,
)
else:
info = builder.info
out_str = DATASET_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
description=info.description,
description_prefix=description_prefix,
version=info.version,
feature_information=make_feature_information(info),
statistics_information=make_statistics_information(info),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
size=tfds.units.size_str(info.size_in_bytes),
)
out_str = schema_org(builder) + "\n" + out_str
return out_str
|
[
"def",
"document_single_builder",
"(",
"builder",
")",
":",
"mod_name",
"=",
"builder",
".",
"__class__",
".",
"__module__",
"cls_name",
"=",
"builder",
".",
"__class__",
".",
"__name__",
"mod_file",
"=",
"sys",
".",
"modules",
"[",
"mod_name",
"]",
".",
"__file__",
"if",
"mod_file",
".",
"endswith",
"(",
"\"pyc\"",
")",
":",
"mod_file",
"=",
"mod_file",
"[",
":",
"-",
"1",
"]",
"description_prefix",
"=",
"\"\"",
"if",
"builder",
".",
"builder_configs",
":",
"# Dataset with configs; document each one",
"config_docs",
"=",
"[",
"]",
"for",
"config",
"in",
"builder",
".",
"BUILDER_CONFIGS",
":",
"builder",
"=",
"tfds",
".",
"builder",
"(",
"builder",
".",
"name",
",",
"config",
"=",
"config",
")",
"info",
"=",
"builder",
".",
"info",
"# TODO(rsepassi): document the actual config object",
"config_doc",
"=",
"SINGLE_CONFIG_ENTRY",
".",
"format",
"(",
"builder_name",
"=",
"builder",
".",
"name",
",",
"config_name",
"=",
"config",
".",
"name",
",",
"description",
"=",
"config",
".",
"description",
",",
"version",
"=",
"config",
".",
"version",
",",
"feature_information",
"=",
"make_feature_information",
"(",
"info",
")",
",",
"size",
"=",
"tfds",
".",
"units",
".",
"size_str",
"(",
"info",
".",
"size_in_bytes",
")",
",",
")",
"config_docs",
".",
"append",
"(",
"config_doc",
")",
"out_str",
"=",
"DATASET_WITH_CONFIGS_ENTRY",
".",
"format",
"(",
"snakecase_name",
"=",
"builder",
".",
"name",
",",
"module_and_class",
"=",
"\"%s.%s\"",
"%",
"(",
"tfds_mod_name",
"(",
"mod_name",
")",
",",
"cls_name",
")",
",",
"cls_url",
"=",
"cls_url",
"(",
"mod_name",
")",
",",
"config_names",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"CONFIG_BULLET",
".",
"format",
"(",
"name",
"=",
"config",
".",
"name",
",",
"description",
"=",
"config",
".",
"description",
",",
"version",
"=",
"config",
".",
"version",
",",
"size",
"=",
"tfds",
".",
"units",
".",
"size_str",
"(",
"tfds",
".",
"builder",
"(",
"builder",
".",
"name",
",",
"config",
"=",
"config",
")",
".",
"info",
".",
"size_in_bytes",
")",
")",
"for",
"config",
"in",
"builder",
".",
"BUILDER_CONFIGS",
"]",
")",
",",
"config_cls",
"=",
"\"%s.%s\"",
"%",
"(",
"tfds_mod_name",
"(",
"mod_name",
")",
",",
"type",
"(",
"builder",
".",
"builder_config",
")",
".",
"__name__",
")",
",",
"configs",
"=",
"\"\\n\"",
".",
"join",
"(",
"config_docs",
")",
",",
"urls",
"=",
"format_urls",
"(",
"info",
".",
"urls",
")",
",",
"url",
"=",
"url_from_info",
"(",
"info",
")",
",",
"supervised_keys",
"=",
"str",
"(",
"info",
".",
"supervised_keys",
")",
",",
"citation",
"=",
"make_citation",
"(",
"info",
".",
"citation",
")",
",",
"statistics_information",
"=",
"make_statistics_information",
"(",
"info",
")",
",",
"description",
"=",
"builder",
".",
"info",
".",
"description",
",",
"description_prefix",
"=",
"description_prefix",
",",
")",
"else",
":",
"info",
"=",
"builder",
".",
"info",
"out_str",
"=",
"DATASET_ENTRY",
".",
"format",
"(",
"snakecase_name",
"=",
"builder",
".",
"name",
",",
"module_and_class",
"=",
"\"%s.%s\"",
"%",
"(",
"tfds_mod_name",
"(",
"mod_name",
")",
",",
"cls_name",
")",
",",
"cls_url",
"=",
"cls_url",
"(",
"mod_name",
")",
",",
"description",
"=",
"info",
".",
"description",
",",
"description_prefix",
"=",
"description_prefix",
",",
"version",
"=",
"info",
".",
"version",
",",
"feature_information",
"=",
"make_feature_information",
"(",
"info",
")",
",",
"statistics_information",
"=",
"make_statistics_information",
"(",
"info",
")",
",",
"urls",
"=",
"format_urls",
"(",
"info",
".",
"urls",
")",
",",
"url",
"=",
"url_from_info",
"(",
"info",
")",
",",
"supervised_keys",
"=",
"str",
"(",
"info",
".",
"supervised_keys",
")",
",",
"citation",
"=",
"make_citation",
"(",
"info",
".",
"citation",
")",
",",
"size",
"=",
"tfds",
".",
"units",
".",
"size_str",
"(",
"info",
".",
"size_in_bytes",
")",
",",
")",
"out_str",
"=",
"schema_org",
"(",
"builder",
")",
"+",
"\"\\n\"",
"+",
"out_str",
"return",
"out_str"
] |
Doc string for a single builder, with or without configs.
|
[
"Doc",
"string",
"for",
"a",
"single",
"builder",
"with",
"or",
"without",
"configs",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L196-L265
|
train
|
tensorflow/datasets
|
tensorflow_datasets/scripts/document_datasets.py
|
make_module_to_builder_dict
|
def make_module_to_builder_dict(datasets=None):
"""Get all builders organized by module in nested dicts."""
# pylint: disable=g-long-lambda
# dict to hold tfds->image->mnist->[builders]
module_to_builder = collections.defaultdict(
lambda: collections.defaultdict(
lambda: collections.defaultdict(list)))
# pylint: enable=g-long-lambda
if datasets:
builders = [tfds.builder(name) for name in datasets]
else:
builders = [
tfds.builder(name)
for name in tfds.list_builders()
if name not in BUILDER_BLACKLIST
] + [tfds.builder("image_label_folder", dataset_name="image_label_folder")]
for builder in builders:
mod_name = builder.__class__.__module__
modules = mod_name.split(".")
if "testing" in modules:
continue
current_mod_ctr = module_to_builder
for mod in modules:
current_mod_ctr = current_mod_ctr[mod]
current_mod_ctr.append(builder)
module_to_builder = module_to_builder["tensorflow_datasets"]
return module_to_builder
|
python
|
def make_module_to_builder_dict(datasets=None):
"""Get all builders organized by module in nested dicts."""
# pylint: disable=g-long-lambda
# dict to hold tfds->image->mnist->[builders]
module_to_builder = collections.defaultdict(
lambda: collections.defaultdict(
lambda: collections.defaultdict(list)))
# pylint: enable=g-long-lambda
if datasets:
builders = [tfds.builder(name) for name in datasets]
else:
builders = [
tfds.builder(name)
for name in tfds.list_builders()
if name not in BUILDER_BLACKLIST
] + [tfds.builder("image_label_folder", dataset_name="image_label_folder")]
for builder in builders:
mod_name = builder.__class__.__module__
modules = mod_name.split(".")
if "testing" in modules:
continue
current_mod_ctr = module_to_builder
for mod in modules:
current_mod_ctr = current_mod_ctr[mod]
current_mod_ctr.append(builder)
module_to_builder = module_to_builder["tensorflow_datasets"]
return module_to_builder
|
[
"def",
"make_module_to_builder_dict",
"(",
"datasets",
"=",
"None",
")",
":",
"# pylint: disable=g-long-lambda",
"# dict to hold tfds->image->mnist->[builders]",
"module_to_builder",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"collections",
".",
"defaultdict",
"(",
"list",
")",
")",
")",
"# pylint: enable=g-long-lambda",
"if",
"datasets",
":",
"builders",
"=",
"[",
"tfds",
".",
"builder",
"(",
"name",
")",
"for",
"name",
"in",
"datasets",
"]",
"else",
":",
"builders",
"=",
"[",
"tfds",
".",
"builder",
"(",
"name",
")",
"for",
"name",
"in",
"tfds",
".",
"list_builders",
"(",
")",
"if",
"name",
"not",
"in",
"BUILDER_BLACKLIST",
"]",
"+",
"[",
"tfds",
".",
"builder",
"(",
"\"image_label_folder\"",
",",
"dataset_name",
"=",
"\"image_label_folder\"",
")",
"]",
"for",
"builder",
"in",
"builders",
":",
"mod_name",
"=",
"builder",
".",
"__class__",
".",
"__module__",
"modules",
"=",
"mod_name",
".",
"split",
"(",
"\".\"",
")",
"if",
"\"testing\"",
"in",
"modules",
":",
"continue",
"current_mod_ctr",
"=",
"module_to_builder",
"for",
"mod",
"in",
"modules",
":",
"current_mod_ctr",
"=",
"current_mod_ctr",
"[",
"mod",
"]",
"current_mod_ctr",
".",
"append",
"(",
"builder",
")",
"module_to_builder",
"=",
"module_to_builder",
"[",
"\"tensorflow_datasets\"",
"]",
"return",
"module_to_builder"
] |
Get all builders organized by module in nested dicts.
|
[
"Get",
"all",
"builders",
"organized",
"by",
"module",
"in",
"nested",
"dicts",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L275-L305
|
train
|
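The heavy lifting in make_module_to_builder_dict is just nested collections.defaultdict plus a list append at the leaf. A minimal two-level sketch of the same pattern, using invented module-path strings in place of real tfds builders:

import collections

# Nested defaultdicts with a list at the leaf, mirroring the
# tfds -> category -> dataset grouping above (reduced to two levels here).
module_to_items = collections.defaultdict(
    lambda: collections.defaultdict(list))

# Hypothetical module paths standing in for builder.__class__.__module__.
paths = [
    "tensorflow_datasets.image.mnist",
    "tensorflow_datasets.image.cifar10",
    "tensorflow_datasets.text.imdb_reviews",
]
for path in paths:
  _, category, name = path.split(".")
  module_to_items[category][name].append(path)

print(sorted(module_to_items["image"]))  # ['cifar10', 'mnist']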
tensorflow/datasets
|
tensorflow_datasets/scripts/document_datasets.py
|
_pprint_features_dict
|
def _pprint_features_dict(features_dict, indent=0, add_prefix=True):
"""Pretty-print tfds.features.FeaturesDict."""
first_last_indent_str = " " * indent
indent_str = " " * (indent + 4)
first_line = "%s%s({" % (
first_last_indent_str if add_prefix else "",
type(features_dict).__name__,
)
lines = [first_line]
for k in sorted(list(features_dict.keys())):
v = features_dict[k]
if isinstance(v, tfds.features.FeaturesDict):
v_str = _pprint_features_dict(v, indent + 4, False)
else:
v_str = str(v)
lines.append("%s'%s': %s," % (indent_str, k, v_str))
lines.append("%s})" % first_last_indent_str)
return "\n".join(lines)
|
python
|
def _pprint_features_dict(features_dict, indent=0, add_prefix=True):
"""Pretty-print tfds.features.FeaturesDict."""
first_last_indent_str = " " * indent
indent_str = " " * (indent + 4)
first_line = "%s%s({" % (
first_last_indent_str if add_prefix else "",
type(features_dict).__name__,
)
lines = [first_line]
for k in sorted(list(features_dict.keys())):
v = features_dict[k]
if isinstance(v, tfds.features.FeaturesDict):
v_str = _pprint_features_dict(v, indent + 4, False)
else:
v_str = str(v)
lines.append("%s'%s': %s," % (indent_str, k, v_str))
lines.append("%s})" % first_last_indent_str)
return "\n".join(lines)
|
[
"def",
"_pprint_features_dict",
"(",
"features_dict",
",",
"indent",
"=",
"0",
",",
"add_prefix",
"=",
"True",
")",
":",
"first_last_indent_str",
"=",
"\" \"",
"*",
"indent",
"indent_str",
"=",
"\" \"",
"*",
"(",
"indent",
"+",
"4",
")",
"first_line",
"=",
"\"%s%s({\"",
"%",
"(",
"first_last_indent_str",
"if",
"add_prefix",
"else",
"\"\"",
",",
"type",
"(",
"features_dict",
")",
".",
"__name__",
",",
")",
"lines",
"=",
"[",
"first_line",
"]",
"for",
"k",
"in",
"sorted",
"(",
"list",
"(",
"features_dict",
".",
"keys",
"(",
")",
")",
")",
":",
"v",
"=",
"features_dict",
"[",
"k",
"]",
"if",
"isinstance",
"(",
"v",
",",
"tfds",
".",
"features",
".",
"FeaturesDict",
")",
":",
"v_str",
"=",
"_pprint_features_dict",
"(",
"v",
",",
"indent",
"+",
"4",
",",
"False",
")",
"else",
":",
"v_str",
"=",
"str",
"(",
"v",
")",
"lines",
".",
"append",
"(",
"\"%s'%s': %s,\"",
"%",
"(",
"indent_str",
",",
"k",
",",
"v_str",
")",
")",
"lines",
".",
"append",
"(",
"\"%s})\"",
"%",
"first_last_indent_str",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"lines",
")"
] |
Pretty-print tfds.features.FeaturesDict.
|
[
"Pretty",
"-",
"print",
"tfds",
".",
"features",
".",
"FeaturesDict",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L308-L325
|
train
|
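The recursion in _pprint_features_dict only needs dict-style access and str() on the leaves, so the same layout can be reproduced on plain nested dicts. A rough stand-alone sketch (the feature strings below are placeholders, not real tfds objects):

def pprint_nested(d, indent=0, add_prefix=True):
  """Minimal re-implementation of the indentation scheme for plain dicts."""
  outer = " " * indent
  inner = " " * (indent + 4)
  lines = ["%s%s({" % (outer if add_prefix else "", type(d).__name__)]
  for k in sorted(d):
    v = d[k]
    v_str = pprint_nested(v, indent + 4, False) if isinstance(v, dict) else str(v)
    lines.append("%s'%s': %s," % (inner, k, v_str))
  lines.append("%s})" % outer)
  return "\n".join(lines)

print(pprint_nested({
    "image": "Image(shape=(28, 28, 1), dtype=tf.uint8)",
    "label": "ClassLabel(num_classes=10)",
}))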
tensorflow/datasets
|
tensorflow_datasets/scripts/document_datasets.py
|
make_statistics_information
|
def make_statistics_information(info):
"""Make statistics information table."""
if not info.splits.total_num_examples:
# That means that we have yet to calculate the statistics for this.
return "None computed"
stats = [(info.splits.total_num_examples, "ALL")]
for split_name, split_info in info.splits.items():
stats.append((split_info.num_examples, split_name.upper()))
# Sort reverse on number of examples.
stats.sort(reverse=True)
stats = "\n".join([
"{0:10} | {1:>10,}".format(name, num_exs) for (num_exs, name) in stats
])
return STATISTICS_TABLE.format(split_statistics=stats)
|
python
|
def make_statistics_information(info):
"""Make statistics information table."""
if not info.splits.total_num_examples:
# That means that we have yet to calculate the statistics for this.
return "None computed"
stats = [(info.splits.total_num_examples, "ALL")]
for split_name, split_info in info.splits.items():
stats.append((split_info.num_examples, split_name.upper()))
# Sort reverse on number of examples.
stats.sort(reverse=True)
stats = "\n".join([
"{0:10} | {1:>10,}".format(name, num_exs) for (num_exs, name) in stats
])
return STATISTICS_TABLE.format(split_statistics=stats)
|
[
"def",
"make_statistics_information",
"(",
"info",
")",
":",
"if",
"not",
"info",
".",
"splits",
".",
"total_num_examples",
":",
"# That means that we have yet to calculate the statistics for this.",
"return",
"\"None computed\"",
"stats",
"=",
"[",
"(",
"info",
".",
"splits",
".",
"total_num_examples",
",",
"\"ALL\"",
")",
"]",
"for",
"split_name",
",",
"split_info",
"in",
"info",
".",
"splits",
".",
"items",
"(",
")",
":",
"stats",
".",
"append",
"(",
"(",
"split_info",
".",
"num_examples",
",",
"split_name",
".",
"upper",
"(",
")",
")",
")",
"# Sort reverse on number of examples.",
"stats",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"stats",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"{0:10} | {1:>10,}\"",
".",
"format",
"(",
"name",
",",
"num_exs",
")",
"for",
"(",
"num_exs",
",",
"name",
")",
"in",
"stats",
"]",
")",
"return",
"STATISTICS_TABLE",
".",
"format",
"(",
"split_statistics",
"=",
"stats",
")"
] |
Make statistics information table.
|
[
"Make",
"statistics",
"information",
"table",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L337-L351
|
train
|
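The table body produced by make_statistics_information is just reverse-sorted (num_examples, split_name) pairs pushed through a fixed-width format string. A quick sketch with invented split sizes, no tfds needed:

splits = {"train": 60000, "test": 10000}  # hypothetical split sizes

stats = [(sum(splits.values()), "ALL")]
stats += [(num, name.upper()) for name, num in splits.items()]
stats.sort(reverse=True)  # largest split first

print("\n".join(
    "{0:10} | {1:>10,}".format(name, num) for num, name in stats))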
tensorflow/datasets
|
tensorflow_datasets/scripts/document_datasets.py
|
dataset_docs_str
|
def dataset_docs_str(datasets=None):
"""Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
"""
module_to_builder = make_module_to_builder_dict(datasets)
sections = sorted(list(module_to_builder.keys()))
section_tocs = []
section_docs = []
for section in sections:
builders = tf.nest.flatten(module_to_builder[section])
builders = sorted(builders, key=lambda b: b.name)
builder_docs = [document_single_builder(builder) for builder in builders]
section_doc = SECTION_DATASETS.format(
section_name=section, datasets="\n".join(builder_docs))
section_toc = create_section_toc(section, builders)
section_docs.append(section_doc)
section_tocs.append(section_toc)
full_doc = DOC.format(toc="\n".join(section_tocs),
datasets="\n".join(section_docs))
return full_doc
|
python
|
def dataset_docs_str(datasets=None):
"""Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
"""
module_to_builder = make_module_to_builder_dict(datasets)
sections = sorted(list(module_to_builder.keys()))
section_tocs = []
section_docs = []
for section in sections:
builders = tf.nest.flatten(module_to_builder[section])
builders = sorted(builders, key=lambda b: b.name)
builder_docs = [document_single_builder(builder) for builder in builders]
section_doc = SECTION_DATASETS.format(
section_name=section, datasets="\n".join(builder_docs))
section_toc = create_section_toc(section, builders)
section_docs.append(section_doc)
section_tocs.append(section_toc)
full_doc = DOC.format(toc="\n".join(section_tocs),
datasets="\n".join(section_docs))
return full_doc
|
[
"def",
"dataset_docs_str",
"(",
"datasets",
"=",
"None",
")",
":",
"module_to_builder",
"=",
"make_module_to_builder_dict",
"(",
"datasets",
")",
"sections",
"=",
"sorted",
"(",
"list",
"(",
"module_to_builder",
".",
"keys",
"(",
")",
")",
")",
"section_tocs",
"=",
"[",
"]",
"section_docs",
"=",
"[",
"]",
"for",
"section",
"in",
"sections",
":",
"builders",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"module_to_builder",
"[",
"section",
"]",
")",
"builders",
"=",
"sorted",
"(",
"builders",
",",
"key",
"=",
"lambda",
"b",
":",
"b",
".",
"name",
")",
"builder_docs",
"=",
"[",
"document_single_builder",
"(",
"builder",
")",
"for",
"builder",
"in",
"builders",
"]",
"section_doc",
"=",
"SECTION_DATASETS",
".",
"format",
"(",
"section_name",
"=",
"section",
",",
"datasets",
"=",
"\"\\n\"",
".",
"join",
"(",
"builder_docs",
")",
")",
"section_toc",
"=",
"create_section_toc",
"(",
"section",
",",
"builders",
")",
"section_docs",
".",
"append",
"(",
"section_doc",
")",
"section_tocs",
".",
"append",
"(",
"section_toc",
")",
"full_doc",
"=",
"DOC",
".",
"format",
"(",
"toc",
"=",
"\"\\n\"",
".",
"join",
"(",
"section_tocs",
")",
",",
"datasets",
"=",
"\"\\n\"",
".",
"join",
"(",
"section_docs",
")",
")",
"return",
"full_doc"
] |
Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
|
[
"Create",
"dataset",
"documentation",
"string",
"for",
"given",
"datasets",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L354-L383
|
train
|
tensorflow/datasets
|
tensorflow_datasets/scripts/document_datasets.py
|
schema_org
|
def schema_org(builder):
# pylint: disable=line-too-long
"""Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata
"""
# pylint: enable=line-too-long
properties = [
(lambda x: x.name, SCHEMA_ORG_NAME),
(lambda x: x.description, SCHEMA_ORG_DESC),
(lambda x: x.name, SCHEMA_ORG_URL),
(lambda x: (x.urls and x.urls[0]) or "", SCHEMA_ORG_SAMEAS)
]
info = builder.info
out_str = SCHEMA_ORG_PRE
for extractor, template in properties:
val = extractor(info)
if val:
# We are using cgi module instead of html due to Python 2 compatibility
out_str += template.format(val=cgi.escape(val, quote=True).strip())
out_str += SCHEMA_ORG_POST
return out_str
|
python
|
def schema_org(builder):
# pylint: disable=line-too-long
"""Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata
"""
# pylint: enable=line-too-long
properties = [
(lambda x: x.name, SCHEMA_ORG_NAME),
(lambda x: x.description, SCHEMA_ORG_DESC),
(lambda x: x.name, SCHEMA_ORG_URL),
(lambda x: (x.urls and x.urls[0]) or "", SCHEMA_ORG_SAMEAS)
]
info = builder.info
out_str = SCHEMA_ORG_PRE
for extractor, template in properties:
val = extractor(info)
if val:
# We are using cgi module instead of html due to Python 2 compatibility
out_str += template.format(val=cgi.escape(val, quote=True).strip())
out_str += SCHEMA_ORG_POST
return out_str
|
[
"def",
"schema_org",
"(",
"builder",
")",
":",
"# pylint: disable=line-too-long",
"# pylint: enable=line-too-long",
"properties",
"=",
"[",
"(",
"lambda",
"x",
":",
"x",
".",
"name",
",",
"SCHEMA_ORG_NAME",
")",
",",
"(",
"lambda",
"x",
":",
"x",
".",
"description",
",",
"SCHEMA_ORG_DESC",
")",
",",
"(",
"lambda",
"x",
":",
"x",
".",
"name",
",",
"SCHEMA_ORG_URL",
")",
",",
"(",
"lambda",
"x",
":",
"(",
"x",
".",
"urls",
"and",
"x",
".",
"urls",
"[",
"0",
"]",
")",
"or",
"\"\"",
",",
"SCHEMA_ORG_SAMEAS",
")",
"]",
"info",
"=",
"builder",
".",
"info",
"out_str",
"=",
"SCHEMA_ORG_PRE",
"for",
"extractor",
",",
"template",
"in",
"properties",
":",
"val",
"=",
"extractor",
"(",
"info",
")",
"if",
"val",
":",
"# We are using cgi module instead of html due to Python 2 compatibility",
"out_str",
"+=",
"template",
".",
"format",
"(",
"val",
"=",
"cgi",
".",
"escape",
"(",
"val",
",",
"quote",
"=",
"True",
")",
".",
"strip",
"(",
")",
")",
"out_str",
"+=",
"SCHEMA_ORG_POST",
"return",
"out_str"
] |
Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata
|
[
"Builds",
"schema",
".",
"org",
"microdata",
"for",
"DatasetSearch",
"from",
"DatasetBuilder",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L414-L449
|
train
|
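One portability note on schema_org: cgi.escape was removed in Python 3.8, so on current interpreters the equivalent escaping step is html.escape. A small sketch of just that step (not part of the original script):

import html

val = 'A dataset with "quotes" & <angle brackets>'
print(html.escape(val, quote=True).strip())
# A dataset with "quotes" & <angle brackets>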
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
disk
|
def disk(radius, alias_blur=0.1, dtype=np.float32):
"""Generating a Gaussian blurring kernel with disk shape.
Generating a Gaussian blurring kernel with disk shape using cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussian blurring kernel.
"""
if radius <= 8:
length = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
length = np.arange(-radius, radius + 1)
ksize = (5, 5)
x_axis, y_axis = np.meshgrid(length, length)
aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return tfds.core.lazy_imports.cv2.GaussianBlur(
aliased_disk, ksize=ksize, sigmaX=alias_blur)
|
python
|
def disk(radius, alias_blur=0.1, dtype=np.float32):
"""Generating a Gaussian blurring kernel with disk shape.
Generating a Gaussian blurring kernel with disk shape using cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussian blurring kernel.
"""
if radius <= 8:
length = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
length = np.arange(-radius, radius + 1)
ksize = (5, 5)
x_axis, y_axis = np.meshgrid(length, length)
aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return tfds.core.lazy_imports.cv2.GaussianBlur(
aliased_disk, ksize=ksize, sigmaX=alias_blur)
|
[
"def",
"disk",
"(",
"radius",
",",
"alias_blur",
"=",
"0.1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
":",
"if",
"radius",
"<=",
"8",
":",
"length",
"=",
"np",
".",
"arange",
"(",
"-",
"8",
",",
"8",
"+",
"1",
")",
"ksize",
"=",
"(",
"3",
",",
"3",
")",
"else",
":",
"length",
"=",
"np",
".",
"arange",
"(",
"-",
"radius",
",",
"radius",
"+",
"1",
")",
"ksize",
"=",
"(",
"5",
",",
"5",
")",
"x_axis",
",",
"y_axis",
"=",
"np",
".",
"meshgrid",
"(",
"length",
",",
"length",
")",
"aliased_disk",
"=",
"np",
".",
"array",
"(",
"(",
"x_axis",
"**",
"2",
"+",
"y_axis",
"**",
"2",
")",
"<=",
"radius",
"**",
"2",
",",
"dtype",
"=",
"dtype",
")",
"aliased_disk",
"/=",
"np",
".",
"sum",
"(",
"aliased_disk",
")",
"# supersample disk to antialias",
"return",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
".",
"GaussianBlur",
"(",
"aliased_disk",
",",
"ksize",
"=",
"ksize",
",",
"sigmaX",
"=",
"alias_blur",
")"
] |
Generating a Gaussian blurring kernel with disk shape.
Generating a Gaussian blurring kernel with disk shape using cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussian blurring kernel.
|
[
"Generating",
"a",
"Gaussian",
"blurring",
"kernel",
"with",
"disk",
"shape",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L46-L70
|
train
|
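Before the final cv2.GaussianBlur pass, the kernel built by disk() is just a normalized boolean disk. A numpy-only sketch of that intermediate stage (the anti-aliasing smoothing is deliberately left out here):

import numpy as np

radius = 3
length = np.arange(-8, 8 + 1)                 # same support as the radius <= 8 branch
xx, yy = np.meshgrid(length, length)

aliased_disk = ((xx ** 2 + yy ** 2) <= radius ** 2).astype(np.float32)
aliased_disk /= aliased_disk.sum()            # kernel weights sum to 1

print(aliased_disk.shape, float(aliased_disk.sum()))   # (17, 17), ~1.0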
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
clipped_zoom
|
def clipped_zoom(img, zoom_factor):
"""Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
"""
h = img.shape[0]
ch = int(np.ceil(h / float(zoom_factor)))
top_h = (h - ch) // 2
w = img.shape[1]
cw = int(np.ceil(w / float(zoom_factor)))
top_w = (w - cw) // 2
img = tfds.core.lazy_imports.scipy.ndimage.zoom(
img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),
order=1)
# trim off any extra pixels
trim_top_h = (img.shape[0] - h) // 2
trim_top_w = (img.shape[1] - w) // 2
return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
|
python
|
def clipped_zoom(img, zoom_factor):
"""Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
"""
h = img.shape[0]
ch = int(np.ceil(h / float(zoom_factor)))
top_h = (h - ch) // 2
w = img.shape[1]
cw = int(np.ceil(w / float(zoom_factor)))
top_w = (w - cw) // 2
img = tfds.core.lazy_imports.scipy.ndimage.zoom(
img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),
order=1)
# trim off any extra pixels
trim_top_h = (img.shape[0] - h) // 2
trim_top_w = (img.shape[1] - w) // 2
return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
|
[
"def",
"clipped_zoom",
"(",
"img",
",",
"zoom_factor",
")",
":",
"h",
"=",
"img",
".",
"shape",
"[",
"0",
"]",
"ch",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"h",
"/",
"float",
"(",
"zoom_factor",
")",
")",
")",
"top_h",
"=",
"(",
"h",
"-",
"ch",
")",
"//",
"2",
"w",
"=",
"img",
".",
"shape",
"[",
"1",
"]",
"cw",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"w",
"/",
"float",
"(",
"zoom_factor",
")",
")",
")",
"top_w",
"=",
"(",
"w",
"-",
"cw",
")",
"//",
"2",
"img",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"scipy",
".",
"ndimage",
".",
"zoom",
"(",
"img",
"[",
"top_h",
":",
"top_h",
"+",
"ch",
",",
"top_w",
":",
"top_w",
"+",
"cw",
"]",
",",
"(",
"zoom_factor",
",",
"zoom_factor",
",",
"1",
")",
",",
"order",
"=",
"1",
")",
"# trim off any extra pixels",
"trim_top_h",
"=",
"(",
"img",
".",
"shape",
"[",
"0",
"]",
"-",
"h",
")",
"//",
"2",
"trim_top_w",
"=",
"(",
"img",
".",
"shape",
"[",
"1",
"]",
"-",
"w",
")",
"//",
"2",
"return",
"img",
"[",
"trim_top_h",
":",
"trim_top_h",
"+",
"h",
",",
"trim_top_w",
":",
"trim_top_w",
"+",
"w",
"]"
] |
Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
|
[
"Zoom",
"image",
"with",
"clipping",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L73-L101
|
train
|
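A quick sanity check on the crop-then-zoom arithmetic in clipped_zoom is that the output keeps the input's height and width. A sketch that calls scipy.ndimage directly instead of the tfds lazy import (random data, arbitrary zoom factor):

import numpy as np
import scipy.ndimage

img = np.random.rand(224, 224, 3).astype(np.float32)
zoom_factor = 1.2

h, w = img.shape[:2]
ch, cw = int(np.ceil(h / zoom_factor)), int(np.ceil(w / zoom_factor))
top_h, top_w = (h - ch) // 2, (w - cw) // 2

zoomed = scipy.ndimage.zoom(
    img[top_h:top_h + ch, top_w:top_w + cw],
    (zoom_factor, zoom_factor, 1), order=1)

trim_h, trim_w = (zoomed.shape[0] - h) // 2, (zoomed.shape[1] - w) // 2
out = zoomed[trim_h:trim_h + h, trim_w:trim_w + w]
print(out.shape)  # expected (224, 224, 3)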
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
plasma_fractal
|
def plasma_fractal(mapsize=512, wibbledecay=3):
"""Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
numpy 2d array, side length 'mapsize', of floats in [0,255].
"""
if mapsize & (mapsize - 1) != 0:
raise ValueError('mapsize must be a power of two.')
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square, calculate middle value as mean of points + wibble."""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize, stepsize //
2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond, calculate middle value as meanof points + wibble."""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize //
2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize //
2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize //
2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
|
python
|
def plasma_fractal(mapsize=512, wibbledecay=3):
"""Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
numpy 2d array, side length 'mapsize', of floats in [0,255].
"""
if mapsize & (mapsize - 1) != 0:
raise ValueError('mapsize must be a power of two.')
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square, calculate middle value as mean of points + wibble."""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize, stepsize //
2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond, calculate middle value as meanof points + wibble."""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize //
2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize //
2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize //
2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
|
[
"def",
"plasma_fractal",
"(",
"mapsize",
"=",
"512",
",",
"wibbledecay",
"=",
"3",
")",
":",
"if",
"mapsize",
"&",
"(",
"mapsize",
"-",
"1",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'mapsize must be a power of two.'",
")",
"maparray",
"=",
"np",
".",
"empty",
"(",
"(",
"mapsize",
",",
"mapsize",
")",
",",
"dtype",
"=",
"np",
".",
"float_",
")",
"maparray",
"[",
"0",
",",
"0",
"]",
"=",
"0",
"stepsize",
"=",
"mapsize",
"wibble",
"=",
"100",
"def",
"wibbledmean",
"(",
"array",
")",
":",
"return",
"array",
"/",
"4",
"+",
"wibble",
"*",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"wibble",
",",
"wibble",
",",
"array",
".",
"shape",
")",
"def",
"fillsquares",
"(",
")",
":",
"\"\"\"For each square, calculate middle value as mean of points + wibble.\"\"\"",
"cornerref",
"=",
"maparray",
"[",
"0",
":",
"mapsize",
":",
"stepsize",
",",
"0",
":",
"mapsize",
":",
"stepsize",
"]",
"squareaccum",
"=",
"cornerref",
"+",
"np",
".",
"roll",
"(",
"cornerref",
",",
"shift",
"=",
"-",
"1",
",",
"axis",
"=",
"0",
")",
"squareaccum",
"+=",
"np",
".",
"roll",
"(",
"squareaccum",
",",
"shift",
"=",
"-",
"1",
",",
"axis",
"=",
"1",
")",
"maparray",
"[",
"stepsize",
"//",
"2",
":",
"mapsize",
":",
"stepsize",
",",
"stepsize",
"//",
"2",
":",
"mapsize",
":",
"stepsize",
"]",
"=",
"wibbledmean",
"(",
"squareaccum",
")",
"def",
"filldiamonds",
"(",
")",
":",
"\"\"\"For each diamond, calculate middle value as meanof points + wibble.\"\"\"",
"mapsize",
"=",
"maparray",
".",
"shape",
"[",
"0",
"]",
"drgrid",
"=",
"maparray",
"[",
"stepsize",
"//",
"2",
":",
"mapsize",
":",
"stepsize",
",",
"stepsize",
"//",
"2",
":",
"mapsize",
":",
"stepsize",
"]",
"ulgrid",
"=",
"maparray",
"[",
"0",
":",
"mapsize",
":",
"stepsize",
",",
"0",
":",
"mapsize",
":",
"stepsize",
"]",
"ldrsum",
"=",
"drgrid",
"+",
"np",
".",
"roll",
"(",
"drgrid",
",",
"1",
",",
"axis",
"=",
"0",
")",
"lulsum",
"=",
"ulgrid",
"+",
"np",
".",
"roll",
"(",
"ulgrid",
",",
"-",
"1",
",",
"axis",
"=",
"1",
")",
"ltsum",
"=",
"ldrsum",
"+",
"lulsum",
"maparray",
"[",
"0",
":",
"mapsize",
":",
"stepsize",
",",
"stepsize",
"//",
"2",
":",
"mapsize",
":",
"stepsize",
"]",
"=",
"wibbledmean",
"(",
"ltsum",
")",
"tdrsum",
"=",
"drgrid",
"+",
"np",
".",
"roll",
"(",
"drgrid",
",",
"1",
",",
"axis",
"=",
"1",
")",
"tulsum",
"=",
"ulgrid",
"+",
"np",
".",
"roll",
"(",
"ulgrid",
",",
"-",
"1",
",",
"axis",
"=",
"0",
")",
"ttsum",
"=",
"tdrsum",
"+",
"tulsum",
"maparray",
"[",
"stepsize",
"//",
"2",
":",
"mapsize",
":",
"stepsize",
",",
"0",
":",
"mapsize",
":",
"stepsize",
"]",
"=",
"wibbledmean",
"(",
"ttsum",
")",
"while",
"stepsize",
">=",
"2",
":",
"fillsquares",
"(",
")",
"filldiamonds",
"(",
")",
"stepsize",
"//=",
"2",
"wibble",
"/=",
"wibbledecay",
"maparray",
"-=",
"maparray",
".",
"min",
"(",
")",
"return",
"maparray",
"/",
"maparray",
".",
"max",
"(",
")"
] |
Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
numpy 2d array, side length 'mapsize', of floats in [0,255].
|
[
"Generate",
"a",
"heightmap",
"using",
"diamond",
"-",
"square",
"algorithm",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L104-L159
|
train
|
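plasma_fractal only depends on numpy, so assuming the definition above is in scope it can be exercised on its own. Note that the final division by maparray.max() actually normalizes the result to [0, 1], despite the [0, 255] wording in the docstring:

import numpy as np

np.random.seed(0)
heightmap = plasma_fractal(mapsize=256, wibbledecay=2.0)  # assumes the definition above

print(heightmap.shape)                                  # (256, 256)
print(float(heightmap.min()), float(heightmap.max()))   # 0.0 1.0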
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
gaussian_noise
|
def gaussian_noise(x, severity=1):
"""Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
"""
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return around_and_astype(x_clip)
|
python
|
def gaussian_noise(x, severity=1):
"""Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
"""
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return around_and_astype(x_clip)
|
[
"def",
"gaussian_noise",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
".08",
",",
".12",
",",
"0.18",
",",
"0.26",
",",
"0.38",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"x",
"+",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"x",
".",
"shape",
",",
"scale",
"=",
"c",
")",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
|
[
"Gaussian",
"noise",
"corruption",
"to",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L167-L180
|
train
|
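The same recipe carries over to the shot and impulse noise corruptions below; only the noise model and severity table change. A stand-alone sketch of the Gaussian case on a random uint8 image, with around_and_astype (defined elsewhere in corruptions.py) replaced by an explicit round-and-cast:

import numpy as np

severity = 3
scale = [.08, .12, 0.18, 0.26, 0.38][severity - 1]   # same table as above

img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
noisy = np.clip(img / 255. + np.random.normal(size=img.shape, scale=scale), 0, 1) * 255
noisy = np.around(noisy).astype(np.uint8)

print(noisy.dtype, noisy.shape)   # uint8 (32, 32, 3)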
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
shot_noise
|
def shot_noise(x, severity=1):
"""Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
"""
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return around_and_astype(x_clip)
|
python
|
def shot_noise(x, severity=1):
"""Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
"""
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return around_and_astype(x_clip)
|
[
"def",
"shot_noise",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"60",
",",
"25",
",",
"12",
",",
"5",
",",
"3",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"np",
".",
"random",
".",
"poisson",
"(",
"x",
"*",
"c",
")",
"/",
"float",
"(",
"c",
")",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
|
[
"Shot",
"noise",
"corruption",
"to",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L183-L196
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
impulse_noise
|
def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
|
python
|
def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
|
[
"def",
"impulse_noise",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
".03",
",",
".06",
",",
".09",
",",
"0.17",
",",
"0.27",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"skimage",
".",
"util",
".",
"random_noise",
"(",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
",",
"mode",
"=",
"'s&p'",
",",
"amount",
"=",
"c",
")",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"x",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
|
[
"Impulse",
"noise",
"corruption",
"to",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L199-L213
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
defocus_blur
|
def defocus_blur(x, severity=1):
"""Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
"""
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3
x_clip = np.clip(channels, 0, 1) * 255
return around_and_astype(x_clip)
|
python
|
def defocus_blur(x, severity=1):
"""Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
"""
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3
x_clip = np.clip(channels, 0, 1) * 255
return around_and_astype(x_clip)
|
[
"def",
"defocus_blur",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"(",
"3",
",",
"0.1",
")",
",",
"(",
"4",
",",
"0.5",
")",
",",
"(",
"6",
",",
"0.5",
")",
",",
"(",
"8",
",",
"0.5",
")",
",",
"(",
"10",
",",
"0.5",
")",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
"kernel",
"=",
"disk",
"(",
"radius",
"=",
"c",
"[",
"0",
"]",
",",
"alias_blur",
"=",
"c",
"[",
"1",
"]",
")",
"channels",
"=",
"[",
"]",
"for",
"d",
"in",
"range",
"(",
"3",
")",
":",
"channels",
".",
"append",
"(",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
".",
"filter2D",
"(",
"x",
"[",
":",
",",
":",
",",
"d",
"]",
",",
"-",
"1",
",",
"kernel",
")",
")",
"channels",
"=",
"np",
".",
"array",
"(",
"channels",
")",
".",
"transpose",
"(",
"(",
"1",
",",
"2",
",",
"0",
")",
")",
"# 3x224x224 -> 224x224x3",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"channels",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
|
[
"Defocus",
"blurring",
"to",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L216-L236
|
train
|
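The per-channel filtering step in defocus_blur can be reproduced with OpenCV directly (ddepth=-1 keeps the input depth). A sketch that substitutes a plain normalized box kernel for the disk() helper, just to show the mechanics:

import numpy as np
import cv2

img = np.random.rand(64, 64, 3).astype(np.float32)
kernel = np.ones((5, 5), np.float32) / 25.    # stand-in for disk(radius, alias_blur)

channels = [cv2.filter2D(img[:, :, d], -1, kernel) for d in range(3)]
blurred = np.clip(np.stack(channels, axis=-1), 0, 1)

print(blurred.shape)   # (64, 64, 3)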
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
frosted_glass_blur
|
def frosted_glass_blur(x, severity=1):
"""Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
"""
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4,
2)][severity - 1]
x = np.uint8(
tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for _ in range(c[2]):
for h in range(x.shape[0] - c[1], c[1], -1):
for w in range(x.shape[1] - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_clip = np.clip(
tfds.core.lazy_imports.skimage.filters.gaussian(
x / 255., sigma=c[0], multichannel=True), 0, 1)
x_clip *= 255
return around_and_astype(x_clip)
|
python
|
def frosted_glass_blur(x, severity=1):
"""Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
"""
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4,
2)][severity - 1]
x = np.uint8(
tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for _ in range(c[2]):
for h in range(x.shape[0] - c[1], c[1], -1):
for w in range(x.shape[1] - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_clip = np.clip(
tfds.core.lazy_imports.skimage.filters.gaussian(
x / 255., sigma=c[0], multichannel=True), 0, 1)
x_clip *= 255
return around_and_astype(x_clip)
|
[
"def",
"frosted_glass_blur",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"# sigma, max_delta, iterations",
"c",
"=",
"[",
"(",
"0.7",
",",
"1",
",",
"2",
")",
",",
"(",
"0.9",
",",
"2",
",",
"1",
")",
",",
"(",
"1",
",",
"2",
",",
"3",
")",
",",
"(",
"1.1",
",",
"3",
",",
"2",
")",
",",
"(",
"1.5",
",",
"4",
",",
"2",
")",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"uint8",
"(",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"skimage",
".",
"filters",
".",
"gaussian",
"(",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
",",
"sigma",
"=",
"c",
"[",
"0",
"]",
",",
"multichannel",
"=",
"True",
")",
"*",
"255",
")",
"# locally shuffle pixels",
"for",
"_",
"in",
"range",
"(",
"c",
"[",
"2",
"]",
")",
":",
"for",
"h",
"in",
"range",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
"-",
"c",
"[",
"1",
"]",
",",
"c",
"[",
"1",
"]",
",",
"-",
"1",
")",
":",
"for",
"w",
"in",
"range",
"(",
"x",
".",
"shape",
"[",
"1",
"]",
"-",
"c",
"[",
"1",
"]",
",",
"c",
"[",
"1",
"]",
",",
"-",
"1",
")",
":",
"dx",
",",
"dy",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"-",
"c",
"[",
"1",
"]",
",",
"c",
"[",
"1",
"]",
",",
"size",
"=",
"(",
"2",
",",
")",
")",
"h_prime",
",",
"w_prime",
"=",
"h",
"+",
"dy",
",",
"w",
"+",
"dx",
"# swap",
"x",
"[",
"h",
",",
"w",
"]",
",",
"x",
"[",
"h_prime",
",",
"w_prime",
"]",
"=",
"x",
"[",
"h_prime",
",",
"w_prime",
"]",
",",
"x",
"[",
"h",
",",
"w",
"]",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"skimage",
".",
"filters",
".",
"gaussian",
"(",
"x",
"/",
"255.",
",",
"sigma",
"=",
"c",
"[",
"0",
"]",
",",
"multichannel",
"=",
"True",
")",
",",
"0",
",",
"1",
")",
"x_clip",
"*=",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
|
[
"Frosted",
"glass",
"blurring",
"to",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L239-L270
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
zoom_blur
|
def zoom_blur(x, severity=1):
"""Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
"""
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
|
python
|
def zoom_blur(x, severity=1):
"""Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
"""
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
|
[
"def",
"zoom_blur",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"np",
".",
"arange",
"(",
"1",
",",
"1.11",
",",
"0.01",
")",
",",
"np",
".",
"arange",
"(",
"1",
",",
"1.16",
",",
"0.01",
")",
",",
"np",
".",
"arange",
"(",
"1",
",",
"1.21",
",",
"0.02",
")",
",",
"np",
".",
"arange",
"(",
"1",
",",
"1.26",
",",
"0.02",
")",
",",
"np",
".",
"arange",
"(",
"1",
",",
"1.31",
",",
"0.03",
")",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"(",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"out",
"=",
"np",
".",
"zeros_like",
"(",
"x",
")",
"for",
"zoom_factor",
"in",
"c",
":",
"out",
"+=",
"clipped_zoom",
"(",
"x",
",",
"zoom_factor",
")",
"x",
"=",
"(",
"x",
"+",
"out",
")",
"/",
"(",
"len",
"(",
"c",
")",
"+",
"1",
")",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"x",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
|
[
"Zoom",
"blurring",
"to",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L273-L298
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
fog
|
def fog(x, severity=1):
"""Fog corruption to images.
Adding fog to images. Fog is generated by diamond-square algorithm.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added fog.
"""
c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
x = np.array(x) / 255.
max_val = x.max()
mapsize = 512
shape = x.shape
max_length = max(shape[0], shape[1])
if max_length > mapsize:
mapsize = 2**int(np.ceil(np.log2(float(max_length))))
tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1])
tmp = tmp[:x.shape[0], :x.shape[1]]
tmp = tmp[..., np.newaxis]
x += c[0] * tmp
x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
return around_and_astype(x_clip)
|
python
|
def fog(x, severity=1):
"""Fog corruption to images.
Adding fog to images. Fog is generated by diamond-square algorithm.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added fog.
"""
c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
x = np.array(x) / 255.
max_val = x.max()
mapsize = 512
shape = x.shape
max_length = max(shape[0], shape[1])
if max_length > mapsize:
mapsize = 2**int(np.ceil(np.log2(float(max_length))))
tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1])
tmp = tmp[:x.shape[0], :x.shape[1]]
tmp = tmp[..., np.newaxis]
x += c[0] * tmp
x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
return around_and_astype(x_clip)
|
[
"def",
"fog",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"(",
"1.5",
",",
"2",
")",
",",
"(",
"2.",
",",
"2",
")",
",",
"(",
"2.5",
",",
"1.7",
")",
",",
"(",
"2.5",
",",
"1.5",
")",
",",
"(",
"3.",
",",
"1.4",
")",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
"max_val",
"=",
"x",
".",
"max",
"(",
")",
"mapsize",
"=",
"512",
"shape",
"=",
"x",
".",
"shape",
"max_length",
"=",
"max",
"(",
"shape",
"[",
"0",
"]",
",",
"shape",
"[",
"1",
"]",
")",
"if",
"max_length",
">",
"mapsize",
":",
"mapsize",
"=",
"2",
"**",
"int",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"log2",
"(",
"float",
"(",
"max_length",
")",
")",
")",
")",
"tmp",
"=",
"plasma_fractal",
"(",
"mapsize",
"=",
"mapsize",
",",
"wibbledecay",
"=",
"c",
"[",
"1",
"]",
")",
"tmp",
"=",
"tmp",
"[",
":",
"x",
".",
"shape",
"[",
"0",
"]",
",",
":",
"x",
".",
"shape",
"[",
"1",
"]",
"]",
"tmp",
"=",
"tmp",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"x",
"+=",
"c",
"[",
"0",
"]",
"*",
"tmp",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"x",
"*",
"max_val",
"/",
"(",
"max_val",
"+",
"c",
"[",
"0",
"]",
")",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Fog corruption to images.
Adding fog to images. Fog is generated by diamond-square algorithm.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added fog.
|
[
"Fog",
"corruption",
"to",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L301-L326
|
train
|
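Assuming plasma_fractal from the earlier record is in scope, fog is an additive blend followed by renormalization with the pre-fog maximum. A reduced sketch using the severity-1 constants:

import numpy as np

np.random.seed(0)
img = np.random.rand(224, 224, 3).astype(np.float32)

strength, decay = 1.5, 2.0                    # severity-1 entry of the table above
max_val = img.max()

fog_layer = plasma_fractal(mapsize=256, wibbledecay=decay)[:224, :224, np.newaxis]
foggy = np.clip((img + strength * fog_layer) * max_val / (max_val + strength), 0, 1)

print(foggy.shape)   # (224, 224, 3)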
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
brightness
|
def brightness(x, severity=1):
"""Change brightness of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed brightness.
"""
c = [.1, .2, .3, .4, .5][severity - 1]
x = np.array(x) / 255.
x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
|
python
|
def brightness(x, severity=1):
"""Change brightness of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed brightness.
"""
c = [.1, .2, .3, .4, .5][severity - 1]
x = np.array(x) / 255.
x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
|
[
"def",
"brightness",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
".1",
",",
".2",
",",
".3",
",",
".4",
",",
".5",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
"x",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"skimage",
".",
"color",
".",
"rgb2hsv",
"(",
"x",
")",
"x",
"[",
":",
",",
":",
",",
"2",
"]",
"=",
"np",
".",
"clip",
"(",
"x",
"[",
":",
",",
":",
",",
"2",
"]",
"+",
"c",
",",
"0",
",",
"1",
")",
"x",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"skimage",
".",
"color",
".",
"hsv2rgb",
"(",
"x",
")",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"x",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Change brightness of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed brightness.
|
[
"Change",
"brightness",
"of",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L329-L346
|
train
|
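A stand-alone version of the same HSV round trip used by brightness, importing skimage directly rather than through tfds lazy_imports, with the severity fixed at 3:

import numpy as np
from skimage import color

c = [.1, .2, .3, .4, .5][3 - 1]
img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)

hsv = color.rgb2hsv(img / 255.)
hsv[:, :, 2] = np.clip(hsv[:, :, 2] + c, 0, 1)    # shift the value channel up
out = np.around(np.clip(color.hsv2rgb(hsv), 0, 1) * 255).astype(np.uint8)

print(out.shape, out.dtype)   # (64, 64, 3) uint8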
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
contrast
|
def contrast(x, severity=1):
"""Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
"""
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip)
|
python
|
def contrast(x, severity=1):
"""Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
"""
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip)
|
[
"def",
"contrast",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"0.4",
",",
".3",
",",
".2",
",",
".1",
",",
".05",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
"means",
"=",
"np",
".",
"mean",
"(",
"x",
",",
"axis",
"=",
"(",
"0",
",",
"1",
")",
",",
"keepdims",
"=",
"True",
")",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"(",
"x",
"-",
"means",
")",
"*",
"c",
"+",
"means",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
|
[
"Change",
"contrast",
"of",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L349-L364
|
train
|
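A self-contained sketch of the contrast corruption, assuming only NumPy; it scales pixels toward the per-channel mean exactly as above, with the rounding playing the role of around_and_astype:

import numpy as np

def contrast_sketch(image_uint8, severity=1):
    scale = [0.4, .3, .2, .1, .05][severity - 1]
    x = image_uint8.astype(np.float64) / 255.
    means = x.mean(axis=(0, 1), keepdims=True)  # per-channel mean over height and width
    out = np.clip((x - means) * scale + means, 0, 1) * 255
    return np.around(out).astype(np.uint8)

flattened = contrast_sketch(np.random.randint(0, 256, (16, 16, 3), dtype=np.uint8), severity=5)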
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
elastic
|
def elastic(x, severity=1):
"""Conduct elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic transform.
"""
c = [(244 * 2, 244 * 0.7, 244 * 0.1), (244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02), (244 * 0.07, 244 * 0.01,
244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]
image = np.array(x, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size
])
pts2 = pts1 + np.random.uniform(
-c[2], c[2], size=pts1.shape).astype(np.float32)
affine_trans = tfds.core.lazy_imports.cv2.getAffineTransform(pts1, pts2)
image = tfds.core.lazy_imports.cv2.warpAffine(
image,
affine_trans,
shape_size[::-1],
borderMode=tfds.core.lazy_imports.cv2.BORDER_REFLECT_101)
dx = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dy = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(
np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx,
(-1, 1)), np.reshape(
z, (-1, 1))
x_clip = np.clip(
tfds.core.lazy_imports.scipy.ndimage.interpolation.map_coordinates(
image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
return around_and_astype(x_clip)
|
python
|
def elastic(x, severity=1):
"""Conduct elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic transform.
"""
c = [(244 * 2, 244 * 0.7, 244 * 0.1), (244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02), (244 * 0.07, 244 * 0.01,
244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]
image = np.array(x, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size
])
pts2 = pts1 + np.random.uniform(
-c[2], c[2], size=pts1.shape).astype(np.float32)
affine_trans = tfds.core.lazy_imports.cv2.getAffineTransform(pts1, pts2)
image = tfds.core.lazy_imports.cv2.warpAffine(
image,
affine_trans,
shape_size[::-1],
borderMode=tfds.core.lazy_imports.cv2.BORDER_REFLECT_101)
dx = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dy = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(
np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx,
(-1, 1)), np.reshape(
z, (-1, 1))
x_clip = np.clip(
tfds.core.lazy_imports.scipy.ndimage.interpolation.map_coordinates(
image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
return around_and_astype(x_clip)
|
[
"def",
"elastic",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"(",
"244",
"*",
"2",
",",
"244",
"*",
"0.7",
",",
"244",
"*",
"0.1",
")",
",",
"(",
"244",
"*",
"2",
",",
"244",
"*",
"0.08",
",",
"244",
"*",
"0.2",
")",
",",
"(",
"244",
"*",
"0.05",
",",
"244",
"*",
"0.01",
",",
"244",
"*",
"0.02",
")",
",",
"(",
"244",
"*",
"0.07",
",",
"244",
"*",
"0.01",
",",
"244",
"*",
"0.02",
")",
",",
"(",
"244",
"*",
"0.12",
",",
"244",
"*",
"0.01",
",",
"244",
"*",
"0.02",
")",
"]",
"[",
"severity",
"-",
"1",
"]",
"image",
"=",
"np",
".",
"array",
"(",
"x",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"/",
"255.",
"shape",
"=",
"image",
".",
"shape",
"shape_size",
"=",
"shape",
"[",
":",
"2",
"]",
"# random affine",
"center_square",
"=",
"np",
".",
"float32",
"(",
"shape_size",
")",
"//",
"2",
"square_size",
"=",
"min",
"(",
"shape_size",
")",
"//",
"3",
"pts1",
"=",
"np",
".",
"float32",
"(",
"[",
"center_square",
"+",
"square_size",
",",
"[",
"center_square",
"[",
"0",
"]",
"+",
"square_size",
",",
"center_square",
"[",
"1",
"]",
"-",
"square_size",
"]",
",",
"center_square",
"-",
"square_size",
"]",
")",
"pts2",
"=",
"pts1",
"+",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"c",
"[",
"2",
"]",
",",
"c",
"[",
"2",
"]",
",",
"size",
"=",
"pts1",
".",
"shape",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"affine_trans",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
".",
"getAffineTransform",
"(",
"pts1",
",",
"pts2",
")",
"image",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
".",
"warpAffine",
"(",
"image",
",",
"affine_trans",
",",
"shape_size",
"[",
":",
":",
"-",
"1",
"]",
",",
"borderMode",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
".",
"BORDER_REFLECT_101",
")",
"dx",
"=",
"(",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"skimage",
".",
"filters",
".",
"gaussian",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"1",
",",
"1",
",",
"size",
"=",
"shape",
"[",
":",
"2",
"]",
")",
",",
"c",
"[",
"1",
"]",
",",
"mode",
"=",
"'reflect'",
",",
"truncate",
"=",
"3",
")",
"*",
"c",
"[",
"0",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"dy",
"=",
"(",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"skimage",
".",
"filters",
".",
"gaussian",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"1",
",",
"1",
",",
"size",
"=",
"shape",
"[",
":",
"2",
"]",
")",
",",
"c",
"[",
"1",
"]",
",",
"mode",
"=",
"'reflect'",
",",
"truncate",
"=",
"3",
")",
"*",
"c",
"[",
"0",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"dx",
",",
"dy",
"=",
"dx",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
",",
"dy",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"x",
",",
"y",
",",
"z",
"=",
"np",
".",
"meshgrid",
"(",
"np",
".",
"arange",
"(",
"shape",
"[",
"1",
"]",
")",
",",
"np",
".",
"arange",
"(",
"shape",
"[",
"0",
"]",
")",
",",
"np",
".",
"arange",
"(",
"shape",
"[",
"2",
"]",
")",
")",
"indices",
"=",
"np",
".",
"reshape",
"(",
"y",
"+",
"dy",
",",
"(",
"-",
"1",
",",
"1",
")",
")",
",",
"np",
".",
"reshape",
"(",
"x",
"+",
"dx",
",",
"(",
"-",
"1",
",",
"1",
")",
")",
",",
"np",
".",
"reshape",
"(",
"z",
",",
"(",
"-",
"1",
",",
"1",
")",
")",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"scipy",
".",
"ndimage",
".",
"interpolation",
".",
"map_coordinates",
"(",
"image",
",",
"indices",
",",
"order",
"=",
"1",
",",
"mode",
"=",
"'reflect'",
")",
".",
"reshape",
"(",
"shape",
")",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
] |
Conduct elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic transform.
|
[
"Conduct",
"elastic",
"transform",
"to",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L367-L425
|
train
|
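A usage sketch for the elastic transform, assuming the elastic function as recorded above is in scope together with its tfds/skimage/scipy dependencies; per its docstring the output keeps the input shape and uint8 dtype:

import numpy as np

noisy_image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
warped = elastic(noisy_image, severity=2)  # random affine warp plus smoothed per-pixel displacement
assert warped.shape == noisy_image.shape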
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
pixelate
|
def pixelate(x, severity=1):
"""Pixelate images.
Conduct pixelating corruptions to images by first shrinking the images and
then resizing to original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied pixelating
corruption.
"""
c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
shape = x.shape
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
x = x.resize((int(shape[1] * c), int(shape[0] * c)))
x = x.resize((shape[1], shape[0]))
return np.asarray(x)
|
python
|
def pixelate(x, severity=1):
"""Pixelate images.
Conduct pixelating corruptions to images by first shrinking the images and
then resizing to original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied pixelating
corruption.
"""
c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
shape = x.shape
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
x = x.resize((int(shape[1] * c), int(shape[0] * c)))
x = x.resize((shape[1], shape[0]))
return np.asarray(x)
|
[
"def",
"pixelate",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"0.6",
",",
"0.5",
",",
"0.4",
",",
"0.3",
",",
"0.25",
"]",
"[",
"severity",
"-",
"1",
"]",
"shape",
"=",
"x",
".",
"shape",
"x",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"PIL_Image",
".",
"fromarray",
"(",
"x",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
")",
"x",
"=",
"x",
".",
"resize",
"(",
"(",
"int",
"(",
"shape",
"[",
"1",
"]",
"*",
"c",
")",
",",
"int",
"(",
"shape",
"[",
"0",
"]",
"*",
"c",
")",
")",
")",
"x",
"=",
"x",
".",
"resize",
"(",
"(",
"shape",
"[",
"1",
"]",
",",
"shape",
"[",
"0",
"]",
")",
")",
"return",
"np",
".",
"asarray",
"(",
"x",
")"
] |
Pixelate images.
Conduct pixelating corruptions to images by first shrinking the images and
then resizing to original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied pixelating
corruption.
|
[
"Pixelate",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L428-L447
|
train
|
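A standalone sketch of the pixelation corruption, assuming Pillow is imported directly rather than through tfds.core.lazy_imports; the blocky look comes from shrinking the image and resizing it back to the original size:

import numpy as np
from PIL import Image

def pixelate_sketch(image_uint8, severity=1):
    factor = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
    height, width = image_uint8.shape[:2]
    img = Image.fromarray(image_uint8)
    img = img.resize((int(width * factor), int(height * factor)))  # shrink
    img = img.resize((width, height))                              # blow back up
    return np.asarray(img)

blocky = pixelate_sketch(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8), severity=4)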
tensorflow/datasets
|
tensorflow_datasets/image/corruptions.py
|
jpeg_compression
|
def jpeg_compression(x, severity=1):
"""Conduct jpeg compression to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression.
"""
c = [25, 18, 15, 10, 7][severity - 1]
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
output = io.BytesIO()
x.save(output, 'JPEG', quality=c)
output.seek(0)
x = tfds.core.lazy_imports.PIL_Image.open(output)
return np.asarray(x)
|
python
|
def jpeg_compression(x, severity=1):
"""Conduct jpeg compression to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression.
"""
c = [25, 18, 15, 10, 7][severity - 1]
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
output = io.BytesIO()
x.save(output, 'JPEG', quality=c)
output.seek(0)
x = tfds.core.lazy_imports.PIL_Image.open(output)
return np.asarray(x)
|
[
"def",
"jpeg_compression",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"25",
",",
"18",
",",
"15",
",",
"10",
",",
"7",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"PIL_Image",
".",
"fromarray",
"(",
"x",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
")",
"output",
"=",
"io",
".",
"BytesIO",
"(",
")",
"x",
".",
"save",
"(",
"output",
",",
"'JPEG'",
",",
"quality",
"=",
"c",
")",
"output",
".",
"seek",
"(",
"0",
")",
"x",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"PIL_Image",
".",
"open",
"(",
"output",
")",
"return",
"np",
".",
"asarray",
"(",
"x",
")"
] |
Conduct jpeg compression to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression.
|
[
"Conduct",
"jpeg",
"compression",
"to",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L450-L466
|
train
|
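A standalone sketch of the JPEG-compression corruption, assuming Pillow is imported directly; the image is round-tripped through an in-memory JPEG at a severity-dependent quality setting:

import io
import numpy as np
from PIL import Image

def jpeg_compression_sketch(image_uint8, severity=1):
    quality = [25, 18, 15, 10, 7][severity - 1]
    buf = io.BytesIO()
    Image.fromarray(image_uint8).save(buf, 'JPEG', quality=quality)
    buf.seek(0)
    return np.asarray(Image.open(buf))  # decode the lossy bytes back into an array

degraded = jpeg_compression_sketch(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8), severity=5)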
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
temporary_assignment
|
def temporary_assignment(obj, attr, value):
"""Temporarily assign obj.attr to value."""
original = getattr(obj, attr, None)
setattr(obj, attr, value)
yield
setattr(obj, attr, original)
|
python
|
def temporary_assignment(obj, attr, value):
"""Temporarily assign obj.attr to value."""
original = getattr(obj, attr, None)
setattr(obj, attr, value)
yield
setattr(obj, attr, original)
|
[
"def",
"temporary_assignment",
"(",
"obj",
",",
"attr",
",",
"value",
")",
":",
"original",
"=",
"getattr",
"(",
"obj",
",",
"attr",
",",
"None",
")",
"setattr",
"(",
"obj",
",",
"attr",
",",
"value",
")",
"yield",
"setattr",
"(",
"obj",
",",
"attr",
",",
"original",
")"
] |
Temporarily assign obj.attr to value.
|
[
"Temporarily",
"assign",
"obj",
".",
"attr",
"to",
"value",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L55-L60
|
train
|
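A usage sketch for temporary_assignment; the record above shows only the generator body, so the sketch assumes it is wrapped with contextlib.contextmanager (as a yield-once generator it is clearly meant to be) before being used in a with block:

import contextlib

@contextlib.contextmanager
def temporary_assignment(obj, attr, value):
    # Same body as the record above, wrapped so it can guard a `with` block.
    original = getattr(obj, attr, None)
    setattr(obj, attr, value)
    yield
    setattr(obj, attr, original)

class Config(object):
    verbose = False

cfg = Config()
with temporary_assignment(cfg, "verbose", True):
    assert cfg.verbose          # overridden inside the block
assert cfg.verbose is False     # restored afterwards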
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
zip_dict
|
def zip_dict(*dicts):
"""Iterate over items of dictionaries grouped by their keys."""
for key in set(itertools.chain(*dicts)): # set merge all keys
# Will raise KeyError if the dict don't have the same keys
yield key, tuple(d[key] for d in dicts)
|
python
|
def zip_dict(*dicts):
"""Iterate over items of dictionaries grouped by their keys."""
for key in set(itertools.chain(*dicts)): # set merge all keys
# Will raise KeyError if the dict don't have the same keys
yield key, tuple(d[key] for d in dicts)
|
[
"def",
"zip_dict",
"(",
"*",
"dicts",
")",
":",
"for",
"key",
"in",
"set",
"(",
"itertools",
".",
"chain",
"(",
"*",
"dicts",
")",
")",
":",
"# set merge all keys",
"# Will raise KeyError if the dict don't have the same keys",
"yield",
"key",
",",
"tuple",
"(",
"d",
"[",
"key",
"]",
"for",
"d",
"in",
"dicts",
")"
] |
Iterate over items of dictionaries grouped by their keys.
|
[
"Iterate",
"over",
"items",
"of",
"dictionaries",
"grouped",
"by",
"their",
"keys",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L63-L67
|
train
|
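A usage sketch for zip_dict (the three-line definition is repeated so the snippet runs on its own):

import itertools

def zip_dict(*dicts):
    # As above: group each key's values across all dicts into a tuple.
    for key in set(itertools.chain(*dicts)):
        yield key, tuple(d[key] for d in dicts)

sizes = {"train": 100, "test": 20}
shards = {"train": 10, "test": 1}
assert dict(zip_dict(sizes, shards)) == {"train": (100, 10), "test": (20, 1)}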
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
map_nested
|
def map_nested(function, data_struct, dict_only=False, map_tuple=False):
"""Apply a function recursively to each element of a nested data struct."""
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, dict):
return {
k: map_nested(function, v, dict_only, map_tuple)
for k, v in data_struct.items()
}
elif not dict_only:
types = [list]
if map_tuple:
types.append(tuple)
if isinstance(data_struct, tuple(types)):
mapped = [map_nested(function, v, dict_only, map_tuple)
for v in data_struct]
if isinstance(data_struct, list):
return mapped
else:
return tuple(mapped)
# Singleton
return function(data_struct)
|
python
|
def map_nested(function, data_struct, dict_only=False, map_tuple=False):
"""Apply a function recursively to each element of a nested data struct."""
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, dict):
return {
k: map_nested(function, v, dict_only, map_tuple)
for k, v in data_struct.items()
}
elif not dict_only:
types = [list]
if map_tuple:
types.append(tuple)
if isinstance(data_struct, tuple(types)):
mapped = [map_nested(function, v, dict_only, map_tuple)
for v in data_struct]
if isinstance(data_struct, list):
return mapped
else:
return tuple(mapped)
# Singleton
return function(data_struct)
|
[
"def",
"map_nested",
"(",
"function",
",",
"data_struct",
",",
"dict_only",
"=",
"False",
",",
"map_tuple",
"=",
"False",
")",
":",
"# Could add support for more exotic data_struct, like OrderedDict",
"if",
"isinstance",
"(",
"data_struct",
",",
"dict",
")",
":",
"return",
"{",
"k",
":",
"map_nested",
"(",
"function",
",",
"v",
",",
"dict_only",
",",
"map_tuple",
")",
"for",
"k",
",",
"v",
"in",
"data_struct",
".",
"items",
"(",
")",
"}",
"elif",
"not",
"dict_only",
":",
"types",
"=",
"[",
"list",
"]",
"if",
"map_tuple",
":",
"types",
".",
"append",
"(",
"tuple",
")",
"if",
"isinstance",
"(",
"data_struct",
",",
"tuple",
"(",
"types",
")",
")",
":",
"mapped",
"=",
"[",
"map_nested",
"(",
"function",
",",
"v",
",",
"dict_only",
",",
"map_tuple",
")",
"for",
"v",
"in",
"data_struct",
"]",
"if",
"isinstance",
"(",
"data_struct",
",",
"list",
")",
":",
"return",
"mapped",
"else",
":",
"return",
"tuple",
"(",
"mapped",
")",
"# Singleton",
"return",
"function",
"(",
"data_struct",
")"
] |
Apply a function recursively to each element of a nested data struct.
|
[
"Apply",
"a",
"function",
"recursively",
"to",
"each",
"element",
"of",
"a",
"nested",
"data",
"struct",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L122-L143
|
train
|
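A usage sketch for map_nested, assuming the definition above is in scope; dicts and lists are traversed recursively, and dict_only=True makes lists behave as leaves:

nested = {"a": [1, 2], "b": {"c": 3}}
assert map_nested(lambda v: v * 2, nested) == {"a": [2, 4], "b": {"c": 6}}
# With dict_only=True the list is passed whole to the function:
assert map_nested(len, {"a": [1, 2]}, dict_only=True) == {"a": 2}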
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
zip_nested
|
def zip_nested(arg0, *args, **kwargs):
"""Zip data struct together and return a data struct with the same shape."""
# Python 2 do not support kwargs only arguments
dict_only = kwargs.pop("dict_only", False)
assert not kwargs
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(arg0, dict):
return {
k: zip_nested(*a, dict_only=dict_only) for k, a in zip_dict(arg0, *args)
}
elif not dict_only:
if isinstance(arg0, list):
return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)]
# Singleton
return (arg0,) + args
|
python
|
def zip_nested(arg0, *args, **kwargs):
"""Zip data struct together and return a data struct with the same shape."""
# Python 2 do not support kwargs only arguments
dict_only = kwargs.pop("dict_only", False)
assert not kwargs
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(arg0, dict):
return {
k: zip_nested(*a, dict_only=dict_only) for k, a in zip_dict(arg0, *args)
}
elif not dict_only:
if isinstance(arg0, list):
return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)]
# Singleton
return (arg0,) + args
|
[
"def",
"zip_nested",
"(",
"arg0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Python 2 do not support kwargs only arguments",
"dict_only",
"=",
"kwargs",
".",
"pop",
"(",
"\"dict_only\"",
",",
"False",
")",
"assert",
"not",
"kwargs",
"# Could add support for more exotic data_struct, like OrderedDict",
"if",
"isinstance",
"(",
"arg0",
",",
"dict",
")",
":",
"return",
"{",
"k",
":",
"zip_nested",
"(",
"*",
"a",
",",
"dict_only",
"=",
"dict_only",
")",
"for",
"k",
",",
"a",
"in",
"zip_dict",
"(",
"arg0",
",",
"*",
"args",
")",
"}",
"elif",
"not",
"dict_only",
":",
"if",
"isinstance",
"(",
"arg0",
",",
"list",
")",
":",
"return",
"[",
"zip_nested",
"(",
"*",
"a",
",",
"dict_only",
"=",
"dict_only",
")",
"for",
"a",
"in",
"zip",
"(",
"arg0",
",",
"*",
"args",
")",
"]",
"# Singleton",
"return",
"(",
"arg0",
",",
")",
"+",
"args"
] |
Zip data struct together and return a data struct with the same shape.
|
[
"Zip",
"data",
"struct",
"together",
"and",
"return",
"a",
"data",
"struct",
"with",
"the",
"same",
"shape",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L146-L161
|
train
|
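A usage sketch for zip_nested, assuming zip_nested and zip_dict as defined above are in scope; matching leaves across the inputs are grouped into tuples while the nesting is preserved:

left = {"x": [1, 2], "y": 3}
right = {"x": [10, 20], "y": 30}
assert zip_nested(left, right) == {"x": [(1, 10), (2, 20)], "y": (3, 30)}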
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
as_proto_cls
|
def as_proto_cls(proto_cls):
"""Simulate proto inheritance.
By default, protobuf do not support direct inheritance, so this decorator
simulates inheritance to the class to which it is applied.
Example:
```
@as_proto_class(proto.MyProto)
class A(object):
def custom_method(self):
return self.proto_field * 10
p = proto.MyProto(proto_field=123)
a = A()
a.CopyFrom(p) # a is like a proto object
assert a.proto_field == 123
a.custom_method() # But has additional methods
```
Args:
proto_cls: The protobuf class to inherit from
Returns:
decorated_cls: The decorated class
"""
def decorator(cls):
"""Decorator applied to the class."""
class ProtoCls(object):
"""Base class simulating the protobuf."""
def __init__(self, *args, **kwargs):
super(ProtoCls, self).__setattr__(
"_ProtoCls__proto",
proto_cls(*args, **kwargs),
)
def __getattr__(self, attr_name):
return getattr(self.__proto, attr_name)
def __setattr__(self, attr_name, new_value):
try:
return setattr(self.__proto, attr_name, new_value)
except AttributeError:
return super(ProtoCls, self).__setattr__(attr_name, new_value)
def __eq__(self, other):
return self.__proto, other.get_proto()
def get_proto(self):
return self.__proto
def __repr__(self):
return "<{cls_name}\n{proto_repr}\n>".format(
cls_name=cls.__name__, proto_repr=repr(self.__proto))
decorator_cls = type(cls.__name__, (cls, ProtoCls), {
"__doc__": cls.__doc__,
})
return decorator_cls
return decorator
|
python
|
def as_proto_cls(proto_cls):
"""Simulate proto inheritance.
By default, protobuf do not support direct inheritance, so this decorator
simulates inheritance to the class to which it is applied.
Example:
```
@as_proto_class(proto.MyProto)
class A(object):
def custom_method(self):
return self.proto_field * 10
p = proto.MyProto(proto_field=123)
a = A()
a.CopyFrom(p) # a is like a proto object
assert a.proto_field == 123
a.custom_method() # But has additional methods
```
Args:
proto_cls: The protobuf class to inherit from
Returns:
decorated_cls: The decorated class
"""
def decorator(cls):
"""Decorator applied to the class."""
class ProtoCls(object):
"""Base class simulating the protobuf."""
def __init__(self, *args, **kwargs):
super(ProtoCls, self).__setattr__(
"_ProtoCls__proto",
proto_cls(*args, **kwargs),
)
def __getattr__(self, attr_name):
return getattr(self.__proto, attr_name)
def __setattr__(self, attr_name, new_value):
try:
return setattr(self.__proto, attr_name, new_value)
except AttributeError:
return super(ProtoCls, self).__setattr__(attr_name, new_value)
def __eq__(self, other):
return self.__proto, other.get_proto()
def get_proto(self):
return self.__proto
def __repr__(self):
return "<{cls_name}\n{proto_repr}\n>".format(
cls_name=cls.__name__, proto_repr=repr(self.__proto))
decorator_cls = type(cls.__name__, (cls, ProtoCls), {
"__doc__": cls.__doc__,
})
return decorator_cls
return decorator
|
[
"def",
"as_proto_cls",
"(",
"proto_cls",
")",
":",
"def",
"decorator",
"(",
"cls",
")",
":",
"\"\"\"Decorator applied to the class.\"\"\"",
"class",
"ProtoCls",
"(",
"object",
")",
":",
"\"\"\"Base class simulating the protobuf.\"\"\"",
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"ProtoCls",
",",
"self",
")",
".",
"__setattr__",
"(",
"\"_ProtoCls__proto\"",
",",
"proto_cls",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
")",
"def",
"__getattr__",
"(",
"self",
",",
"attr_name",
")",
":",
"return",
"getattr",
"(",
"self",
".",
"__proto",
",",
"attr_name",
")",
"def",
"__setattr__",
"(",
"self",
",",
"attr_name",
",",
"new_value",
")",
":",
"try",
":",
"return",
"setattr",
"(",
"self",
".",
"__proto",
",",
"attr_name",
",",
"new_value",
")",
"except",
"AttributeError",
":",
"return",
"super",
"(",
"ProtoCls",
",",
"self",
")",
".",
"__setattr__",
"(",
"attr_name",
",",
"new_value",
")",
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"__proto",
",",
"other",
".",
"get_proto",
"(",
")",
"def",
"get_proto",
"(",
"self",
")",
":",
"return",
"self",
".",
"__proto",
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"\"<{cls_name}\\n{proto_repr}\\n>\"",
".",
"format",
"(",
"cls_name",
"=",
"cls",
".",
"__name__",
",",
"proto_repr",
"=",
"repr",
"(",
"self",
".",
"__proto",
")",
")",
"decorator_cls",
"=",
"type",
"(",
"cls",
".",
"__name__",
",",
"(",
"cls",
",",
"ProtoCls",
")",
",",
"{",
"\"__doc__\"",
":",
"cls",
".",
"__doc__",
",",
"}",
")",
"return",
"decorator_cls",
"return",
"decorator"
] |
Simulate proto inheritance.
By default, protobuf do not support direct inheritance, so this decorator
simulates inheritance to the class to which it is applied.
Example:
```
@as_proto_class(proto.MyProto)
class A(object):
def custom_method(self):
return self.proto_field * 10
p = proto.MyProto(proto_field=123)
a = A()
a.CopyFrom(p) # a is like a proto object
assert a.proto_field == 123
a.custom_method() # But has additional methods
```
Args:
proto_cls: The protobuf class to inherit from
Returns:
decorated_cls: The decorated class
|
[
"Simulate",
"proto",
"inheritance",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L164-L229
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
tfds_dir
|
def tfds_dir():
"""Path to tensorflow_datasets directory."""
return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
|
python
|
def tfds_dir():
"""Path to tensorflow_datasets directory."""
return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
|
[
"def",
"tfds_dir",
"(",
")",
":",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
")"
] |
Path to tensorflow_datasets directory.
|
[
"Path",
"to",
"tensorflow_datasets",
"directory",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L232-L234
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
atomic_write
|
def atomic_write(path, mode):
"""Writes to path atomically, by writing to temp file and renaming it."""
tmp_path = "%s%s_%s" % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex)
with tf.io.gfile.GFile(tmp_path, mode) as file_:
yield file_
tf.io.gfile.rename(tmp_path, path, overwrite=True)
|
python
|
def atomic_write(path, mode):
"""Writes to path atomically, by writing to temp file and renaming it."""
tmp_path = "%s%s_%s" % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex)
with tf.io.gfile.GFile(tmp_path, mode) as file_:
yield file_
tf.io.gfile.rename(tmp_path, path, overwrite=True)
|
[
"def",
"atomic_write",
"(",
"path",
",",
"mode",
")",
":",
"tmp_path",
"=",
"\"%s%s_%s\"",
"%",
"(",
"path",
",",
"constants",
".",
"INCOMPLETE_SUFFIX",
",",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"tmp_path",
",",
"mode",
")",
"as",
"file_",
":",
"yield",
"file_",
"tf",
".",
"io",
".",
"gfile",
".",
"rename",
"(",
"tmp_path",
",",
"path",
",",
"overwrite",
"=",
"True",
")"
] |
Writes to path atomically, by writing to temp file and renaming it.
|
[
"Writes",
"to",
"path",
"atomically",
"by",
"writing",
"to",
"temp",
"file",
"and",
"renaming",
"it",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L238-L243
|
train
|
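A TensorFlow-free sketch of the write-to-temp-then-rename pattern used by atomic_write; the hard-coded '.incomplete_' suffix stands in for constants.INCOMPLETE_SUFFIX, plain open/os.replace stands in for tf.io.gfile, and contextlib.contextmanager supplies the decorator not shown in the record:

import contextlib
import os
import uuid

@contextlib.contextmanager
def atomic_write_sketch(path, mode):
    tmp_path = "%s.incomplete_%s" % (path, uuid.uuid4().hex)
    with open(tmp_path, mode) as f:
        yield f
    os.replace(tmp_path, path)  # rename only after the write completed

with atomic_write_sketch("/tmp/atomic_write_demo.txt", "w") as f:
    f.write("payload")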
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
read_checksum_digest
|
def read_checksum_digest(path, checksum_cls=hashlib.sha256):
"""Given a hash constructor, returns checksum digest and size of file."""
checksum = checksum_cls()
size = 0
with tf.io.gfile.GFile(path, "rb") as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
size += len(block)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), size
|
python
|
def read_checksum_digest(path, checksum_cls=hashlib.sha256):
"""Given a hash constructor, returns checksum digest and size of file."""
checksum = checksum_cls()
size = 0
with tf.io.gfile.GFile(path, "rb") as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
size += len(block)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), size
|
[
"def",
"read_checksum_digest",
"(",
"path",
",",
"checksum_cls",
"=",
"hashlib",
".",
"sha256",
")",
":",
"checksum",
"=",
"checksum_cls",
"(",
")",
"size",
"=",
"0",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"while",
"True",
":",
"block",
"=",
"f",
".",
"read",
"(",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
"size",
"+=",
"len",
"(",
"block",
")",
"if",
"not",
"block",
":",
"break",
"checksum",
".",
"update",
"(",
"block",
")",
"return",
"checksum",
".",
"hexdigest",
"(",
")",
",",
"size"
] |
Given a hash constructor, returns checksum digest and size of file.
|
[
"Given",
"a",
"hash",
"constructor",
"returns",
"checksum",
"digest",
"and",
"size",
"of",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L262-L273
|
train
|
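A dependency-free sketch of read_checksum_digest using the builtin open instead of tf.io.gfile; the read loop is otherwise identical:

import hashlib
import io

def read_checksum_digest_sketch(path, checksum_cls=hashlib.sha256):
    checksum = checksum_cls()
    size = 0
    with open(path, "rb") as f:
        while True:
            block = f.read(io.DEFAULT_BUFFER_SIZE)
            size += len(block)
            if not block:
                break
            checksum.update(block)
    return checksum.hexdigest(), size  # hex digest plus total byte count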
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
reraise
|
def reraise(additional_msg):
"""Reraise an exception with an additional message."""
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = str(exc_value) + "\n" + additional_msg
six.reraise(exc_type, exc_type(msg), exc_traceback)
|
python
|
def reraise(additional_msg):
"""Reraise an exception with an additional message."""
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = str(exc_value) + "\n" + additional_msg
six.reraise(exc_type, exc_type(msg), exc_traceback)
|
[
"def",
"reraise",
"(",
"additional_msg",
")",
":",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"msg",
"=",
"str",
"(",
"exc_value",
")",
"+",
"\"\\n\"",
"+",
"additional_msg",
"six",
".",
"reraise",
"(",
"exc_type",
",",
"exc_type",
"(",
"msg",
")",
",",
"exc_traceback",
")"
] |
Reraise an exception with an additional message.
|
[
"Reraise",
"an",
"exception",
"with",
"an",
"additional",
"message",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L276-L280
|
train
|
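A usage sketch for reraise, assuming the definition above (and six) is importable; the original exception type is preserved while the message gains the extra context:

try:
    int("not a number")
except ValueError:
    try:
        reraise("while parsing the configuration")
    except ValueError as e:
        assert "while parsing the configuration" in str(e)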
tensorflow/datasets
|
tensorflow_datasets/core/utils/py_utils.py
|
rgetattr
|
def rgetattr(obj, attr, *args):
"""Get attr that handles dots in attr name."""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split("."))
|
python
|
def rgetattr(obj, attr, *args):
"""Get attr that handles dots in attr name."""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split("."))
|
[
"def",
"rgetattr",
"(",
"obj",
",",
"attr",
",",
"*",
"args",
")",
":",
"def",
"_getattr",
"(",
"obj",
",",
"attr",
")",
":",
"return",
"getattr",
"(",
"obj",
",",
"attr",
",",
"*",
"args",
")",
"return",
"functools",
".",
"reduce",
"(",
"_getattr",
",",
"[",
"obj",
"]",
"+",
"attr",
".",
"split",
"(",
"\".\"",
")",
")"
] |
Get attr that handles dots in attr name.
|
[
"Get",
"attr",
"that",
"handles",
"dots",
"in",
"attr",
"name",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L283-L287
|
train
|
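A usage sketch for rgetattr (the definition is repeated so the snippet is self-contained); the optional default behaves as in plain getattr:

import functools

def rgetattr(obj, attr, *args):
    # As above: getattr that follows dotted attribute paths.
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)
    return functools.reduce(_getattr, [obj] + attr.split("."))

class Inner(object):
    value = 42

class Outer(object):
    inner = Inner()

assert rgetattr(Outer(), "inner.value") == 42
assert rgetattr(Outer(), "inner.missing", None) is None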
tensorflow/datasets
|
tensorflow_datasets/image/celebahq.py
|
CelebAHq._split_generators
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
image_tar_file = os.path.join(dl_manager.manual_dir,
self.builder_config.file_name)
if not tf.io.gfile.exists(image_tar_file):
# The current celebahq generation code depends on a concrete version of
# pillow library and cannot be easily ported into tfds.
msg = "You must download the dataset files manually and place them in: "
msg += dl_manager.manual_dir
msg += " as .tar files. See testing/test_data/fake_examples/celeb_a_hq "
raise AssertionError(msg)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=50,
gen_kwargs={"archive": dl_manager.iter_archive(image_tar_file)},
)
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
image_tar_file = os.path.join(dl_manager.manual_dir,
self.builder_config.file_name)
if not tf.io.gfile.exists(image_tar_file):
# The current celebahq generation code depends on a concrete version of
# pillow library and cannot be easily ported into tfds.
msg = "You must download the dataset files manually and place them in: "
msg += dl_manager.manual_dir
msg += " as .tar files. See testing/test_data/fake_examples/celeb_a_hq "
raise AssertionError(msg)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=50,
gen_kwargs={"archive": dl_manager.iter_archive(image_tar_file)},
)
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"image_tar_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_manager",
".",
"manual_dir",
",",
"self",
".",
"builder_config",
".",
"file_name",
")",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"image_tar_file",
")",
":",
"# The current celebahq generation code depends on a concrete version of",
"# pillow library and cannot be easily ported into tfds.",
"msg",
"=",
"\"You must download the dataset files manually and place them in: \"",
"msg",
"+=",
"dl_manager",
".",
"manual_dir",
"msg",
"+=",
"\" as .tar files. See testing/test_data/fake_examples/celeb_a_hq \"",
"raise",
"AssertionError",
"(",
"msg",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
",",
"num_shards",
"=",
"50",
",",
"gen_kwargs",
"=",
"{",
"\"archive\"",
":",
"dl_manager",
".",
"iter_archive",
"(",
"image_tar_file",
")",
"}",
",",
")",
"]"
] |
Returns SplitGenerators.
|
[
"Returns",
"SplitGenerators",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/celebahq.py#L107-L124
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/ted_hrlr.py
|
TedHrlrTranslate._generate_examples
|
def _generate_examples(self, source_file, target_file):
"""This function returns the examples in the raw (text) form."""
with tf.io.gfile.GFile(source_file) as f:
source_sentences = f.read().split("\n")
with tf.io.gfile.GFile(target_file) as f:
target_sentences = f.read().split("\n")
assert len(target_sentences) == len(
source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (len(
source_sentences), len(target_sentences), source_file, target_file)
source, target = self.builder_config.language_pair
for l1, l2 in zip(source_sentences, target_sentences):
result = {source: l1, target: l2}
# Make sure that both translations are non-empty.
if all(result.values()):
yield result
|
python
|
def _generate_examples(self, source_file, target_file):
"""This function returns the examples in the raw (text) form."""
with tf.io.gfile.GFile(source_file) as f:
source_sentences = f.read().split("\n")
with tf.io.gfile.GFile(target_file) as f:
target_sentences = f.read().split("\n")
assert len(target_sentences) == len(
source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (len(
source_sentences), len(target_sentences), source_file, target_file)
source, target = self.builder_config.language_pair
for l1, l2 in zip(source_sentences, target_sentences):
result = {source: l1, target: l2}
# Make sure that both translations are non-empty.
if all(result.values()):
yield result
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"source_file",
",",
"target_file",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"source_file",
")",
"as",
"f",
":",
"source_sentences",
"=",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"target_file",
")",
"as",
"f",
":",
"target_sentences",
"=",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"assert",
"len",
"(",
"target_sentences",
")",
"==",
"len",
"(",
"source_sentences",
")",
",",
"\"Sizes do not match: %d vs %d for %s vs %s.\"",
"%",
"(",
"len",
"(",
"source_sentences",
")",
",",
"len",
"(",
"target_sentences",
")",
",",
"source_file",
",",
"target_file",
")",
"source",
",",
"target",
"=",
"self",
".",
"builder_config",
".",
"language_pair",
"for",
"l1",
",",
"l2",
"in",
"zip",
"(",
"source_sentences",
",",
"target_sentences",
")",
":",
"result",
"=",
"{",
"source",
":",
"l1",
",",
"target",
":",
"l2",
"}",
"# Make sure that both translations are non-empty.",
"if",
"all",
"(",
"result",
".",
"values",
"(",
")",
")",
":",
"yield",
"result"
] |
This function returns the examples in the raw (text) form.
|
[
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/ted_hrlr.py#L160-L176
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/xnli.py
|
Xnli._generate_examples
|
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
rows_per_pair_id = collections.defaultdict(list)
with tf.io.gfile.GFile(filepath) as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
rows_per_pair_id[row['pairID']].append(row)
for rows in six.itervalues(rows_per_pair_id):
premise = {row['language']: row['sentence1'] for row in rows}
hypothesis = {row['language']: row['sentence2'] for row in rows}
yield {
'premise': premise,
'hypothesis': hypothesis,
'label': rows[0]['gold_label'],
}
|
python
|
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
rows_per_pair_id = collections.defaultdict(list)
with tf.io.gfile.GFile(filepath) as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
rows_per_pair_id[row['pairID']].append(row)
for rows in six.itervalues(rows_per_pair_id):
premise = {row['language']: row['sentence1'] for row in rows}
hypothesis = {row['language']: row['sentence2'] for row in rows}
yield {
'premise': premise,
'hypothesis': hypothesis,
'label': rows[0]['gold_label'],
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"rows_per_pair_id",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
")",
"as",
"f",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"f",
",",
"delimiter",
"=",
"'\\t'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_NONE",
")",
"for",
"row",
"in",
"reader",
":",
"rows_per_pair_id",
"[",
"row",
"[",
"'pairID'",
"]",
"]",
".",
"append",
"(",
"row",
")",
"for",
"rows",
"in",
"six",
".",
"itervalues",
"(",
"rows_per_pair_id",
")",
":",
"premise",
"=",
"{",
"row",
"[",
"'language'",
"]",
":",
"row",
"[",
"'sentence1'",
"]",
"for",
"row",
"in",
"rows",
"}",
"hypothesis",
"=",
"{",
"row",
"[",
"'language'",
"]",
":",
"row",
"[",
"'sentence2'",
"]",
"for",
"row",
"in",
"rows",
"}",
"yield",
"{",
"'premise'",
":",
"premise",
",",
"'hypothesis'",
":",
"hypothesis",
",",
"'label'",
":",
"rows",
"[",
"0",
"]",
"[",
"'gold_label'",
"]",
",",
"}"
] |
This function returns the examples in the raw (text) form.
|
[
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/xnli.py#L107-L123
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/voc.py
|
Voc2007._generate_example
|
def _generate_example(self, data_path, image_id):
"""Yields examples."""
image_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/JPEGImages", "{}.jpg".format(image_id))
annon_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/Annotations", "{}.xml".format(image_id))
def _get_example_objects():
"""Function to get all the objects from the annotation XML file."""
with tf.io.gfile.GFile(annon_filepath, "r") as f:
root = xml.etree.ElementTree.parse(f).getroot()
size = root.find("size")
width = float(size.find("width").text)
height = float(size.find("height").text)
for obj in root.findall("object"):
# Get object's label name.
label = obj.find("name").text.lower()
# Get objects' pose name.
pose = obj.find("pose").text.lower()
is_truncated = (obj.find("truncated").text == "1")
is_difficult = (obj.find("difficult").text == "1")
bndbox = obj.find("bndbox")
xmax = float(bndbox.find("xmax").text)
xmin = float(bndbox.find("xmin").text)
ymax = float(bndbox.find("ymax").text)
ymin = float(bndbox.find("ymin").text)
yield {
"label": label,
"pose": pose,
"bbox": tfds.features.BBox(
ymin / height, xmin / width, ymax / height, xmax / width),
"is_truncated": is_truncated,
"is_difficult": is_difficult,
}
objects = list(_get_example_objects())
# Use set() to remove duplicates
labels = sorted(set(obj["label"] for obj in objects))
labels_no_difficult = sorted(set(
obj["label"] for obj in objects if obj["is_difficult"] == 0
))
return {
"image": image_filepath,
"image/filename": image_id + ".jpg",
"objects": objects,
"labels": labels,
"labels_no_difficult": labels_no_difficult,
}
|
python
|
def _generate_example(self, data_path, image_id):
"""Yields examples."""
image_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/JPEGImages", "{}.jpg".format(image_id))
annon_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/Annotations", "{}.xml".format(image_id))
def _get_example_objects():
"""Function to get all the objects from the annotation XML file."""
with tf.io.gfile.GFile(annon_filepath, "r") as f:
root = xml.etree.ElementTree.parse(f).getroot()
size = root.find("size")
width = float(size.find("width").text)
height = float(size.find("height").text)
for obj in root.findall("object"):
# Get object's label name.
label = obj.find("name").text.lower()
# Get objects' pose name.
pose = obj.find("pose").text.lower()
is_truncated = (obj.find("truncated").text == "1")
is_difficult = (obj.find("difficult").text == "1")
bndbox = obj.find("bndbox")
xmax = float(bndbox.find("xmax").text)
xmin = float(bndbox.find("xmin").text)
ymax = float(bndbox.find("ymax").text)
ymin = float(bndbox.find("ymin").text)
yield {
"label": label,
"pose": pose,
"bbox": tfds.features.BBox(
ymin / height, xmin / width, ymax / height, xmax / width),
"is_truncated": is_truncated,
"is_difficult": is_difficult,
}
objects = list(_get_example_objects())
# Use set() to remove duplicates
labels = sorted(set(obj["label"] for obj in objects))
labels_no_difficult = sorted(set(
obj["label"] for obj in objects if obj["is_difficult"] == 0
))
return {
"image": image_filepath,
"image/filename": image_id + ".jpg",
"objects": objects,
"labels": labels,
"labels_no_difficult": labels_no_difficult,
}
|
[
"def",
"_generate_example",
"(",
"self",
",",
"data_path",
",",
"image_id",
")",
":",
"image_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"\"VOCdevkit/VOC2007/JPEGImages\"",
",",
"\"{}.jpg\"",
".",
"format",
"(",
"image_id",
")",
")",
"annon_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"\"VOCdevkit/VOC2007/Annotations\"",
",",
"\"{}.xml\"",
".",
"format",
"(",
"image_id",
")",
")",
"def",
"_get_example_objects",
"(",
")",
":",
"\"\"\"Function to get all the objects from the annotation XML file.\"\"\"",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"annon_filepath",
",",
"\"r\"",
")",
"as",
"f",
":",
"root",
"=",
"xml",
".",
"etree",
".",
"ElementTree",
".",
"parse",
"(",
"f",
")",
".",
"getroot",
"(",
")",
"size",
"=",
"root",
".",
"find",
"(",
"\"size\"",
")",
"width",
"=",
"float",
"(",
"size",
".",
"find",
"(",
"\"width\"",
")",
".",
"text",
")",
"height",
"=",
"float",
"(",
"size",
".",
"find",
"(",
"\"height\"",
")",
".",
"text",
")",
"for",
"obj",
"in",
"root",
".",
"findall",
"(",
"\"object\"",
")",
":",
"# Get object's label name.",
"label",
"=",
"obj",
".",
"find",
"(",
"\"name\"",
")",
".",
"text",
".",
"lower",
"(",
")",
"# Get objects' pose name.",
"pose",
"=",
"obj",
".",
"find",
"(",
"\"pose\"",
")",
".",
"text",
".",
"lower",
"(",
")",
"is_truncated",
"=",
"(",
"obj",
".",
"find",
"(",
"\"truncated\"",
")",
".",
"text",
"==",
"\"1\"",
")",
"is_difficult",
"=",
"(",
"obj",
".",
"find",
"(",
"\"difficult\"",
")",
".",
"text",
"==",
"\"1\"",
")",
"bndbox",
"=",
"obj",
".",
"find",
"(",
"\"bndbox\"",
")",
"xmax",
"=",
"float",
"(",
"bndbox",
".",
"find",
"(",
"\"xmax\"",
")",
".",
"text",
")",
"xmin",
"=",
"float",
"(",
"bndbox",
".",
"find",
"(",
"\"xmin\"",
")",
".",
"text",
")",
"ymax",
"=",
"float",
"(",
"bndbox",
".",
"find",
"(",
"\"ymax\"",
")",
".",
"text",
")",
"ymin",
"=",
"float",
"(",
"bndbox",
".",
"find",
"(",
"\"ymin\"",
")",
".",
"text",
")",
"yield",
"{",
"\"label\"",
":",
"label",
",",
"\"pose\"",
":",
"pose",
",",
"\"bbox\"",
":",
"tfds",
".",
"features",
".",
"BBox",
"(",
"ymin",
"/",
"height",
",",
"xmin",
"/",
"width",
",",
"ymax",
"/",
"height",
",",
"xmax",
"/",
"width",
")",
",",
"\"is_truncated\"",
":",
"is_truncated",
",",
"\"is_difficult\"",
":",
"is_difficult",
",",
"}",
"objects",
"=",
"list",
"(",
"_get_example_objects",
"(",
")",
")",
"# Use set() to remove duplicates",
"labels",
"=",
"sorted",
"(",
"set",
"(",
"obj",
"[",
"\"label\"",
"]",
"for",
"obj",
"in",
"objects",
")",
")",
"labels_no_difficult",
"=",
"sorted",
"(",
"set",
"(",
"obj",
"[",
"\"label\"",
"]",
"for",
"obj",
"in",
"objects",
"if",
"obj",
"[",
"\"is_difficult\"",
"]",
"==",
"0",
")",
")",
"return",
"{",
"\"image\"",
":",
"image_filepath",
",",
"\"image/filename\"",
":",
"image_id",
"+",
"\".jpg\"",
",",
"\"objects\"",
":",
"objects",
",",
"\"labels\"",
":",
"labels",
",",
"\"labels_no_difficult\"",
":",
"labels_no_difficult",
",",
"}"
] |
Yields examples.
|
[
"Yields",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/voc.py#L137-L186
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/image_feature.py
|
Image.set_encoding_format
|
def set_encoding_format(self, encoding_format):
"""Update the encoding format."""
supported = ENCODE_FN.keys()
if encoding_format not in supported:
raise ValueError('`encoding_format` must be one of %s.' % supported)
self._encoding_format = encoding_format
|
python
|
def set_encoding_format(self, encoding_format):
"""Update the encoding format."""
supported = ENCODE_FN.keys()
if encoding_format not in supported:
raise ValueError('`encoding_format` must be one of %s.' % supported)
self._encoding_format = encoding_format
|
[
"def",
"set_encoding_format",
"(",
"self",
",",
"encoding_format",
")",
":",
"supported",
"=",
"ENCODE_FN",
".",
"keys",
"(",
")",
"if",
"encoding_format",
"not",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"'`encoding_format` must be one of %s.'",
"%",
"supported",
")",
"self",
".",
"_encoding_format",
"=",
"encoding_format"
] |
Update the encoding format.
|
[
"Update",
"the",
"encoding",
"format",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L97-L102
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/image_feature.py
|
Image.set_shape
|
def set_shape(self, shape):
"""Update the shape."""
channels = shape[-1]
acceptable_channels = ACCEPTABLE_CHANNELS[self._encoding_format]
if channels not in acceptable_channels:
raise ValueError('Acceptable `channels` for %s: %s (was %s)' % (
self._encoding_format, acceptable_channels, channels))
self._shape = tuple(shape)
|
python
|
def set_shape(self, shape):
"""Update the shape."""
channels = shape[-1]
acceptable_channels = ACCEPTABLE_CHANNELS[self._encoding_format]
if channels not in acceptable_channels:
raise ValueError('Acceptable `channels` for %s: %s (was %s)' % (
self._encoding_format, acceptable_channels, channels))
self._shape = tuple(shape)
|
[
"def",
"set_shape",
"(",
"self",
",",
"shape",
")",
":",
"channels",
"=",
"shape",
"[",
"-",
"1",
"]",
"acceptable_channels",
"=",
"ACCEPTABLE_CHANNELS",
"[",
"self",
".",
"_encoding_format",
"]",
"if",
"channels",
"not",
"in",
"acceptable_channels",
":",
"raise",
"ValueError",
"(",
"'Acceptable `channels` for %s: %s (was %s)'",
"%",
"(",
"self",
".",
"_encoding_format",
",",
"acceptable_channels",
",",
"channels",
")",
")",
"self",
".",
"_shape",
"=",
"tuple",
"(",
"shape",
")"
] |
Update the shape.
|
[
"Update",
"the",
"shape",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L104-L111
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/image_feature.py
|
Image._encode_image
|
def _encode_image(self, np_image):
"""Returns np_image encoded as jpeg or png."""
if np_image.dtype != np.uint8:
raise ValueError('Image should be uint8. Detected: %s.' % np_image.dtype)
utils.assert_shape_match(np_image.shape, self._shape)
return self._runner.run(ENCODE_FN[self._encoding_format], np_image)
|
python
|
def _encode_image(self, np_image):
"""Returns np_image encoded as jpeg or png."""
if np_image.dtype != np.uint8:
raise ValueError('Image should be uint8. Detected: %s.' % np_image.dtype)
utils.assert_shape_match(np_image.shape, self._shape)
return self._runner.run(ENCODE_FN[self._encoding_format], np_image)
|
[
"def",
"_encode_image",
"(",
"self",
",",
"np_image",
")",
":",
"if",
"np_image",
".",
"dtype",
"!=",
"np",
".",
"uint8",
":",
"raise",
"ValueError",
"(",
"'Image should be uint8. Detected: %s.'",
"%",
"np_image",
".",
"dtype",
")",
"utils",
".",
"assert_shape_match",
"(",
"np_image",
".",
"shape",
",",
"self",
".",
"_shape",
")",
"return",
"self",
".",
"_runner",
".",
"run",
"(",
"ENCODE_FN",
"[",
"self",
".",
"_encoding_format",
"]",
",",
"np_image",
")"
] |
Returns np_image encoded as jpeg or png.
|
[
"Returns",
"np_image",
"encoded",
"as",
"jpeg",
"or",
"png",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L128-L133
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/image_feature.py
|
Image.encode_example
|
def encode_example(self, image_or_path_or_fobj):
"""Convert the given image into a dict convertible to tf example."""
if isinstance(image_or_path_or_fobj, np.ndarray):
encoded_image = self._encode_image(image_or_path_or_fobj)
elif isinstance(image_or_path_or_fobj, six.string_types):
with tf.io.gfile.GFile(image_or_path_or_fobj, 'rb') as image_f:
encoded_image = image_f.read()
else:
encoded_image = image_or_path_or_fobj.read()
return encoded_image
|
python
|
def encode_example(self, image_or_path_or_fobj):
"""Convert the given image into a dict convertible to tf example."""
if isinstance(image_or_path_or_fobj, np.ndarray):
encoded_image = self._encode_image(image_or_path_or_fobj)
elif isinstance(image_or_path_or_fobj, six.string_types):
with tf.io.gfile.GFile(image_or_path_or_fobj, 'rb') as image_f:
encoded_image = image_f.read()
else:
encoded_image = image_or_path_or_fobj.read()
return encoded_image
|
[
"def",
"encode_example",
"(",
"self",
",",
"image_or_path_or_fobj",
")",
":",
"if",
"isinstance",
"(",
"image_or_path_or_fobj",
",",
"np",
".",
"ndarray",
")",
":",
"encoded_image",
"=",
"self",
".",
"_encode_image",
"(",
"image_or_path_or_fobj",
")",
"elif",
"isinstance",
"(",
"image_or_path_or_fobj",
",",
"six",
".",
"string_types",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"image_or_path_or_fobj",
",",
"'rb'",
")",
"as",
"image_f",
":",
"encoded_image",
"=",
"image_f",
".",
"read",
"(",
")",
"else",
":",
"encoded_image",
"=",
"image_or_path_or_fobj",
".",
"read",
"(",
")",
"return",
"encoded_image"
] |
Convert the given image into a dict convertible to tf example.
|
[
"Convert",
"the",
"given",
"image",
"into",
"a",
"dict",
"convertible",
"to",
"tf",
"example",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L135-L144
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/image_feature.py
|
Image.decode_example
|
def decode_example(self, example):
"""Reconstruct the image from the tf example."""
img = tf.image.decode_image(
example, channels=self._shape[-1], dtype=tf.uint8)
img.set_shape(self._shape)
return img
|
python
|
def decode_example(self, example):
"""Reconstruct the image from the tf example."""
img = tf.image.decode_image(
example, channels=self._shape[-1], dtype=tf.uint8)
img.set_shape(self._shape)
return img
|
[
"def",
"decode_example",
"(",
"self",
",",
"example",
")",
":",
"img",
"=",
"tf",
".",
"image",
".",
"decode_image",
"(",
"example",
",",
"channels",
"=",
"self",
".",
"_shape",
"[",
"-",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"uint8",
")",
"img",
".",
"set_shape",
"(",
"self",
".",
"_shape",
")",
"return",
"img"
] |
Reconstruct the image from the tf example.
|
[
"Reconstruct",
"the",
"image",
"from",
"the",
"tf",
"example",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L146-L151
|
train
|
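A small round-trip sketch tying encode_example and decode_example together; it assumes tensorflow_datasets is importable as tfds, that tfds.features.Image accepts the shape keyword shown, and that TensorFlow eager execution is enabled so the decoded tensor can be inspected directly:

import numpy as np
import tensorflow_datasets as tfds

feature = tfds.features.Image(shape=(28, 28, 3))
png_bytes = feature.encode_example(np.zeros((28, 28, 3), dtype=np.uint8))  # ndarray branch above
decoded = feature.decode_example(png_bytes)  # uint8 tensor with shape (28, 28, 3)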
tensorflow/datasets
|
tensorflow_datasets/core/features/image_feature.py
|
Image.save_metadata
|
def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
filepath = _get_metadata_filepath(data_dir, feature_name)
with tf.io.gfile.GFile(filepath, 'w') as f:
json.dump({
'shape': [-1 if d is None else d for d in self._shape],
'encoding_format': self._encoding_format,
}, f, sort_keys=True)
|
python
|
def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
filepath = _get_metadata_filepath(data_dir, feature_name)
with tf.io.gfile.GFile(filepath, 'w') as f:
json.dump({
'shape': [-1 if d is None else d for d in self._shape],
'encoding_format': self._encoding_format,
}, f, sort_keys=True)
|
[
"def",
"save_metadata",
"(",
"self",
",",
"data_dir",
",",
"feature_name",
"=",
"None",
")",
":",
"filepath",
"=",
"_get_metadata_filepath",
"(",
"data_dir",
",",
"feature_name",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"{",
"'shape'",
":",
"[",
"-",
"1",
"if",
"d",
"is",
"None",
"else",
"d",
"for",
"d",
"in",
"self",
".",
"_shape",
"]",
",",
"'encoding_format'",
":",
"self",
".",
"_encoding_format",
",",
"}",
",",
"f",
",",
"sort_keys",
"=",
"True",
")"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L153-L160
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/image_feature.py
|
Image.load_metadata
|
def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
filepath = _get_metadata_filepath(data_dir, feature_name)
if tf.io.gfile.exists(filepath):
with tf.io.gfile.GFile(filepath, 'r') as f:
info_data = json.load(f)
self.set_encoding_format(info_data['encoding_format'])
self.set_shape([None if d == -1 else d for d in info_data['shape']])
|
python
|
def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
filepath = _get_metadata_filepath(data_dir, feature_name)
if tf.io.gfile.exists(filepath):
with tf.io.gfile.GFile(filepath, 'r') as f:
info_data = json.load(f)
self.set_encoding_format(info_data['encoding_format'])
self.set_shape([None if d == -1 else d for d in info_data['shape']])
|
[
"def",
"load_metadata",
"(",
"self",
",",
"data_dir",
",",
"feature_name",
"=",
"None",
")",
":",
"# Restore names if defined",
"filepath",
"=",
"_get_metadata_filepath",
"(",
"data_dir",
",",
"feature_name",
")",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"filepath",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
",",
"'r'",
")",
"as",
"f",
":",
"info_data",
"=",
"json",
".",
"load",
"(",
"f",
")",
"self",
".",
"set_encoding_format",
"(",
"info_data",
"[",
"'encoding_format'",
"]",
")",
"self",
".",
"set_shape",
"(",
"[",
"None",
"if",
"d",
"==",
"-",
"1",
"else",
"d",
"for",
"d",
"in",
"info_data",
"[",
"'shape'",
"]",
"]",
")"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L162-L170
|
train
|
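A minimal sketch of the shape round-trip implemented by `save_metadata` and `load_metadata` above: unknown (`None`) dimensions are written as `-1` in the JSON metadata and restored to `None` on load. The file path, shape, and encoding format below are made-up values, and plain `json`/`open` stand in for the `tf.io.gfile` calls.

```python
import json
import os
import tempfile

# Hypothetical feature shape with unknown height/width, as for variable-size images.
shape = (None, None, 3)
encoding_format = "png"

path = os.path.join(tempfile.mkdtemp(), "image.image.json")

# Save: None -> -1 so the shape is JSON-serializable.
with open(path, "w") as f:
    json.dump({
        "shape": [-1 if d is None else d for d in shape],
        "encoding_format": encoding_format,
    }, f, sort_keys=True)

# Load: -1 -> None restores the partially-unknown shape.
with open(path) as f:
    info = json.load(f)
restored_shape = tuple(None if d == -1 else d for d in info["shape"])

assert restored_shape == shape
assert info["encoding_format"] == encoding_format
print(restored_shape)  # (None, None, 3)
```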
tensorflow/datasets
|
tensorflow_datasets/video/moving_sequence.py
|
_create_moving_sequence
|
def _create_moving_sequence(image, pad_lefts, total_padding):
"""Create a moving image sequence from the given image a left padding values.
Args:
image: [in_h, in_w, n_channels] uint8 array
pad_lefts: [sequence_length, 2] int32 array of left padding values
total_padding: tensor of padding values, (pad_h, pad_w)
Returns:
[sequence_length, out_h, out_w, n_channels] uint8 image sequence, where
    out_h = in_h + pad_h, out_w = in_w + pad_w
"""
with tf.name_scope("moving_sequence"):
def get_padded_image(args):
pad_left, = args
pad_right = total_padding - pad_left
padding = tf.stack([pad_left, pad_right], axis=-1)
z = tf.zeros((1, 2), dtype=pad_left.dtype)
padding = tf.concat([padding, z], axis=0)
return tf.pad(image, padding)
padded_images = tf.map_fn(
get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False,
back_prop=False)
return padded_images
|
python
|
def _create_moving_sequence(image, pad_lefts, total_padding):
"""Create a moving image sequence from the given image a left padding values.
Args:
image: [in_h, in_w, n_channels] uint8 array
pad_lefts: [sequence_length, 2] int32 array of left padding values
total_padding: tensor of padding values, (pad_h, pad_w)
Returns:
[sequence_length, out_h, out_w, n_channels] uint8 image sequence, where
    out_h = in_h + pad_h, out_w = in_w + pad_w
"""
with tf.name_scope("moving_sequence"):
def get_padded_image(args):
pad_left, = args
pad_right = total_padding - pad_left
padding = tf.stack([pad_left, pad_right], axis=-1)
z = tf.zeros((1, 2), dtype=pad_left.dtype)
padding = tf.concat([padding, z], axis=0)
return tf.pad(image, padding)
padded_images = tf.map_fn(
get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False,
back_prop=False)
return padded_images
|
[
"def",
"_create_moving_sequence",
"(",
"image",
",",
"pad_lefts",
",",
"total_padding",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"moving_sequence\"",
")",
":",
"def",
"get_padded_image",
"(",
"args",
")",
":",
"pad_left",
",",
"=",
"args",
"pad_right",
"=",
"total_padding",
"-",
"pad_left",
"padding",
"=",
"tf",
".",
"stack",
"(",
"[",
"pad_left",
",",
"pad_right",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"z",
"=",
"tf",
".",
"zeros",
"(",
"(",
"1",
",",
"2",
")",
",",
"dtype",
"=",
"pad_left",
".",
"dtype",
")",
"padding",
"=",
"tf",
".",
"concat",
"(",
"[",
"padding",
",",
"z",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"tf",
".",
"pad",
"(",
"image",
",",
"padding",
")",
"padded_images",
"=",
"tf",
".",
"map_fn",
"(",
"get_padded_image",
",",
"[",
"pad_lefts",
"]",
",",
"dtype",
"=",
"tf",
".",
"uint8",
",",
"infer_shape",
"=",
"False",
",",
"back_prop",
"=",
"False",
")",
"return",
"padded_images"
] |
Create a moving image sequence from the given image a left padding values.
Args:
image: [in_h, in_w, n_channels] uint8 array
pad_lefts: [sequence_length, 2] int32 array of left padding values
total_padding: tensor of padding values, (pad_h, pad_w)
Returns:
[sequence_length, out_h, out_w, n_channels] uint8 image sequence, where
    out_h = in_h + pad_h, out_w = in_w + pad_w
|
[
"Create",
"a",
"moving",
"image",
"sequence",
"from",
"the",
"given",
"image",
"a",
"left",
"padding",
"values",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/moving_sequence.py#L27-L53
|
train
|
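A minimal NumPy sketch of the padding idea in `_create_moving_sequence` above: each frame pads the sub-image on the height and width axes so it sits at a per-step offset inside a fixed-size canvas, and the channel axis is left unpadded. The toy image, canvas size, and offsets are made-up values.

```python
import numpy as np

# Toy 2x2 single-channel "digit" placed on a 6x6 canvas over 3 time steps.
image = np.arange(1, 5, dtype=np.uint8).reshape(2, 2, 1)
total_padding = np.array([4, 4])                 # (out_h - in_h, out_w - in_w)
pad_lefts = np.array([[0, 0], [2, 1], [4, 4]])   # per-frame (top, left) offsets

frames = []
for pad_left in pad_lefts:
    pad_right = total_padding - pad_left
    # ((top, bottom), (left, right), (0, 0)) -- no padding on the channel axis.
    padding = [(pad_left[0], pad_right[0]), (pad_left[1], pad_right[1]), (0, 0)]
    frames.append(np.pad(image, padding))

sequence = np.stack(frames)  # [sequence_length, out_h, out_w, n_channels]
assert sequence.shape == (3, 6, 6, 1)
```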
tensorflow/datasets
|
tensorflow_datasets/video/moving_sequence.py
|
_get_linear_trajectory
|
def _get_linear_trajectory(x0, velocity, t):
"""Construct a linear trajectory from x0.
Args:
x0: N-D float tensor.
velocity: N-D float tensor
t: [sequence_length]-length float tensor
Returns:
x: [sequence_length, ndims] float tensor.
"""
x0 = tf.convert_to_tensor(x0)
velocity = tf.convert_to_tensor(velocity)
t = tf.convert_to_tensor(t)
if x0.shape.ndims != 1:
raise ValueError("x0 must be a rank 1 tensor")
if velocity.shape.ndims != 1:
raise ValueError("velocity must be a rank 1 tensor")
if t.shape.ndims != 1:
raise ValueError("t must be a rank 1 tensor")
x0 = tf.expand_dims(x0, axis=0)
velocity = tf.expand_dims(velocity, axis=0)
dx = velocity * tf.expand_dims(t, axis=-1)
linear_trajectories = x0 + dx
assert linear_trajectories.shape.ndims == 2, \
"linear_trajectories should be a rank 2 tensor"
return linear_trajectories
|
python
|
def _get_linear_trajectory(x0, velocity, t):
"""Construct a linear trajectory from x0.
Args:
x0: N-D float tensor.
velocity: N-D float tensor
t: [sequence_length]-length float tensor
Returns:
x: [sequence_length, ndims] float tensor.
"""
x0 = tf.convert_to_tensor(x0)
velocity = tf.convert_to_tensor(velocity)
t = tf.convert_to_tensor(t)
if x0.shape.ndims != 1:
raise ValueError("x0 must be a rank 1 tensor")
if velocity.shape.ndims != 1:
raise ValueError("velocity must be a rank 1 tensor")
if t.shape.ndims != 1:
raise ValueError("t must be a rank 1 tensor")
x0 = tf.expand_dims(x0, axis=0)
velocity = tf.expand_dims(velocity, axis=0)
dx = velocity * tf.expand_dims(t, axis=-1)
linear_trajectories = x0 + dx
assert linear_trajectories.shape.ndims == 2, \
"linear_trajectories should be a rank 2 tensor"
return linear_trajectories
|
[
"def",
"_get_linear_trajectory",
"(",
"x0",
",",
"velocity",
",",
"t",
")",
":",
"x0",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"x0",
")",
"velocity",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"velocity",
")",
"t",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"t",
")",
"if",
"x0",
".",
"shape",
".",
"ndims",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"x0 must be a rank 1 tensor\"",
")",
"if",
"velocity",
".",
"shape",
".",
"ndims",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"velocity must be a rank 1 tensor\"",
")",
"if",
"t",
".",
"shape",
".",
"ndims",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"t must be a rank 1 tensor\"",
")",
"x0",
"=",
"tf",
".",
"expand_dims",
"(",
"x0",
",",
"axis",
"=",
"0",
")",
"velocity",
"=",
"tf",
".",
"expand_dims",
"(",
"velocity",
",",
"axis",
"=",
"0",
")",
"dx",
"=",
"velocity",
"*",
"tf",
".",
"expand_dims",
"(",
"t",
",",
"axis",
"=",
"-",
"1",
")",
"linear_trajectories",
"=",
"x0",
"+",
"dx",
"assert",
"linear_trajectories",
".",
"shape",
".",
"ndims",
"==",
"2",
",",
"\"linear_trajectories should be a rank 2 tensor\"",
"return",
"linear_trajectories"
] |
Construct a linear trajectory from x0.
Args:
x0: N-D float tensor.
velocity: N-D float tensor
t: [sequence_length]-length float tensor
Returns:
x: [sequence_length, ndims] float tensor.
|
[
"Construct",
"a",
"linear",
"trajectory",
"from",
"x0",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/moving_sequence.py#L56-L82
|
train
|
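The trajectory above is just `x0 + velocity * t` broadcast over time steps; a small NumPy sketch of the same broadcasting, with made-up position and velocity values.

```python
import numpy as np

x0 = np.array([0.2, 0.5])           # initial 2-D position
velocity = np.array([0.1, -0.05])   # 2-D velocity per time step
t = np.arange(4, dtype=np.float32)  # [sequence_length]

# [1, 2] + [T, 1] * [1, 2] -> [T, 2], matching the expand_dims in the TF version.
trajectory = x0[None, :] + velocity[None, :] * t[:, None]

assert trajectory.shape == (4, 2)
print(trajectory[1])  # -> approximately [0.3, 0.45]
```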
tensorflow/datasets
|
tensorflow_datasets/video/moving_sequence.py
|
image_as_moving_sequence
|
def image_as_moving_sequence(
image, sequence_length=20, output_size=(64, 64), velocity=0.1,
start_position=None):
"""Turn simple static images into sequences of the originals bouncing around.
Adapted from Srivastava et al.
http://www.cs.toronto.edu/~nitish/unsupervised_video/
Example usage:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.video import moving_sequence
tf.compat.v1.enable_eager_execution()
def animate(sequence):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
sequence = np.squeeze(sequence, axis=-1)
fig = plt.figure()
plt.axis("off")
ims = [[plt.imshow(im, cmap="gray", animated=True)] for im in sequence]
    # don't remove `anim =` as the linter may suggest
# weird behaviour, plot will freeze on last frame
anim = animation.ArtistAnimation(
fig, ims, interval=50, blit=True, repeat_delay=100)
plt.show()
plt.close()
tf.enable_eager_execution()
mnist_ds = tfds.load("mnist", split=tfds.Split.TRAIN, as_supervised=True)
mnist_ds = mnist_ds.repeat().shuffle(1024)
def map_fn(image, label):
sequence = moving_sequence.image_as_moving_sequence(
image, sequence_length=20)
return sequence.image_sequence
moving_mnist_ds = mnist_ds.map(map_fn).batch(2).map(
lambda x: dict(image_sequence=tf.reduce_max(x, axis=0)))
# # for comparison with test data provided by original authors
# moving_mnist_ds = tfds.load("moving_mnist", split=tfds.Split.TEST)
for seq in moving_mnist_ds:
animate(seq["image_sequence"].numpy())
```
Args:
image: [in_h, in_w, n_channels] tensor defining the sub-image to be bouncing
around.
sequence_length: int, length of sequence.
output_size: (out_h, out_w) size returned images.
velocity: scalar speed or 2D velocity of image. If scalar, the 2D
velocity is randomly generated with this magnitude. This is the
normalized distance moved each time step by the sub-image, where
normalization occurs over the feasible distance the sub-image can move
e.g if the input image is [10 x 10] and the output image is [60 x 60],
a speed of 0.1 means the sub-image moves (60 - 10) * 0.1 = 5 pixels per
time step.
start_position: 2D float32 normalized initial position of each
image in [0, 1]. Randomized uniformly if not given.
Returns:
`MovingSequence` namedtuple containing:
`image_sequence`:
[sequence_length, out_h, out_w, n_channels] image at each time step.
padded values are all zero. Same dtype as input image.
`trajectory`: [sequence_length, 2] float32 in [0, 1]
2D normalized coordinates of the image at every time step.
`start_position`: 2D float32 initial position in [0, 1].
2D normalized initial position of image. Same as input if provided,
      otherwise the randomly generated value.
`velocity`: 2D float32 normalized velocity. Same as input velocity
if provided as a 2D tensor, otherwise the random velocity generated.
"""
ndims = 2
image = tf.convert_to_tensor(image)
if image.shape.ndims != 3:
raise ValueError("image must be rank 3, got %s" % str(image))
output_size = tf.TensorShape(output_size)
if len(output_size) != ndims:
raise ValueError("output_size must have exactly %d elements, got %s"
% (ndims, output_size))
image_shape = tf.shape(image)
if start_position is None:
start_position = tf.random.uniform((ndims,), dtype=tf.float32)
elif start_position.shape != (ndims,):
raise ValueError("start_positions must (%d,)" % ndims)
velocity = tf.convert_to_tensor(velocity, dtype=tf.float32)
if velocity.shape.ndims == 0:
velocity = _get_random_unit_vector(ndims, tf.float32) * velocity
elif velocity.shape.ndims != 1:
raise ValueError("velocity must be rank 0 or rank 1, got %s" % velocity)
t = tf.range(sequence_length, dtype=tf.float32)
trajectory = _get_linear_trajectory(start_position, velocity, t)
trajectory = _bounce_to_bbox(trajectory)
total_padding = output_size - image_shape[:2]
if not tf.executing_eagerly():
cond = tf.compat.v1.assert_greater(total_padding, -1)
with tf.control_dependencies([cond]):
total_padding = tf.identity(total_padding)
sequence_pad_lefts = tf.cast(
tf.math.round(trajectory * tf.cast(total_padding, tf.float32)), tf.int32)
sequence = _create_moving_sequence(image, sequence_pad_lefts, total_padding)
sequence.set_shape(
[sequence_length] + output_size.as_list() + [image.shape[-1]])
return MovingSequence(
image_sequence=sequence,
trajectory=trajectory,
start_position=start_position,
velocity=velocity)
|
python
|
def image_as_moving_sequence(
image, sequence_length=20, output_size=(64, 64), velocity=0.1,
start_position=None):
"""Turn simple static images into sequences of the originals bouncing around.
Adapted from Srivastava et al.
http://www.cs.toronto.edu/~nitish/unsupervised_video/
Example usage:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.video import moving_sequence
tf.compat.v1.enable_eager_execution()
def animate(sequence):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
sequence = np.squeeze(sequence, axis=-1)
fig = plt.figure()
plt.axis("off")
ims = [[plt.imshow(im, cmap="gray", animated=True)] for im in sequence]
    # don't remove `anim =` as the linter may suggest
# weird behaviour, plot will freeze on last frame
anim = animation.ArtistAnimation(
fig, ims, interval=50, blit=True, repeat_delay=100)
plt.show()
plt.close()
tf.enable_eager_execution()
mnist_ds = tfds.load("mnist", split=tfds.Split.TRAIN, as_supervised=True)
mnist_ds = mnist_ds.repeat().shuffle(1024)
def map_fn(image, label):
sequence = moving_sequence.image_as_moving_sequence(
image, sequence_length=20)
return sequence.image_sequence
moving_mnist_ds = mnist_ds.map(map_fn).batch(2).map(
lambda x: dict(image_sequence=tf.reduce_max(x, axis=0)))
# # for comparison with test data provided by original authors
# moving_mnist_ds = tfds.load("moving_mnist", split=tfds.Split.TEST)
for seq in moving_mnist_ds:
animate(seq["image_sequence"].numpy())
```
Args:
image: [in_h, in_w, n_channels] tensor defining the sub-image to be bouncing
around.
sequence_length: int, length of sequence.
output_size: (out_h, out_w) size returned images.
velocity: scalar speed or 2D velocity of image. If scalar, the 2D
velocity is randomly generated with this magnitude. This is the
normalized distance moved each time step by the sub-image, where
normalization occurs over the feasible distance the sub-image can move
e.g if the input image is [10 x 10] and the output image is [60 x 60],
a speed of 0.1 means the sub-image moves (60 - 10) * 0.1 = 5 pixels per
time step.
start_position: 2D float32 normalized initial position of each
image in [0, 1]. Randomized uniformly if not given.
Returns:
`MovingSequence` namedtuple containing:
`image_sequence`:
[sequence_length, out_h, out_w, n_channels] image at each time step.
padded values are all zero. Same dtype as input image.
`trajectory`: [sequence_length, 2] float32 in [0, 1]
2D normalized coordinates of the image at every time step.
`start_position`: 2D float32 initial position in [0, 1].
2D normalized initial position of image. Same as input if provided,
      otherwise the randomly generated value.
`velocity`: 2D float32 normalized velocity. Same as input velocity
if provided as a 2D tensor, otherwise the random velocity generated.
"""
ndims = 2
image = tf.convert_to_tensor(image)
if image.shape.ndims != 3:
raise ValueError("image must be rank 3, got %s" % str(image))
output_size = tf.TensorShape(output_size)
if len(output_size) != ndims:
raise ValueError("output_size must have exactly %d elements, got %s"
% (ndims, output_size))
image_shape = tf.shape(image)
if start_position is None:
start_position = tf.random.uniform((ndims,), dtype=tf.float32)
elif start_position.shape != (ndims,):
raise ValueError("start_positions must (%d,)" % ndims)
velocity = tf.convert_to_tensor(velocity, dtype=tf.float32)
if velocity.shape.ndims == 0:
velocity = _get_random_unit_vector(ndims, tf.float32) * velocity
elif velocity.shape.ndims != 1:
raise ValueError("velocity must be rank 0 or rank 1, got %s" % velocity)
t = tf.range(sequence_length, dtype=tf.float32)
trajectory = _get_linear_trajectory(start_position, velocity, t)
trajectory = _bounce_to_bbox(trajectory)
total_padding = output_size - image_shape[:2]
if not tf.executing_eagerly():
cond = tf.compat.v1.assert_greater(total_padding, -1)
with tf.control_dependencies([cond]):
total_padding = tf.identity(total_padding)
sequence_pad_lefts = tf.cast(
tf.math.round(trajectory * tf.cast(total_padding, tf.float32)), tf.int32)
sequence = _create_moving_sequence(image, sequence_pad_lefts, total_padding)
sequence.set_shape(
[sequence_length] + output_size.as_list() + [image.shape[-1]])
return MovingSequence(
image_sequence=sequence,
trajectory=trajectory,
start_position=start_position,
velocity=velocity)
|
[
"def",
"image_as_moving_sequence",
"(",
"image",
",",
"sequence_length",
"=",
"20",
",",
"output_size",
"=",
"(",
"64",
",",
"64",
")",
",",
"velocity",
"=",
"0.1",
",",
"start_position",
"=",
"None",
")",
":",
"ndims",
"=",
"2",
"image",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"image",
")",
"if",
"image",
".",
"shape",
".",
"ndims",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"image must be rank 3, got %s\"",
"%",
"str",
"(",
"image",
")",
")",
"output_size",
"=",
"tf",
".",
"TensorShape",
"(",
"output_size",
")",
"if",
"len",
"(",
"output_size",
")",
"!=",
"ndims",
":",
"raise",
"ValueError",
"(",
"\"output_size must have exactly %d elements, got %s\"",
"%",
"(",
"ndims",
",",
"output_size",
")",
")",
"image_shape",
"=",
"tf",
".",
"shape",
"(",
"image",
")",
"if",
"start_position",
"is",
"None",
":",
"start_position",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"(",
"ndims",
",",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"elif",
"start_position",
".",
"shape",
"!=",
"(",
"ndims",
",",
")",
":",
"raise",
"ValueError",
"(",
"\"start_positions must (%d,)\"",
"%",
"ndims",
")",
"velocity",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"velocity",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"if",
"velocity",
".",
"shape",
".",
"ndims",
"==",
"0",
":",
"velocity",
"=",
"_get_random_unit_vector",
"(",
"ndims",
",",
"tf",
".",
"float32",
")",
"*",
"velocity",
"elif",
"velocity",
".",
"shape",
".",
"ndims",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"velocity must be rank 0 or rank 1, got %s\"",
"%",
"velocity",
")",
"t",
"=",
"tf",
".",
"range",
"(",
"sequence_length",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"trajectory",
"=",
"_get_linear_trajectory",
"(",
"start_position",
",",
"velocity",
",",
"t",
")",
"trajectory",
"=",
"_bounce_to_bbox",
"(",
"trajectory",
")",
"total_padding",
"=",
"output_size",
"-",
"image_shape",
"[",
":",
"2",
"]",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"cond",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_greater",
"(",
"total_padding",
",",
"-",
"1",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"cond",
"]",
")",
":",
"total_padding",
"=",
"tf",
".",
"identity",
"(",
"total_padding",
")",
"sequence_pad_lefts",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"math",
".",
"round",
"(",
"trajectory",
"*",
"tf",
".",
"cast",
"(",
"total_padding",
",",
"tf",
".",
"float32",
")",
")",
",",
"tf",
".",
"int32",
")",
"sequence",
"=",
"_create_moving_sequence",
"(",
"image",
",",
"sequence_pad_lefts",
",",
"total_padding",
")",
"sequence",
".",
"set_shape",
"(",
"[",
"sequence_length",
"]",
"+",
"output_size",
".",
"as_list",
"(",
")",
"+",
"[",
"image",
".",
"shape",
"[",
"-",
"1",
"]",
"]",
")",
"return",
"MovingSequence",
"(",
"image_sequence",
"=",
"sequence",
",",
"trajectory",
"=",
"trajectory",
",",
"start_position",
"=",
"start_position",
",",
"velocity",
"=",
"velocity",
")"
] |
Turn simple static images into sequences of the originals bouncing around.
Adapted from Srivastava et al.
http://www.cs.toronto.edu/~nitish/unsupervised_video/
Example usage:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.video import moving_sequence
tf.compat.v1.enable_eager_execution()
def animate(sequence):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
sequence = np.squeeze(sequence, axis=-1)
fig = plt.figure()
plt.axis("off")
ims = [[plt.imshow(im, cmap="gray", animated=True)] for im in sequence]
    # don't remove `anim =` as the linter may suggest
# weird behaviour, plot will freeze on last frame
anim = animation.ArtistAnimation(
fig, ims, interval=50, blit=True, repeat_delay=100)
plt.show()
plt.close()
tf.enable_eager_execution()
mnist_ds = tfds.load("mnist", split=tfds.Split.TRAIN, as_supervised=True)
mnist_ds = mnist_ds.repeat().shuffle(1024)
def map_fn(image, label):
sequence = moving_sequence.image_as_moving_sequence(
image, sequence_length=20)
return sequence.image_sequence
moving_mnist_ds = mnist_ds.map(map_fn).batch(2).map(
lambda x: dict(image_sequence=tf.reduce_max(x, axis=0)))
# # for comparison with test data provided by original authors
# moving_mnist_ds = tfds.load("moving_mnist", split=tfds.Split.TEST)
for seq in moving_mnist_ds:
animate(seq["image_sequence"].numpy())
```
Args:
image: [in_h, in_w, n_channels] tensor defining the sub-image to be bouncing
around.
sequence_length: int, length of sequence.
output_size: (out_h, out_w) size returned images.
velocity: scalar speed or 2D velocity of image. If scalar, the 2D
velocity is randomly generated with this magnitude. This is the
normalized distance moved each time step by the sub-image, where
normalization occurs over the feasible distance the sub-image can move
e.g if the input image is [10 x 10] and the output image is [60 x 60],
a speed of 0.1 means the sub-image moves (60 - 10) * 0.1 = 5 pixels per
time step.
start_position: 2D float32 normalized initial position of each
image in [0, 1]. Randomized uniformly if not given.
Returns:
`MovingSequence` namedtuple containing:
`image_sequence`:
[sequence_length, out_h, out_w, n_channels] image at each time step.
padded values are all zero. Same dtype as input image.
`trajectory`: [sequence_length, 2] float32 in [0, 1]
2D normalized coordinates of the image at every time step.
`start_position`: 2D float32 initial position in [0, 1].
2D normalized initial position of image. Same as input if provided,
      otherwise the randomly generated value.
`velocity`: 2D float32 normalized velocity. Same as input velocity
if provided as a 2D tensor, otherwise the random velocity generated.
|
[
"Turn",
"simple",
"static",
"images",
"into",
"sequences",
"of",
"the",
"originals",
"bouncing",
"around",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/moving_sequence.py#L115-L234
|
train
|
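A worked version of the speed-normalization arithmetic in the docstring above, repeating the docstring's own numbers (10x10 sub-image, 60x60 output, speed 0.1): the normalized speed is scaled by the feasible travel distance to get pixels moved per time step.

```python
in_size = 10   # sub-image height/width
out_size = 60  # output canvas height/width
speed = 0.1    # normalized speed

feasible_distance = out_size - in_size       # 50 pixels of possible travel
pixels_per_step = feasible_distance * speed  # 5.0 pixels per time step

assert pixels_per_step == 5.0
```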
tensorflow/datasets
|
tensorflow_datasets/audio/nsynth.py
|
Nsynth._split_generators
|
def _split_generators(self, dl_manager):
"""Returns splits."""
dl_urls = {
split: _BASE_DOWNLOAD_PATH + "%s.tfrecord" % split for split in _SPLITS
}
dl_urls["instrument_labels"] = (_BASE_DOWNLOAD_PATH +
"instrument_labels.txt")
dl_paths = dl_manager.download_and_extract(dl_urls)
instrument_labels = tf.io.gfile.GFile(dl_paths["instrument_labels"],
"r").read().strip().split("\n")
self.info.features["instrument"]["label"].names = instrument_labels
return [
tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension
name=split,
num_shards=_SPLIT_SHARDS[split],
gen_kwargs={"path": dl_paths[split]}) for split in _SPLITS
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns splits."""
dl_urls = {
split: _BASE_DOWNLOAD_PATH + "%s.tfrecord" % split for split in _SPLITS
}
dl_urls["instrument_labels"] = (_BASE_DOWNLOAD_PATH +
"instrument_labels.txt")
dl_paths = dl_manager.download_and_extract(dl_urls)
instrument_labels = tf.io.gfile.GFile(dl_paths["instrument_labels"],
"r").read().strip().split("\n")
self.info.features["instrument"]["label"].names = instrument_labels
return [
tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension
name=split,
num_shards=_SPLIT_SHARDS[split],
gen_kwargs={"path": dl_paths[split]}) for split in _SPLITS
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"dl_urls",
"=",
"{",
"split",
":",
"_BASE_DOWNLOAD_PATH",
"+",
"\"%s.tfrecord\"",
"%",
"split",
"for",
"split",
"in",
"_SPLITS",
"}",
"dl_urls",
"[",
"\"instrument_labels\"",
"]",
"=",
"(",
"_BASE_DOWNLOAD_PATH",
"+",
"\"instrument_labels.txt\"",
")",
"dl_paths",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"dl_urls",
")",
"instrument_labels",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"dl_paths",
"[",
"\"instrument_labels\"",
"]",
",",
"\"r\"",
")",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"self",
".",
"info",
".",
"features",
"[",
"\"instrument\"",
"]",
"[",
"\"label\"",
"]",
".",
"names",
"=",
"instrument_labels",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"# pylint: disable=g-complex-comprehension",
"name",
"=",
"split",
",",
"num_shards",
"=",
"_SPLIT_SHARDS",
"[",
"split",
"]",
",",
"gen_kwargs",
"=",
"{",
"\"path\"",
":",
"dl_paths",
"[",
"split",
"]",
"}",
")",
"for",
"split",
"in",
"_SPLITS",
"]"
] |
Returns splits.
|
[
"Returns",
"splits",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/audio/nsynth.py#L117-L135
|
train
|
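The instrument label names above come from a plain newline-separated text file; a small sketch of that parsing, using an in-memory stand-in for the downloaded `instrument_labels.txt` (the label names are made up).

```python
import io

# Stand-in for the downloaded instrument_labels.txt: one label per line.
fake_labels_file = io.StringIO("bass\nbrass\nflute\nguitar\n")

# Same parsing as above: read, strip the trailing newline, split into names.
instrument_labels = fake_labels_file.read().strip().split("\n")

assert instrument_labels == ["bass", "brass", "flute", "guitar"]
```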
tensorflow/datasets
|
tensorflow_datasets/core/utils/version.py
|
_str_to_version
|
def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = "Invalid version '{}'. Format should be x.y.z".format(version_str)
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(
v if v == "*" else int(v)
for v in [res.group("major"), res.group("minor"), res.group("patch")])
|
python
|
def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = "Invalid version '{}'. Format should be x.y.z".format(version_str)
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(
v if v == "*" else int(v)
for v in [res.group("major"), res.group("minor"), res.group("patch")])
|
[
"def",
"_str_to_version",
"(",
"version_str",
",",
"allow_wildcard",
"=",
"False",
")",
":",
"reg",
"=",
"_VERSION_WILDCARD_REG",
"if",
"allow_wildcard",
"else",
"_VERSION_RESOLVED_REG",
"res",
"=",
"reg",
".",
"match",
"(",
"version_str",
")",
"if",
"not",
"res",
":",
"msg",
"=",
"\"Invalid version '{}'. Format should be x.y.z\"",
".",
"format",
"(",
"version_str",
")",
"if",
"allow_wildcard",
":",
"msg",
"+=",
"\" with {x,y,z} being digits or wildcard.\"",
"else",
":",
"msg",
"+=",
"\" with {x,y,z} being digits.\"",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"tuple",
"(",
"v",
"if",
"v",
"==",
"\"*\"",
"else",
"int",
"(",
"v",
")",
"for",
"v",
"in",
"[",
"res",
".",
"group",
"(",
"\"major\"",
")",
",",
"res",
".",
"group",
"(",
"\"minor\"",
")",
",",
"res",
".",
"group",
"(",
"\"patch\"",
")",
"]",
")"
] |
Return the tuple (major, minor, patch) version extracted from the str.
|
[
"Return",
"the",
"tuple",
"(",
"major",
"minor",
"patch",
")",
"version",
"extracted",
"from",
"the",
"str",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/version.py#L70-L83
|
train
|
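A self-contained sketch of the same parsing idea. The two regular expressions below are stand-ins, since the actual `_VERSION_RESOLVED_REG` and `_VERSION_WILDCARD_REG` definitions are not shown in this record; they are only assumed to accept digits, with the wildcard variant also accepting `*`.

```python
import re

# Assumed stand-in patterns for x.y.z, with and without "*" wildcards.
_RESOLVED = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")
_WILDCARD = re.compile(r"^(?P<major>\d+|\*)\.(?P<minor>\d+|\*)\.(?P<patch>\d+|\*)$")

def str_to_version(version_str, allow_wildcard=False):
    reg = _WILDCARD if allow_wildcard else _RESOLVED
    res = reg.match(version_str)
    if not res:
        raise ValueError("Invalid version '{}'. Format should be x.y.z".format(version_str))
    return tuple(v if v == "*" else int(v)
                 for v in (res.group("major"), res.group("minor"), res.group("patch")))

assert str_to_version("1.0.2") == (1, 0, 2)
assert str_to_version("1.*.*", allow_wildcard=True) == (1, "*", "*")
```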
tensorflow/datasets
|
tensorflow_datasets/core/utils/version.py
|
Version.match
|
def match(self, other_version):
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return (major in [self.major, "*"] and minor in [self.minor, "*"]
and patch in [self.patch, "*"])
|
python
|
def match(self, other_version):
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return (major in [self.major, "*"] and minor in [self.minor, "*"]
and patch in [self.patch, "*"])
|
[
"def",
"match",
"(",
"self",
",",
"other_version",
")",
":",
"major",
",",
"minor",
",",
"patch",
"=",
"_str_to_version",
"(",
"other_version",
",",
"allow_wildcard",
"=",
"True",
")",
"return",
"(",
"major",
"in",
"[",
"self",
".",
"major",
",",
"\"*\"",
"]",
"and",
"minor",
"in",
"[",
"self",
".",
"minor",
",",
"\"*\"",
"]",
"and",
"patch",
"in",
"[",
"self",
".",
"patch",
",",
"\"*\"",
"]",
")"
] |
Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard.
|
[
"Returns",
"True",
"if",
"other_version",
"matches",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/version.py#L58-L67
|
train
|
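Once both versions are parsed, the wildcard comparison above reduces to a per-component check; a minimal sketch over already-parsed `(major, minor, patch)` tuples.

```python
def version_matches(version, other):
    """True if `other` (each part a number or "*") matches `version`."""
    return all(o in (v, "*") for v, o in zip(version, other))

assert version_matches((1, 0, 2), (1, "*", "*"))
assert not version_matches((1, 0, 2), (2, 0, "*"))
```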
tensorflow/datasets
|
tensorflow_datasets/image/imagenet.py
|
Imagenet2012._get_validation_labels
|
def _get_validation_labels(val_path):
"""Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
"""
labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)
with tf.io.gfile.GFile(labels_path) as labels_f:
labels = labels_f.read().strip().split('\n')
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels))
|
python
|
def _get_validation_labels(val_path):
"""Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
"""
labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)
with tf.io.gfile.GFile(labels_path) as labels_f:
labels = labels_f.read().strip().split('\n')
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels))
|
[
"def",
"_get_validation_labels",
"(",
"val_path",
")",
":",
"labels_path",
"=",
"tfds",
".",
"core",
".",
"get_tfds_path",
"(",
"_VALIDATION_LABELS_FNAME",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"labels_path",
")",
"as",
"labels_f",
":",
"labels",
"=",
"labels_f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"val_path",
",",
"'rb'",
")",
"as",
"tar_f_obj",
":",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"mode",
"=",
"'r:'",
",",
"fileobj",
"=",
"tar_f_obj",
")",
"images",
"=",
"sorted",
"(",
"tar",
".",
"getnames",
"(",
")",
")",
"return",
"dict",
"(",
"zip",
"(",
"images",
",",
"labels",
")",
")"
] |
Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
|
[
"Returns",
"labels",
"for",
"validation",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet.py#L86-L102
|
train
|
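The mapping above relies on the label file being ordered to match the *sorted* image names inside the validation tar; a tiny sketch of that pairing with made-up file names and synset ids (no actual tar is needed to show the idea).

```python
# Toy member names (deliberately out of order) and labels in sorted-name order.
image_names = ["ILSVRC2012_val_00000002.JPEG", "ILSVRC2012_val_00000001.JPEG"]
labels = ["n01751748", "n09193705"]  # label for val_00000001, then val_00000002

validation_labels = dict(zip(sorted(image_names), labels))

assert validation_labels["ILSVRC2012_val_00000001.JPEG"] == "n01751748"
assert validation_labels["ILSVRC2012_val_00000002.JPEG"] == "n09193705"
```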
tensorflow/datasets
|
tensorflow_datasets/image/imagenet.py
|
Imagenet2012._generate_examples
|
def _generate_examples(self, archive, validation_labels=None):
"""Yields examples."""
if validation_labels: # Validation split
for example in self._generate_examples_validation(archive,
validation_labels):
yield example
# Training split. Main archive contains archives names after a synset noun.
# Each sub-archive contains pictures associated to that synset.
for fname, fobj in archive:
label = fname[:-4] # fname is something like 'n01632458.tar'
# TODO(b/117643231): in py3, the following lines trigger tarfile module
# to call `fobj.seekable()`, which Gfile doesn't have. We should find an
# alternative, as this loads ~150MB in RAM.
fobj_mem = io.BytesIO(fobj.read())
for image_fname, image_fobj in tfds.download.iter_archive(
fobj_mem, tfds.download.ExtractMethod.TAR):
yield {
'file_name': image_fname,
'image': image_fobj,
'label': label,
}
|
python
|
def _generate_examples(self, archive, validation_labels=None):
"""Yields examples."""
if validation_labels: # Validation split
for example in self._generate_examples_validation(archive,
validation_labels):
yield example
# Training split. Main archive contains archives names after a synset noun.
# Each sub-archive contains pictures associated to that synset.
for fname, fobj in archive:
label = fname[:-4] # fname is something like 'n01632458.tar'
# TODO(b/117643231): in py3, the following lines trigger tarfile module
# to call `fobj.seekable()`, which Gfile doesn't have. We should find an
# alternative, as this loads ~150MB in RAM.
fobj_mem = io.BytesIO(fobj.read())
for image_fname, image_fobj in tfds.download.iter_archive(
fobj_mem, tfds.download.ExtractMethod.TAR):
yield {
'file_name': image_fname,
'image': image_fobj,
'label': label,
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"archive",
",",
"validation_labels",
"=",
"None",
")",
":",
"if",
"validation_labels",
":",
"# Validation split",
"for",
"example",
"in",
"self",
".",
"_generate_examples_validation",
"(",
"archive",
",",
"validation_labels",
")",
":",
"yield",
"example",
"# Training split. Main archive contains archives names after a synset noun.",
"# Each sub-archive contains pictures associated to that synset.",
"for",
"fname",
",",
"fobj",
"in",
"archive",
":",
"label",
"=",
"fname",
"[",
":",
"-",
"4",
"]",
"# fname is something like 'n01632458.tar'",
"# TODO(b/117643231): in py3, the following lines trigger tarfile module",
"# to call `fobj.seekable()`, which Gfile doesn't have. We should find an",
"# alternative, as this loads ~150MB in RAM.",
"fobj_mem",
"=",
"io",
".",
"BytesIO",
"(",
"fobj",
".",
"read",
"(",
")",
")",
"for",
"image_fname",
",",
"image_fobj",
"in",
"tfds",
".",
"download",
".",
"iter_archive",
"(",
"fobj_mem",
",",
"tfds",
".",
"download",
".",
"ExtractMethod",
".",
"TAR",
")",
":",
"yield",
"{",
"'file_name'",
":",
"image_fname",
",",
"'image'",
":",
"image_fobj",
",",
"'label'",
":",
"label",
",",
"}"
] |
Yields examples.
|
[
"Yields",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet.py#L131-L151
|
train
|
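In the training branch above, the class label is recovered from each inner archive's file name by dropping the `.tar` suffix; a one-line check using the example name from the code comment.

```python
import os

fname = "n01632458.tar"  # example inner-archive name from the comment above

label = fname[:-4]
assert label == "n01632458"

# An equivalent, slightly more explicit spelling:
assert os.path.splitext(fname)[0] == label
```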
tensorflow/datasets
|
tensorflow_datasets/core/file_format_adapter.py
|
do_files_exist
|
def do_files_exist(filenames):
"""Whether any of the filenames exist."""
preexisting = [tf.io.gfile.exists(f) for f in filenames]
return any(preexisting)
|
python
|
def do_files_exist(filenames):
"""Whether any of the filenames exist."""
preexisting = [tf.io.gfile.exists(f) for f in filenames]
return any(preexisting)
|
[
"def",
"do_files_exist",
"(",
"filenames",
")",
":",
"preexisting",
"=",
"[",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"f",
")",
"for",
"f",
"in",
"filenames",
"]",
"return",
"any",
"(",
"preexisting",
")"
] |
Whether any of the filenames exist.
|
[
"Whether",
"any",
"of",
"the",
"filenames",
"exist",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L194-L197
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/file_format_adapter.py
|
get_incomplete_path
|
def get_incomplete_path(filename):
"""Returns a temporary filename based on filename."""
random_suffix = "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
return filename + ".incomplete" + random_suffix
|
python
|
def get_incomplete_path(filename):
"""Returns a temporary filename based on filename."""
random_suffix = "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
return filename + ".incomplete" + random_suffix
|
[
"def",
"get_incomplete_path",
"(",
"filename",
")",
":",
"random_suffix",
"=",
"\"\"",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"string",
".",
"ascii_uppercase",
"+",
"string",
".",
"digits",
")",
"for",
"_",
"in",
"range",
"(",
"6",
")",
")",
"return",
"filename",
"+",
"\".incomplete\"",
"+",
"random_suffix"
] |
Returns a temporary filename based on filename.
|
[
"Returns",
"a",
"temporary",
"filename",
"based",
"on",
"filename",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L210-L214
|
train
|