repo
stringlengths 7
55
| path
stringlengths 4
223
| func_name
stringlengths 1
134
| original_string
stringlengths 75
104k
| language
stringclasses 1
value | code
stringlengths 75
104k
| code_tokens
listlengths 19
28.4k
| docstring
stringlengths 1
46.9k
| docstring_tokens
listlengths 1
1.97k
| sha
stringlengths 40
40
| url
stringlengths 87
315
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
tensorflow/datasets
|
tensorflow_datasets/core/file_format_adapter.py
|
_incomplete_files
|
def _incomplete_files(filenames):
"""Create temporary files for filenames and rename on exit."""
tmp_files = [get_incomplete_path(f) for f in filenames]
try:
yield tmp_files
for tmp, output in zip(tmp_files, filenames):
tf.io.gfile.rename(tmp, output)
finally:
for tmp in tmp_files:
if tf.io.gfile.exists(tmp):
tf.io.gfile.remove(tmp)
|
python
|
def _incomplete_files(filenames):
"""Create temporary files for filenames and rename on exit."""
tmp_files = [get_incomplete_path(f) for f in filenames]
try:
yield tmp_files
for tmp, output in zip(tmp_files, filenames):
tf.io.gfile.rename(tmp, output)
finally:
for tmp in tmp_files:
if tf.io.gfile.exists(tmp):
tf.io.gfile.remove(tmp)
|
[
"def",
"_incomplete_files",
"(",
"filenames",
")",
":",
"tmp_files",
"=",
"[",
"get_incomplete_path",
"(",
"f",
")",
"for",
"f",
"in",
"filenames",
"]",
"try",
":",
"yield",
"tmp_files",
"for",
"tmp",
",",
"output",
"in",
"zip",
"(",
"tmp_files",
",",
"filenames",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"rename",
"(",
"tmp",
",",
"output",
")",
"finally",
":",
"for",
"tmp",
"in",
"tmp_files",
":",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"tmp",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"remove",
"(",
"tmp",
")"
] |
Create temporary files for filenames and rename on exit.
|
[
"Create",
"temporary",
"files",
"for",
"filenames",
"and",
"rename",
"on",
"exit",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L218-L228
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/file_format_adapter.py
|
incomplete_dir
|
def incomplete_dir(dirname):
"""Create temporary dir for dirname and rename on exit."""
tmp_dir = get_incomplete_path(dirname)
tf.io.gfile.makedirs(tmp_dir)
try:
yield tmp_dir
tf.io.gfile.rename(tmp_dir, dirname)
finally:
if tf.io.gfile.exists(tmp_dir):
tf.io.gfile.rmtree(tmp_dir)
|
python
|
def incomplete_dir(dirname):
"""Create temporary dir for dirname and rename on exit."""
tmp_dir = get_incomplete_path(dirname)
tf.io.gfile.makedirs(tmp_dir)
try:
yield tmp_dir
tf.io.gfile.rename(tmp_dir, dirname)
finally:
if tf.io.gfile.exists(tmp_dir):
tf.io.gfile.rmtree(tmp_dir)
|
[
"def",
"incomplete_dir",
"(",
"dirname",
")",
":",
"tmp_dir",
"=",
"get_incomplete_path",
"(",
"dirname",
")",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"(",
"tmp_dir",
")",
"try",
":",
"yield",
"tmp_dir",
"tf",
".",
"io",
".",
"gfile",
".",
"rename",
"(",
"tmp_dir",
",",
"dirname",
")",
"finally",
":",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"tmp_dir",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"rmtree",
"(",
"tmp_dir",
")"
] |
Create temporary dir for dirname and rename on exit.
|
[
"Create",
"temporary",
"dir",
"for",
"dirname",
"and",
"rename",
"on",
"exit",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L232-L241
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/file_format_adapter.py
|
_shuffle_tfrecord
|
def _shuffle_tfrecord(path, random_gen):
"""Shuffle a single record file in memory."""
# Read all records
record_iter = tf.compat.v1.io.tf_record_iterator(path)
all_records = [
r for r in utils.tqdm(
record_iter, desc="Reading...", unit=" examples", leave=False)
]
# Shuffling in memory
random_gen.shuffle(all_records)
# Write all record back
with tf.io.TFRecordWriter(path) as writer:
for record in utils.tqdm(
all_records, desc="Writing...", unit=" examples", leave=False):
writer.write(record)
|
python
|
def _shuffle_tfrecord(path, random_gen):
"""Shuffle a single record file in memory."""
# Read all records
record_iter = tf.compat.v1.io.tf_record_iterator(path)
all_records = [
r for r in utils.tqdm(
record_iter, desc="Reading...", unit=" examples", leave=False)
]
# Shuffling in memory
random_gen.shuffle(all_records)
# Write all record back
with tf.io.TFRecordWriter(path) as writer:
for record in utils.tqdm(
all_records, desc="Writing...", unit=" examples", leave=False):
writer.write(record)
|
[
"def",
"_shuffle_tfrecord",
"(",
"path",
",",
"random_gen",
")",
":",
"# Read all records",
"record_iter",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"io",
".",
"tf_record_iterator",
"(",
"path",
")",
"all_records",
"=",
"[",
"r",
"for",
"r",
"in",
"utils",
".",
"tqdm",
"(",
"record_iter",
",",
"desc",
"=",
"\"Reading...\"",
",",
"unit",
"=",
"\" examples\"",
",",
"leave",
"=",
"False",
")",
"]",
"# Shuffling in memory",
"random_gen",
".",
"shuffle",
"(",
"all_records",
")",
"# Write all record back",
"with",
"tf",
".",
"io",
".",
"TFRecordWriter",
"(",
"path",
")",
"as",
"writer",
":",
"for",
"record",
"in",
"utils",
".",
"tqdm",
"(",
"all_records",
",",
"desc",
"=",
"\"Writing...\"",
",",
"unit",
"=",
"\" examples\"",
",",
"leave",
"=",
"False",
")",
":",
"writer",
".",
"write",
"(",
"record",
")"
] |
Shuffle a single record file in memory.
|
[
"Shuffle",
"a",
"single",
"record",
"file",
"in",
"memory",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L244-L258
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/file_format_adapter.py
|
_write_tfrecords_from_generator
|
def _write_tfrecords_from_generator(generator, output_files, shuffle=True):
"""Writes generated str records to output_files in round-robin order."""
if do_files_exist(output_files):
raise ValueError(
"Pre-processed files already exists: {}.".format(output_files))
with _incomplete_files(output_files) as tmp_files:
# Write all shards
writers = [tf.io.TFRecordWriter(fname) for fname in tmp_files]
with _close_on_exit(writers) as writers:
logging.info("Writing TFRecords")
_round_robin_write(writers, generator)
# Shuffle each shard
if shuffle:
# WARNING: Using np instead of Python random because Python random
# produce different values between Python 2 and 3 and between
# architectures
random_gen = np.random.RandomState(42)
for path in utils.tqdm(
tmp_files, desc="Shuffling...", unit=" shard", leave=False):
_shuffle_tfrecord(path, random_gen=random_gen)
|
python
|
def _write_tfrecords_from_generator(generator, output_files, shuffle=True):
"""Writes generated str records to output_files in round-robin order."""
if do_files_exist(output_files):
raise ValueError(
"Pre-processed files already exists: {}.".format(output_files))
with _incomplete_files(output_files) as tmp_files:
# Write all shards
writers = [tf.io.TFRecordWriter(fname) for fname in tmp_files]
with _close_on_exit(writers) as writers:
logging.info("Writing TFRecords")
_round_robin_write(writers, generator)
# Shuffle each shard
if shuffle:
# WARNING: Using np instead of Python random because Python random
# produce different values between Python 2 and 3 and between
# architectures
random_gen = np.random.RandomState(42)
for path in utils.tqdm(
tmp_files, desc="Shuffling...", unit=" shard", leave=False):
_shuffle_tfrecord(path, random_gen=random_gen)
|
[
"def",
"_write_tfrecords_from_generator",
"(",
"generator",
",",
"output_files",
",",
"shuffle",
"=",
"True",
")",
":",
"if",
"do_files_exist",
"(",
"output_files",
")",
":",
"raise",
"ValueError",
"(",
"\"Pre-processed files already exists: {}.\"",
".",
"format",
"(",
"output_files",
")",
")",
"with",
"_incomplete_files",
"(",
"output_files",
")",
"as",
"tmp_files",
":",
"# Write all shards",
"writers",
"=",
"[",
"tf",
".",
"io",
".",
"TFRecordWriter",
"(",
"fname",
")",
"for",
"fname",
"in",
"tmp_files",
"]",
"with",
"_close_on_exit",
"(",
"writers",
")",
"as",
"writers",
":",
"logging",
".",
"info",
"(",
"\"Writing TFRecords\"",
")",
"_round_robin_write",
"(",
"writers",
",",
"generator",
")",
"# Shuffle each shard",
"if",
"shuffle",
":",
"# WARNING: Using np instead of Python random because Python random",
"# produce different values between Python 2 and 3 and between",
"# architectures",
"random_gen",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"42",
")",
"for",
"path",
"in",
"utils",
".",
"tqdm",
"(",
"tmp_files",
",",
"desc",
"=",
"\"Shuffling...\"",
",",
"unit",
"=",
"\" shard\"",
",",
"leave",
"=",
"False",
")",
":",
"_shuffle_tfrecord",
"(",
"path",
",",
"random_gen",
"=",
"random_gen",
")"
] |
Writes generated str records to output_files in round-robin order.
|
[
"Writes",
"generated",
"str",
"records",
"to",
"output_files",
"in",
"round",
"-",
"robin",
"order",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L261-L281
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/file_format_adapter.py
|
_round_robin_write
|
def _round_robin_write(writers, generator):
"""Write records from generator round-robin across writers."""
for i, example in enumerate(utils.tqdm(
generator, unit=" examples", leave=False)):
writers[i % len(writers)].write(example)
|
python
|
def _round_robin_write(writers, generator):
"""Write records from generator round-robin across writers."""
for i, example in enumerate(utils.tqdm(
generator, unit=" examples", leave=False)):
writers[i % len(writers)].write(example)
|
[
"def",
"_round_robin_write",
"(",
"writers",
",",
"generator",
")",
":",
"for",
"i",
",",
"example",
"in",
"enumerate",
"(",
"utils",
".",
"tqdm",
"(",
"generator",
",",
"unit",
"=",
"\" examples\"",
",",
"leave",
"=",
"False",
")",
")",
":",
"writers",
"[",
"i",
"%",
"len",
"(",
"writers",
")",
"]",
".",
"write",
"(",
"example",
")"
] |
Write records from generator round-robin across writers.
|
[
"Write",
"records",
"from",
"generator",
"round",
"-",
"robin",
"across",
"writers",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L284-L288
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/file_format_adapter.py
|
_item_to_tf_feature
|
def _item_to_tf_feature(item, key_name):
"""Single item to a tf.train.Feature."""
v = item
if isinstance(v, (list, tuple)) and not v:
raise ValueError(
"Feature {} received an empty list value, so is unable to infer the "
"feature type to record. To support empty value, the corresponding "
"FeatureConnector should return a numpy array with the correct dtype "
"instead of a Python list.".format(key_name)
)
# Handle strings/bytes first
if isinstance(v, (six.binary_type, six.string_types)):
v = [tf.compat.as_bytes(v)]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif (isinstance(v, (tuple, list)) and
all(isinstance(x, (six.binary_type, six.string_types)) for x in v)):
v = [tf.compat.as_bytes(x) for x in v]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif (isinstance(v, np.ndarray) and
(v.dtype.kind in ("U", "S") or v.dtype == object)): # binary or unicode
v = [tf.compat.as_bytes(x) for x in v.flatten()]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
# Use NumPy for numeric types
v = np.array(v).flatten() # Convert v into a 1-d array
if np.issubdtype(v.dtype, np.integer):
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
elif np.issubdtype(v.dtype, np.floating):
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
else:
raise ValueError(
"Value received: {}.\n"
"tf.train.Feature does not support type {} for feature key {}. "
"This may indicate that one of the FeatureConnectors received an "
"unsupported value as input.".format(repr(v), repr(type(v)), key_name)
)
|
python
|
def _item_to_tf_feature(item, key_name):
"""Single item to a tf.train.Feature."""
v = item
if isinstance(v, (list, tuple)) and not v:
raise ValueError(
"Feature {} received an empty list value, so is unable to infer the "
"feature type to record. To support empty value, the corresponding "
"FeatureConnector should return a numpy array with the correct dtype "
"instead of a Python list.".format(key_name)
)
# Handle strings/bytes first
if isinstance(v, (six.binary_type, six.string_types)):
v = [tf.compat.as_bytes(v)]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif (isinstance(v, (tuple, list)) and
all(isinstance(x, (six.binary_type, six.string_types)) for x in v)):
v = [tf.compat.as_bytes(x) for x in v]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif (isinstance(v, np.ndarray) and
(v.dtype.kind in ("U", "S") or v.dtype == object)): # binary or unicode
v = [tf.compat.as_bytes(x) for x in v.flatten()]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
# Use NumPy for numeric types
v = np.array(v).flatten() # Convert v into a 1-d array
if np.issubdtype(v.dtype, np.integer):
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
elif np.issubdtype(v.dtype, np.floating):
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
else:
raise ValueError(
"Value received: {}.\n"
"tf.train.Feature does not support type {} for feature key {}. "
"This may indicate that one of the FeatureConnectors received an "
"unsupported value as input.".format(repr(v), repr(type(v)), key_name)
)
|
[
"def",
"_item_to_tf_feature",
"(",
"item",
",",
"key_name",
")",
":",
"v",
"=",
"item",
"if",
"isinstance",
"(",
"v",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"not",
"v",
":",
"raise",
"ValueError",
"(",
"\"Feature {} received an empty list value, so is unable to infer the \"",
"\"feature type to record. To support empty value, the corresponding \"",
"\"FeatureConnector should return a numpy array with the correct dtype \"",
"\"instead of a Python list.\"",
".",
"format",
"(",
"key_name",
")",
")",
"# Handle strings/bytes first",
"if",
"isinstance",
"(",
"v",
",",
"(",
"six",
".",
"binary_type",
",",
"six",
".",
"string_types",
")",
")",
":",
"v",
"=",
"[",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"v",
")",
"]",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"bytes_list",
"=",
"tf",
".",
"train",
".",
"BytesList",
"(",
"value",
"=",
"v",
")",
")",
"elif",
"(",
"isinstance",
"(",
"v",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"all",
"(",
"isinstance",
"(",
"x",
",",
"(",
"six",
".",
"binary_type",
",",
"six",
".",
"string_types",
")",
")",
"for",
"x",
"in",
"v",
")",
")",
":",
"v",
"=",
"[",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"x",
")",
"for",
"x",
"in",
"v",
"]",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"bytes_list",
"=",
"tf",
".",
"train",
".",
"BytesList",
"(",
"value",
"=",
"v",
")",
")",
"elif",
"(",
"isinstance",
"(",
"v",
",",
"np",
".",
"ndarray",
")",
"and",
"(",
"v",
".",
"dtype",
".",
"kind",
"in",
"(",
"\"U\"",
",",
"\"S\"",
")",
"or",
"v",
".",
"dtype",
"==",
"object",
")",
")",
":",
"# binary or unicode",
"v",
"=",
"[",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"x",
")",
"for",
"x",
"in",
"v",
".",
"flatten",
"(",
")",
"]",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"bytes_list",
"=",
"tf",
".",
"train",
".",
"BytesList",
"(",
"value",
"=",
"v",
")",
")",
"# Use NumPy for numeric types",
"v",
"=",
"np",
".",
"array",
"(",
"v",
")",
".",
"flatten",
"(",
")",
"# Convert v into a 1-d array",
"if",
"np",
".",
"issubdtype",
"(",
"v",
".",
"dtype",
",",
"np",
".",
"integer",
")",
":",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"int64_list",
"=",
"tf",
".",
"train",
".",
"Int64List",
"(",
"value",
"=",
"v",
")",
")",
"elif",
"np",
".",
"issubdtype",
"(",
"v",
".",
"dtype",
",",
"np",
".",
"floating",
")",
":",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"float_list",
"=",
"tf",
".",
"train",
".",
"FloatList",
"(",
"value",
"=",
"v",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Value received: {}.\\n\"",
"\"tf.train.Feature does not support type {} for feature key {}. \"",
"\"This may indicate that one of the FeatureConnectors received an \"",
"\"unsupported value as input.\"",
".",
"format",
"(",
"repr",
"(",
"v",
")",
",",
"repr",
"(",
"type",
"(",
"v",
")",
")",
",",
"key_name",
")",
")"
] |
Single item to a tf.train.Feature.
|
[
"Single",
"item",
"to",
"a",
"tf",
".",
"train",
".",
"Feature",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L307-L344
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/file_format_adapter.py
|
_dict_to_tf_features
|
def _dict_to_tf_features(example_dict):
"""Builds tf.train.Features from (string -> int/float/str list) dictionary."""
features = {k: _item_to_tf_feature(v, k) for k, v
in six.iteritems(example_dict)}
return tf.train.Features(feature=features)
|
python
|
def _dict_to_tf_features(example_dict):
"""Builds tf.train.Features from (string -> int/float/str list) dictionary."""
features = {k: _item_to_tf_feature(v, k) for k, v
in six.iteritems(example_dict)}
return tf.train.Features(feature=features)
|
[
"def",
"_dict_to_tf_features",
"(",
"example_dict",
")",
":",
"features",
"=",
"{",
"k",
":",
"_item_to_tf_feature",
"(",
"v",
",",
"k",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"example_dict",
")",
"}",
"return",
"tf",
".",
"train",
".",
"Features",
"(",
"feature",
"=",
"features",
")"
] |
Builds tf.train.Features from (string -> int/float/str list) dictionary.
|
[
"Builds",
"tf",
".",
"train",
".",
"Features",
"from",
"(",
"string",
"-",
">",
"int",
"/",
"float",
"/",
"str",
"list",
")",
"dictionary",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L347-L351
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/tqdm_utils.py
|
_async_tqdm
|
def _async_tqdm(*args, **kwargs):
"""Wrapper around Tqdm which can be updated in threads.
Usage:
```
with utils.async_tqdm(...) as pbar:
# pbar can then be modified inside a thread
# pbar.update_total(3)
# pbar.update()
```
Args:
*args: args of tqdm
**kwargs: kwargs of tqdm
Yields:
pbar: Async pbar which can be shared between threads.
"""
with tqdm_lib.tqdm(*args, **kwargs) as pbar:
pbar = _TqdmPbarAsync(pbar)
yield pbar
pbar.clear() # pop pbar from the active list of pbar
print()
|
python
|
def _async_tqdm(*args, **kwargs):
"""Wrapper around Tqdm which can be updated in threads.
Usage:
```
with utils.async_tqdm(...) as pbar:
# pbar can then be modified inside a thread
# pbar.update_total(3)
# pbar.update()
```
Args:
*args: args of tqdm
**kwargs: kwargs of tqdm
Yields:
pbar: Async pbar which can be shared between threads.
"""
with tqdm_lib.tqdm(*args, **kwargs) as pbar:
pbar = _TqdmPbarAsync(pbar)
yield pbar
pbar.clear() # pop pbar from the active list of pbar
print()
|
[
"def",
"_async_tqdm",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"tqdm_lib",
".",
"tqdm",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"as",
"pbar",
":",
"pbar",
"=",
"_TqdmPbarAsync",
"(",
"pbar",
")",
"yield",
"pbar",
"pbar",
".",
"clear",
"(",
")",
"# pop pbar from the active list of pbar",
"print",
"(",
")"
] |
Wrapper around Tqdm which can be updated in threads.
Usage:
```
with utils.async_tqdm(...) as pbar:
# pbar can then be modified inside a thread
# pbar.update_total(3)
# pbar.update()
```
Args:
*args: args of tqdm
**kwargs: kwargs of tqdm
Yields:
pbar: Async pbar which can be shared between threads.
|
[
"Wrapper",
"around",
"Tqdm",
"which",
"can",
"be",
"updated",
"in",
"threads",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L79-L102
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/tqdm_utils.py
|
_TqdmPbarAsync.update_total
|
def update_total(self, n=1):
"""Increment total pbar value."""
with self._lock:
self._pbar.total += n
self.refresh()
|
python
|
def update_total(self, n=1):
"""Increment total pbar value."""
with self._lock:
self._pbar.total += n
self.refresh()
|
[
"def",
"update_total",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_pbar",
".",
"total",
"+=",
"n",
"self",
".",
"refresh",
"(",
")"
] |
Increment total pbar value.
|
[
"Increment",
"total",
"pbar",
"value",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L114-L118
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/tqdm_utils.py
|
_TqdmPbarAsync.update
|
def update(self, n=1):
"""Increment current value."""
with self._lock:
self._pbar.update(n)
self.refresh()
|
python
|
def update(self, n=1):
"""Increment current value."""
with self._lock:
self._pbar.update(n)
self.refresh()
|
[
"def",
"update",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_pbar",
".",
"update",
"(",
"n",
")",
"self",
".",
"refresh",
"(",
")"
] |
Increment current value.
|
[
"Increment",
"current",
"value",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L120-L124
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/abstract_reasoning.py
|
AbstractReasoning._build_pcollection
|
def _build_pcollection(self, pipeline, folder, split):
"""Generate examples as dicts."""
beam = tfds.core.lazy_imports.apache_beam
split_type = self.builder_config.split_type
filename = os.path.join(folder, "{}.tar.gz".format(split_type))
def _extract_data(inputs):
"""Extracts files from the tar archives."""
filename, split = inputs
with tf.io.gfile.GFile(filename, "rb") as f:
with tarfile.open(fileobj=f, mode="r") as tar:
for tarinfo in tar:
split_name = tarinfo.name.split("_")
if len(split_name) > 2 and split_name[2] == split:
buf = six.BytesIO()
shutil.copyfileobj(tar.extractfile(tarinfo), buf)
yield [tarinfo.name, buf.getvalue()]
def _process_example(inputs):
filename, data_string = inputs
buf = six.BytesIO(data_string)
buf.seek(0)
data = np.load(buf)
# Extract the images and convert to uint8. The reshape is required, see
# https://github.com/deepmind/abstract-reasoning-matrices.
all_images = np.uint8(data["image"].reshape(16, 160, 160, 1))
return {
"relation_structure_encoded": data["relation_structure_encoded"],
"target": data["target"],
"meta_target": data["meta_target"],
"context": all_images[:8],
"answers": all_images[8:],
"filename": filename,
}
# Beam might fuse together the _extract_data and _process_example which
# defeats the purpose of parallel processing. As a result, we reshard by
# doing a GroupByKey on random keys, and then flattening again.
def _add_random_keys(inputs):
key = str(random.randrange(10**10))
return key, inputs
def _remove_keys(inputs):
_, rows = inputs
for row in rows:
yield row
return (pipeline
| beam.Create([(filename, split)])
| beam.FlatMap(_extract_data)
| beam.Map(_add_random_keys)
| beam.GroupByKey()
| beam.FlatMap(_remove_keys)
| beam.Map(_process_example))
|
python
|
def _build_pcollection(self, pipeline, folder, split):
"""Generate examples as dicts."""
beam = tfds.core.lazy_imports.apache_beam
split_type = self.builder_config.split_type
filename = os.path.join(folder, "{}.tar.gz".format(split_type))
def _extract_data(inputs):
"""Extracts files from the tar archives."""
filename, split = inputs
with tf.io.gfile.GFile(filename, "rb") as f:
with tarfile.open(fileobj=f, mode="r") as tar:
for tarinfo in tar:
split_name = tarinfo.name.split("_")
if len(split_name) > 2 and split_name[2] == split:
buf = six.BytesIO()
shutil.copyfileobj(tar.extractfile(tarinfo), buf)
yield [tarinfo.name, buf.getvalue()]
def _process_example(inputs):
filename, data_string = inputs
buf = six.BytesIO(data_string)
buf.seek(0)
data = np.load(buf)
# Extract the images and convert to uint8. The reshape is required, see
# https://github.com/deepmind/abstract-reasoning-matrices.
all_images = np.uint8(data["image"].reshape(16, 160, 160, 1))
return {
"relation_structure_encoded": data["relation_structure_encoded"],
"target": data["target"],
"meta_target": data["meta_target"],
"context": all_images[:8],
"answers": all_images[8:],
"filename": filename,
}
# Beam might fuse together the _extract_data and _process_example which
# defeats the purpose of parallel processing. As a result, we reshard by
# doing a GroupByKey on random keys, and then flattening again.
def _add_random_keys(inputs):
key = str(random.randrange(10**10))
return key, inputs
def _remove_keys(inputs):
_, rows = inputs
for row in rows:
yield row
return (pipeline
| beam.Create([(filename, split)])
| beam.FlatMap(_extract_data)
| beam.Map(_add_random_keys)
| beam.GroupByKey()
| beam.FlatMap(_remove_keys)
| beam.Map(_process_example))
|
[
"def",
"_build_pcollection",
"(",
"self",
",",
"pipeline",
",",
"folder",
",",
"split",
")",
":",
"beam",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"apache_beam",
"split_type",
"=",
"self",
".",
"builder_config",
".",
"split_type",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"\"{}.tar.gz\"",
".",
"format",
"(",
"split_type",
")",
")",
"def",
"_extract_data",
"(",
"inputs",
")",
":",
"\"\"\"Extracts files from the tar archives.\"\"\"",
"filename",
",",
"split",
"=",
"inputs",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"f",
":",
"with",
"tarfile",
".",
"open",
"(",
"fileobj",
"=",
"f",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"tar",
":",
"for",
"tarinfo",
"in",
"tar",
":",
"split_name",
"=",
"tarinfo",
".",
"name",
".",
"split",
"(",
"\"_\"",
")",
"if",
"len",
"(",
"split_name",
")",
">",
"2",
"and",
"split_name",
"[",
"2",
"]",
"==",
"split",
":",
"buf",
"=",
"six",
".",
"BytesIO",
"(",
")",
"shutil",
".",
"copyfileobj",
"(",
"tar",
".",
"extractfile",
"(",
"tarinfo",
")",
",",
"buf",
")",
"yield",
"[",
"tarinfo",
".",
"name",
",",
"buf",
".",
"getvalue",
"(",
")",
"]",
"def",
"_process_example",
"(",
"inputs",
")",
":",
"filename",
",",
"data_string",
"=",
"inputs",
"buf",
"=",
"six",
".",
"BytesIO",
"(",
"data_string",
")",
"buf",
".",
"seek",
"(",
"0",
")",
"data",
"=",
"np",
".",
"load",
"(",
"buf",
")",
"# Extract the images and convert to uint8. The reshape is required, see",
"# https://github.com/deepmind/abstract-reasoning-matrices.",
"all_images",
"=",
"np",
".",
"uint8",
"(",
"data",
"[",
"\"image\"",
"]",
".",
"reshape",
"(",
"16",
",",
"160",
",",
"160",
",",
"1",
")",
")",
"return",
"{",
"\"relation_structure_encoded\"",
":",
"data",
"[",
"\"relation_structure_encoded\"",
"]",
",",
"\"target\"",
":",
"data",
"[",
"\"target\"",
"]",
",",
"\"meta_target\"",
":",
"data",
"[",
"\"meta_target\"",
"]",
",",
"\"context\"",
":",
"all_images",
"[",
":",
"8",
"]",
",",
"\"answers\"",
":",
"all_images",
"[",
"8",
":",
"]",
",",
"\"filename\"",
":",
"filename",
",",
"}",
"# Beam might fuse together the _extract_data and _process_example which",
"# defeats the purpose of parallel processing. As a result, we reshard by",
"# doing a GroupByKey on random keys, and then flattening again.",
"def",
"_add_random_keys",
"(",
"inputs",
")",
":",
"key",
"=",
"str",
"(",
"random",
".",
"randrange",
"(",
"10",
"**",
"10",
")",
")",
"return",
"key",
",",
"inputs",
"def",
"_remove_keys",
"(",
"inputs",
")",
":",
"_",
",",
"rows",
"=",
"inputs",
"for",
"row",
"in",
"rows",
":",
"yield",
"row",
"return",
"(",
"pipeline",
"|",
"beam",
".",
"Create",
"(",
"[",
"(",
"filename",
",",
"split",
")",
"]",
")",
"|",
"beam",
".",
"FlatMap",
"(",
"_extract_data",
")",
"|",
"beam",
".",
"Map",
"(",
"_add_random_keys",
")",
"|",
"beam",
".",
"GroupByKey",
"(",
")",
"|",
"beam",
".",
"FlatMap",
"(",
"_remove_keys",
")",
"|",
"beam",
".",
"Map",
"(",
"_process_example",
")",
")"
] |
Generate examples as dicts.
|
[
"Generate",
"examples",
"as",
"dicts",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/abstract_reasoning.py#L250-L305
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/extractor.py
|
_copy
|
def _copy(src_file, dest_path):
"""Copy data read from src file obj to new file in dest_path."""
tf.io.gfile.makedirs(os.path.dirname(dest_path))
with tf.io.gfile.GFile(dest_path, 'wb') as dest_file:
while True:
data = src_file.read(io.DEFAULT_BUFFER_SIZE)
if not data:
break
dest_file.write(data)
|
python
|
def _copy(src_file, dest_path):
"""Copy data read from src file obj to new file in dest_path."""
tf.io.gfile.makedirs(os.path.dirname(dest_path))
with tf.io.gfile.GFile(dest_path, 'wb') as dest_file:
while True:
data = src_file.read(io.DEFAULT_BUFFER_SIZE)
if not data:
break
dest_file.write(data)
|
[
"def",
"_copy",
"(",
"src_file",
",",
"dest_path",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"dest_path",
")",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"dest_path",
",",
"'wb'",
")",
"as",
"dest_file",
":",
"while",
"True",
":",
"data",
"=",
"src_file",
".",
"read",
"(",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
"if",
"not",
"data",
":",
"break",
"dest_file",
".",
"write",
"(",
"data",
")"
] |
Copy data read from src file obj to new file in dest_path.
|
[
"Copy",
"data",
"read",
"from",
"src",
"file",
"obj",
"to",
"new",
"file",
"in",
"dest_path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L103-L111
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/extractor.py
|
iter_tar
|
def iter_tar(arch_f, gz=False, stream=False):
"""Iter over tar archive, yielding (path, object-like) tuples.
Args:
arch_f: File object of the archive to iterate.
gz: If True, open a gzip'ed archive.
stream: If True, open the archive in stream mode which allows for faster
processing and less temporary disk consumption, but random access to the
file is not allowed.
Yields:
(filepath, extracted_fobj) for each file in the archive.
"""
read_type = 'r' + ('|' if stream else ':')
if gz:
read_type += 'gz'
with _open_or_pass(arch_f) as fobj:
tar = tarfile.open(mode=read_type, fileobj=fobj)
for member in tar:
extract_file = tar.extractfile(member)
if extract_file: # File with data (not directory):
path = _normpath(member.path)
if not path:
continue
yield [path, extract_file]
|
python
|
def iter_tar(arch_f, gz=False, stream=False):
"""Iter over tar archive, yielding (path, object-like) tuples.
Args:
arch_f: File object of the archive to iterate.
gz: If True, open a gzip'ed archive.
stream: If True, open the archive in stream mode which allows for faster
processing and less temporary disk consumption, but random access to the
file is not allowed.
Yields:
(filepath, extracted_fobj) for each file in the archive.
"""
read_type = 'r' + ('|' if stream else ':')
if gz:
read_type += 'gz'
with _open_or_pass(arch_f) as fobj:
tar = tarfile.open(mode=read_type, fileobj=fobj)
for member in tar:
extract_file = tar.extractfile(member)
if extract_file: # File with data (not directory):
path = _normpath(member.path)
if not path:
continue
yield [path, extract_file]
|
[
"def",
"iter_tar",
"(",
"arch_f",
",",
"gz",
"=",
"False",
",",
"stream",
"=",
"False",
")",
":",
"read_type",
"=",
"'r'",
"+",
"(",
"'|'",
"if",
"stream",
"else",
"':'",
")",
"if",
"gz",
":",
"read_type",
"+=",
"'gz'",
"with",
"_open_or_pass",
"(",
"arch_f",
")",
"as",
"fobj",
":",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"mode",
"=",
"read_type",
",",
"fileobj",
"=",
"fobj",
")",
"for",
"member",
"in",
"tar",
":",
"extract_file",
"=",
"tar",
".",
"extractfile",
"(",
"member",
")",
"if",
"extract_file",
":",
"# File with data (not directory):",
"path",
"=",
"_normpath",
"(",
"member",
".",
"path",
")",
"if",
"not",
"path",
":",
"continue",
"yield",
"[",
"path",
",",
"extract_file",
"]"
] |
Iter over tar archive, yielding (path, object-like) tuples.
Args:
arch_f: File object of the archive to iterate.
gz: If True, open a gzip'ed archive.
stream: If True, open the archive in stream mode which allows for faster
processing and less temporary disk consumption, but random access to the
file is not allowed.
Yields:
(filepath, extracted_fobj) for each file in the archive.
|
[
"Iter",
"over",
"tar",
"archive",
"yielding",
"(",
"path",
"object",
"-",
"like",
")",
"tuples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L133-L158
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/extractor.py
|
_Extractor.tqdm
|
def tqdm(self):
  """Add a progression bar for the current extraction."""
  # Generator intended for use as a context manager (its decorator is not
  # visible here — presumably `@contextlib.contextmanager`; TODO confirm).
  # The bar starts at total=0; callers grow the total as work is scheduled
  # (see the `update_total(1)` call in `extract`).
  with utils.async_tqdm(
      total=0, desc='Extraction completed...', unit=' file') as pbar_path:
    # Stash the bar on the instance so other methods can update progress.
    self._pbar_path = pbar_path
    yield
|
python
|
def tqdm(self):
"""Add a progression bar for the current extraction."""
with utils.async_tqdm(
total=0, desc='Extraction completed...', unit=' file') as pbar_path:
self._pbar_path = pbar_path
yield
|
[
"def",
"tqdm",
"(",
"self",
")",
":",
"with",
"utils",
".",
"async_tqdm",
"(",
"total",
"=",
"0",
",",
"desc",
"=",
"'Extraction completed...'",
",",
"unit",
"=",
"' file'",
")",
"as",
"pbar_path",
":",
"self",
".",
"_pbar_path",
"=",
"pbar_path",
"yield"
] |
Add a progression bar for the current extraction.
|
[
"Add",
"a",
"progression",
"bar",
"for",
"the",
"current",
"extraction",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L68-L73
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/extractor.py
|
_Extractor.extract
|
def extract(self, path, extract_method, to_path):
  """Returns `promise.Promise` => to_path."""
  # Register one more pending extraction on the progress bar first, keeping
  # the original side-effect order (the total grows even if validation
  # below raises).
  self._pbar_path.update_total(1)
  if extract_method not in _EXTRACT_METHODS:
    raise ValueError('Unknown extraction method "%s".' % extract_method)
  # Run the blocking extraction on the executor and wrap the future.
  pending = self._executor.submit(
      self._sync_extract, path, extract_method, to_path)
  return promise.Promise.resolve(pending)
|
python
|
def extract(self, path, extract_method, to_path):
"""Returns `promise.Promise` => to_path."""
self._pbar_path.update_total(1)
if extract_method not in _EXTRACT_METHODS:
raise ValueError('Unknown extraction method "%s".' % extract_method)
future = self._executor.submit(self._sync_extract,
path, extract_method, to_path)
return promise.Promise.resolve(future)
|
[
"def",
"extract",
"(",
"self",
",",
"path",
",",
"extract_method",
",",
"to_path",
")",
":",
"self",
".",
"_pbar_path",
".",
"update_total",
"(",
"1",
")",
"if",
"extract_method",
"not",
"in",
"_EXTRACT_METHODS",
":",
"raise",
"ValueError",
"(",
"'Unknown extraction method \"%s\".'",
"%",
"extract_method",
")",
"future",
"=",
"self",
".",
"_executor",
".",
"submit",
"(",
"self",
".",
"_sync_extract",
",",
"path",
",",
"extract_method",
",",
"to_path",
")",
"return",
"promise",
".",
"Promise",
".",
"resolve",
"(",
"future",
")"
] |
Returns `promise.Promise` => to_path.
|
[
"Returns",
"promise",
".",
"Promise",
"=",
">",
"to_path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L75-L82
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/extractor.py
|
_Extractor._sync_extract
|
def _sync_extract(self, from_path, method, to_path):
  """Returns `to_path` once resource has been extracted there."""
  # Extract into a unique temporary path first so a partially-written
  # extraction is never visible at `to_path`.
  to_path_tmp = '%s%s_%s' % (to_path, constants.INCOMPLETE_SUFFIX,
                             uuid.uuid4().hex)
  try:
    for path, handle in iter_archive(from_path, method):
      # `path` is falsy for single-file archives (e.g. gzip): write the data
      # directly at `to_path_tmp` instead of inside it as a directory.
      _copy(handle, path and os.path.join(to_path_tmp, path) or to_path_tmp)
  except BaseException as err:
    # NOTE(review): catching BaseException also converts KeyboardInterrupt
    # into ExtractError, and this bare re-raise loses the original traceback
    # chain — presumably kept for py2 compatibility (the package uses six);
    # confirm before changing.
    msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
    raise ExtractError(msg)
  # `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
  # directories, so delete destination first, if it already exists.
  if tf.io.gfile.exists(to_path):
    tf.io.gfile.rmtree(to_path)
  tf.io.gfile.rename(to_path_tmp, to_path)
  self._pbar_path.update(1)
  return to_path
|
python
|
def _sync_extract(self, from_path, method, to_path):
"""Returns `to_path` once resource has been extracted there."""
to_path_tmp = '%s%s_%s' % (to_path, constants.INCOMPLETE_SUFFIX,
uuid.uuid4().hex)
try:
for path, handle in iter_archive(from_path, method):
_copy(handle, path and os.path.join(to_path_tmp, path) or to_path_tmp)
except BaseException as err:
msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
raise ExtractError(msg)
# `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
# directories, so delete destination first, if it already exists.
if tf.io.gfile.exists(to_path):
tf.io.gfile.rmtree(to_path)
tf.io.gfile.rename(to_path_tmp, to_path)
self._pbar_path.update(1)
return to_path
|
[
"def",
"_sync_extract",
"(",
"self",
",",
"from_path",
",",
"method",
",",
"to_path",
")",
":",
"to_path_tmp",
"=",
"'%s%s_%s'",
"%",
"(",
"to_path",
",",
"constants",
".",
"INCOMPLETE_SUFFIX",
",",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
"try",
":",
"for",
"path",
",",
"handle",
"in",
"iter_archive",
"(",
"from_path",
",",
"method",
")",
":",
"_copy",
"(",
"handle",
",",
"path",
"and",
"os",
".",
"path",
".",
"join",
"(",
"to_path_tmp",
",",
"path",
")",
"or",
"to_path_tmp",
")",
"except",
"BaseException",
"as",
"err",
":",
"msg",
"=",
"'Error while extracting %s to %s : %s'",
"%",
"(",
"from_path",
",",
"to_path",
",",
"err",
")",
"raise",
"ExtractError",
"(",
"msg",
")",
"# `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty",
"# directories, so delete destination first, if it already exists.",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"to_path",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"rmtree",
"(",
"to_path",
")",
"tf",
".",
"io",
".",
"gfile",
".",
"rename",
"(",
"to_path_tmp",
",",
"to_path",
")",
"self",
".",
"_pbar_path",
".",
"update",
"(",
"1",
")",
"return",
"to_path"
] |
Returns `to_path` once resource has been extracted there.
|
[
"Returns",
"to_path",
"once",
"resource",
"has",
"been",
"extracted",
"there",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L84-L100
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
to_serialized_field
|
def to_serialized_field(tensor_info):
  """Convert a `TensorInfo` object into a feature proto object."""
  # Map the tensor dtype onto one of the three dtypes tf.train.Example
  # supports (int64, float32, string): bools and all integer widths widen
  # to int64, every float width becomes float32.
  # TODO(b/119937875): this loses precision for float64, wastes space for
  # bool, and leaves complex/quantized dtypes unsupported.
  if tensor_info.dtype.is_integer or tensor_info.dtype.is_bool:
    serialized_dtype = tf.int64
  elif tensor_info.dtype.is_floating:
    serialized_dtype = tf.float32
  else:
    serialized_dtype = tensor_info.dtype
  if serialized_dtype not in (tf.int64, tf.float32, tf.string):
    raise NotImplementedError(
        'Serialization not implemented for {}'.format(serialized_dtype))

  shape = tensor_info.shape
  # Exactly one unknown dimension, and it is the leading one: a sequence.
  if shape is not None and shape.count(None) == 1 and shape[0] is None:
    return tf.io.FixedLenSequenceFeature(
        shape=shape[1:],
        dtype=serialized_dtype,
        allow_missing=True,
    )
  # Unknown rank, or some other dimension is undefined: variable length.
  if shape is None or None in shape:
    return tf.io.VarLenFeature(dtype=serialized_dtype)
  # Fully-defined static shape.
  return tf.io.FixedLenFeature(
      shape=shape,
      dtype=serialized_dtype,
  )
|
python
|
def to_serialized_field(tensor_info):
"""Convert a `TensorInfo` object into a feature proto object."""
# Select the type
dtype = tensor_info.dtype
# TODO(b/119937875): TF Examples proto only support int64, float32 and string
# This create limitation like float64 downsampled to float32, bool converted
# to int64 which is space ineficient, no support for complexes or quantized
if tensor_info.dtype.is_integer or tensor_info.dtype.is_bool:
dtype = tf.int64
elif tensor_info.dtype.is_floating:
dtype = tf.float32
# It seems quite space inefficient to convert bool to int64
# We may want to add support for complex, quantize dtype in the future
# TFRecord only support 3 types
if dtype not in (tf.int64, tf.float32, tf.string):
raise NotImplementedError(
'Serialization not implemented for {}'.format(dtype))
# Select the feature proto type in function of the unknown shape
if (tensor_info.shape is not None and # Shape is a sequence (None, ...)
tensor_info.shape.count(None) == 1 and
tensor_info.shape[0] is None):
return tf.io.FixedLenSequenceFeature(
shape=tensor_info.shape[1:],
dtype=dtype,
allow_missing=True,
)
# At least one dimension is undefined
elif tensor_info.shape is None or None in tensor_info.shape:
return tf.io.VarLenFeature(dtype=dtype)
else:
return tf.io.FixedLenFeature(
shape=tensor_info.shape,
dtype=dtype,
)
|
[
"def",
"to_serialized_field",
"(",
"tensor_info",
")",
":",
"# Select the type",
"dtype",
"=",
"tensor_info",
".",
"dtype",
"# TODO(b/119937875): TF Examples proto only support int64, float32 and string",
"# This create limitation like float64 downsampled to float32, bool converted",
"# to int64 which is space ineficient, no support for complexes or quantized",
"if",
"tensor_info",
".",
"dtype",
".",
"is_integer",
"or",
"tensor_info",
".",
"dtype",
".",
"is_bool",
":",
"dtype",
"=",
"tf",
".",
"int64",
"elif",
"tensor_info",
".",
"dtype",
".",
"is_floating",
":",
"dtype",
"=",
"tf",
".",
"float32",
"# It seems quite space inefficient to convert bool to int64",
"# We may want to add support for complex, quantize dtype in the future",
"# TFRecord only support 3 types",
"if",
"dtype",
"not",
"in",
"(",
"tf",
".",
"int64",
",",
"tf",
".",
"float32",
",",
"tf",
".",
"string",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Serialization not implemented for {}'",
".",
"format",
"(",
"dtype",
")",
")",
"# Select the feature proto type in function of the unknown shape",
"if",
"(",
"tensor_info",
".",
"shape",
"is",
"not",
"None",
"and",
"# Shape is a sequence (None, ...)",
"tensor_info",
".",
"shape",
".",
"count",
"(",
"None",
")",
"==",
"1",
"and",
"tensor_info",
".",
"shape",
"[",
"0",
"]",
"is",
"None",
")",
":",
"return",
"tf",
".",
"io",
".",
"FixedLenSequenceFeature",
"(",
"shape",
"=",
"tensor_info",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"dtype",
",",
"allow_missing",
"=",
"True",
",",
")",
"# At least one dimension is undefined",
"elif",
"tensor_info",
".",
"shape",
"is",
"None",
"or",
"None",
"in",
"tensor_info",
".",
"shape",
":",
"return",
"tf",
".",
"io",
".",
"VarLenFeature",
"(",
"dtype",
"=",
"dtype",
")",
"else",
":",
"return",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"shape",
"=",
"tensor_info",
".",
"shape",
",",
"dtype",
"=",
"dtype",
",",
")"
] |
Convert a `TensorInfo` object into a feature proto object.
|
[
"Convert",
"a",
"TensorInfo",
"object",
"into",
"a",
"feature",
"proto",
"object",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L576-L612
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
to_feature
|
def to_feature(value):
  """Convert the given value to Feature if necessary."""
  if isinstance(value, FeatureConnector):
    # Already a feature connector: pass through unchanged.
    return value
  if utils.is_dtype(value):  # tf.int32, tf.string,...
    # A bare dtype describes a scalar tensor of that dtype.
    return Tensor(shape=(), dtype=tf.as_dtype(value))
  if isinstance(value, dict):
    # A plain dict becomes a (possibly nested) FeaturesDict.
    return FeaturesDict(value)
  raise ValueError('Feature not supported: {}'.format(value))
|
python
|
def to_feature(value):
"""Convert the given value to Feature if necessary."""
if isinstance(value, FeatureConnector):
return value
elif utils.is_dtype(value): # tf.int32, tf.string,...
return Tensor(shape=(), dtype=tf.as_dtype(value))
elif isinstance(value, dict):
return FeaturesDict(value)
else:
raise ValueError('Feature not supported: {}'.format(value))
|
[
"def",
"to_feature",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"FeatureConnector",
")",
":",
"return",
"value",
"elif",
"utils",
".",
"is_dtype",
"(",
"value",
")",
":",
"# tf.int32, tf.string,...",
"return",
"Tensor",
"(",
"shape",
"=",
"(",
")",
",",
"dtype",
"=",
"tf",
".",
"as_dtype",
"(",
"value",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"FeaturesDict",
"(",
"value",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Feature not supported: {}'",
".",
"format",
"(",
"value",
")",
")"
] |
Convert the given value to Feature if necessary.
|
[
"Convert",
"the",
"given",
"value",
"to",
"Feature",
"if",
"necessary",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L615-L624
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
decode_single_feature_from_dict
|
def decode_single_feature_from_dict(
    feature_k,
    feature,
    tfexample_dict):
  """Decode the given feature from the tfexample_dict.

  Args:
    feature_k (str): Feature key in the tfexample_dict
    feature (FeatureConnector): Connector object to use to decode the field
    tfexample_dict (dict): Dict containing the data to decode.

  Returns:
    decoded_feature: The output of the feature.decode_example
  """
  if feature.serialized_keys:
    # Composite feature: its sub-fields were flattened into the example
    # dict under '<feature_k>/<sub_key>' entries; gather them back.
    data_to_decode = {
        sub_key: tfexample_dict[posixpath.join(feature_k, sub_key)]
        for sub_key in feature.serialized_keys
    }
  else:
    # Singleton feature: the value sits directly under its own key.
    data_to_decode = tfexample_dict[feature_k]
  return feature.decode_example(data_to_decode)
|
python
|
def decode_single_feature_from_dict(
feature_k,
feature,
tfexample_dict):
"""Decode the given feature from the tfexample_dict.
Args:
feature_k (str): Feature key in the tfexample_dict
feature (FeatureConnector): Connector object to use to decode the field
tfexample_dict (dict): Dict containing the data to decode.
Returns:
decoded_feature: The output of the feature.decode_example
"""
# Singleton case
if not feature.serialized_keys:
data_to_decode = tfexample_dict[feature_k]
# Feature contains sub features
else:
# Extract the sub-features from the global feature dict
data_to_decode = {
k: tfexample_dict[posixpath.join(feature_k, k)]
for k in feature.serialized_keys
}
return feature.decode_example(data_to_decode)
|
[
"def",
"decode_single_feature_from_dict",
"(",
"feature_k",
",",
"feature",
",",
"tfexample_dict",
")",
":",
"# Singleton case",
"if",
"not",
"feature",
".",
"serialized_keys",
":",
"data_to_decode",
"=",
"tfexample_dict",
"[",
"feature_k",
"]",
"# Feature contains sub features",
"else",
":",
"# Extract the sub-features from the global feature dict",
"data_to_decode",
"=",
"{",
"k",
":",
"tfexample_dict",
"[",
"posixpath",
".",
"join",
"(",
"feature_k",
",",
"k",
")",
"]",
"for",
"k",
"in",
"feature",
".",
"serialized_keys",
"}",
"return",
"feature",
".",
"decode_example",
"(",
"data_to_decode",
")"
] |
Decode the given feature from the tfexample_dict.
Args:
feature_k (str): Feature key in the tfexample_dict
feature (FeatureConnector): Connector object to use to decode the field
tfexample_dict (dict): Dict containing the data to decode.
Returns:
decoded_feature: The output of the feature.decode_example
|
[
"Decode",
"the",
"given",
"feature",
"from",
"the",
"tfexample_dict",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L627-L651
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
_assert_keys_match
|
def _assert_keys_match(keys1, keys2):
"""Ensure the two list of keys matches."""
if set(keys1) != set(keys2):
raise ValueError('{} {}'.format(list(keys1), list(keys2)))
|
python
|
def _assert_keys_match(keys1, keys2):
"""Ensure the two list of keys matches."""
if set(keys1) != set(keys2):
raise ValueError('{} {}'.format(list(keys1), list(keys2)))
|
[
"def",
"_assert_keys_match",
"(",
"keys1",
",",
"keys2",
")",
":",
"if",
"set",
"(",
"keys1",
")",
"!=",
"set",
"(",
"keys2",
")",
":",
"raise",
"ValueError",
"(",
"'{} {}'",
".",
"format",
"(",
"list",
"(",
"keys1",
")",
",",
"list",
"(",
"keys2",
")",
")",
")"
] |
Ensure the two list of keys matches.
|
[
"Ensure",
"the",
"two",
"list",
"of",
"keys",
"matches",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L654-L657
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
FeaturesDict.get_tensor_info
|
def get_tensor_info(self):
  """See base class for details."""
  # Delegate to every child feature, preserving the dict structure.
  infos = {}
  for key, feature in self._feature_dict.items():
    infos[key] = feature.get_tensor_info()
  return infos
|
python
|
def get_tensor_info(self):
"""See base class for details."""
return {
feature_key: feature.get_tensor_info()
for feature_key, feature in self._feature_dict.items()
}
|
[
"def",
"get_tensor_info",
"(",
"self",
")",
":",
"return",
"{",
"feature_key",
":",
"feature",
".",
"get_tensor_info",
"(",
")",
"for",
"feature_key",
",",
"feature",
"in",
"self",
".",
"_feature_dict",
".",
"items",
"(",
")",
"}"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L437-L442
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
FeaturesDict.get_serialized_info
|
def get_serialized_info(self):
  """See base class for details."""
  # Build the flat tf-example feature dict. NonMutableDict raises on key
  # collision, so flattened sub-feature keys can never silently clash.
  serialized_features = utils.NonMutableDict()
  for key, feature in self._feature_dict.items():
    info = feature.get_serialized_info()
    if feature.serialized_keys:
      # Composite feature: sanity-check the declared sub-keys (always true
      # by construction, since serialized_keys derives from this same call),
      # then flatten them under '<key>/<sub_key>'.
      _assert_keys_match(info.keys(), feature.serialized_keys)
      serialized_features.update({
          posixpath.join(key, sub_key): sub_info
          for sub_key, sub_info in info.items()
      })
    else:
      # Plain feature (e.g. a single tensor): stored directly under its key.
      serialized_features[key] = info
  return serialized_features
|
python
|
def get_serialized_info(self):
"""See base class for details."""
# Flatten tf-example features dict
# Use NonMutableDict to ensure there is no collision between features keys
features_dict = utils.NonMutableDict()
for feature_key, feature in self._feature_dict.items():
serialized_info = feature.get_serialized_info()
# Features can be either containers (dict of other features) or plain
# features (ex: single tensor). Plain features have a None
# feature.features_keys
if not feature.serialized_keys:
features_dict[feature_key] = serialized_info
else:
# Sanity check which should always be True, as feature.serialized_keys
# is computed using feature.get_serialized_info()
_assert_keys_match(serialized_info.keys(), feature.serialized_keys)
features_dict.update({
posixpath.join(feature_key, k): v
for k, v in serialized_info.items()
})
return features_dict
|
[
"def",
"get_serialized_info",
"(",
"self",
")",
":",
"# Flatten tf-example features dict",
"# Use NonMutableDict to ensure there is no collision between features keys",
"features_dict",
"=",
"utils",
".",
"NonMutableDict",
"(",
")",
"for",
"feature_key",
",",
"feature",
"in",
"self",
".",
"_feature_dict",
".",
"items",
"(",
")",
":",
"serialized_info",
"=",
"feature",
".",
"get_serialized_info",
"(",
")",
"# Features can be either containers (dict of other features) or plain",
"# features (ex: single tensor). Plain features have a None",
"# feature.features_keys",
"if",
"not",
"feature",
".",
"serialized_keys",
":",
"features_dict",
"[",
"feature_key",
"]",
"=",
"serialized_info",
"else",
":",
"# Sanity check which should always be True, as feature.serialized_keys",
"# is computed using feature.get_serialized_info()",
"_assert_keys_match",
"(",
"serialized_info",
".",
"keys",
"(",
")",
",",
"feature",
".",
"serialized_keys",
")",
"features_dict",
".",
"update",
"(",
"{",
"posixpath",
".",
"join",
"(",
"feature_key",
",",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"serialized_info",
".",
"items",
"(",
")",
"}",
")",
"return",
"features_dict"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L444-L466
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
FeaturesDict.encode_example
|
def encode_example(self, example_dict):
  """See base class for details."""
  # Flatten the example into tf-example form. NonMutableDict raises on key
  # collision between flattened feature keys.
  tfexample_dict = utils.NonMutableDict()
  # zip_dict pairs each feature spec with the matching example value.
  for key, (feature, value) in utils.zip_dict(
      self._feature_dict, example_dict):
    encoded = feature.encode_example(value)
    if feature.serialized_keys:
      # Composite feature: flatten its sub-fields under '<key>/<sub_key>'.
      _assert_keys_match(encoded.keys(), feature.serialized_keys)
      tfexample_dict.update({
          posixpath.join(key, sub_key): encoded[sub_key]
          for sub_key in feature.serialized_keys
      })
    else:
      # Singleton feature: stored directly under its own key.
      tfexample_dict[key] = encoded
  return tfexample_dict
|
python
|
def encode_example(self, example_dict):
"""See base class for details."""
# Flatten dict matching the tf-example features
# Use NonMutableDict to ensure there is no collision between features keys
tfexample_dict = utils.NonMutableDict()
# Iterate over example fields
for feature_key, (feature, example_value) in utils.zip_dict(
self._feature_dict, example_dict):
# Encode the field with the associated encoder
encoded_feature = feature.encode_example(example_value)
# Singleton case
if not feature.serialized_keys:
tfexample_dict[feature_key] = encoded_feature
# Feature contains sub features
else:
_assert_keys_match(encoded_feature.keys(), feature.serialized_keys)
tfexample_dict.update({
posixpath.join(feature_key, k): encoded_feature[k]
for k in feature.serialized_keys
})
return tfexample_dict
|
[
"def",
"encode_example",
"(",
"self",
",",
"example_dict",
")",
":",
"# Flatten dict matching the tf-example features",
"# Use NonMutableDict to ensure there is no collision between features keys",
"tfexample_dict",
"=",
"utils",
".",
"NonMutableDict",
"(",
")",
"# Iterate over example fields",
"for",
"feature_key",
",",
"(",
"feature",
",",
"example_value",
")",
"in",
"utils",
".",
"zip_dict",
"(",
"self",
".",
"_feature_dict",
",",
"example_dict",
")",
":",
"# Encode the field with the associated encoder",
"encoded_feature",
"=",
"feature",
".",
"encode_example",
"(",
"example_value",
")",
"# Singleton case",
"if",
"not",
"feature",
".",
"serialized_keys",
":",
"tfexample_dict",
"[",
"feature_key",
"]",
"=",
"encoded_feature",
"# Feature contains sub features",
"else",
":",
"_assert_keys_match",
"(",
"encoded_feature",
".",
"keys",
"(",
")",
",",
"feature",
".",
"serialized_keys",
")",
"tfexample_dict",
".",
"update",
"(",
"{",
"posixpath",
".",
"join",
"(",
"feature_key",
",",
"k",
")",
":",
"encoded_feature",
"[",
"k",
"]",
"for",
"k",
"in",
"feature",
".",
"serialized_keys",
"}",
")",
"return",
"tfexample_dict"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L468-L490
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
FeaturesDict.decode_example
|
def decode_example(self, tfexample_dict):
  """See base class for details."""
  # Decode every child feature from the flat example dict, rebuilding the
  # nested tensor dict keyed like self._feature_dict.
  return {
      key: decode_single_feature_from_dict(
          feature_k=key,
          feature=feature,
          tfexample_dict=tfexample_dict,
      )
      for key, feature in six.iteritems(self._feature_dict)
  }
|
python
|
def decode_example(self, tfexample_dict):
"""See base class for details."""
tensor_dict = {}
# Iterate over the Tensor dict keys
for feature_key, feature in six.iteritems(self._feature_dict):
decoded_feature = decode_single_feature_from_dict(
feature_k=feature_key,
feature=feature,
tfexample_dict=tfexample_dict,
)
tensor_dict[feature_key] = decoded_feature
return tensor_dict
|
[
"def",
"decode_example",
"(",
"self",
",",
"tfexample_dict",
")",
":",
"tensor_dict",
"=",
"{",
"}",
"# Iterate over the Tensor dict keys",
"for",
"feature_key",
",",
"feature",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_feature_dict",
")",
":",
"decoded_feature",
"=",
"decode_single_feature_from_dict",
"(",
"feature_k",
"=",
"feature_key",
",",
"feature",
"=",
"feature",
",",
"tfexample_dict",
"=",
"tfexample_dict",
",",
")",
"tensor_dict",
"[",
"feature_key",
"]",
"=",
"decoded_feature",
"return",
"tensor_dict"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L492-L503
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
FeaturesDict.save_metadata
|
def save_metadata(self, data_dir, feature_name=None):
  """See base class for details."""
  # Recursively persist metadata for every child feature. When a parent
  # name is given, the child key is prefixed with it (joined by '-').
  for key, feature in six.iteritems(self._feature_dict):
    child_name = key if not feature_name else '-'.join((feature_name, key))
    feature.save_metadata(data_dir, feature_name=child_name)
|
python
|
def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Recursively save all child features
for feature_key, feature in six.iteritems(self._feature_dict):
if feature_name:
feature_key = '-'.join((feature_name, feature_key))
feature.save_metadata(data_dir, feature_name=feature_key)
|
[
"def",
"save_metadata",
"(",
"self",
",",
"data_dir",
",",
"feature_name",
"=",
"None",
")",
":",
"# Recursively save all child features",
"for",
"feature_key",
",",
"feature",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_feature_dict",
")",
":",
"if",
"feature_name",
":",
"feature_key",
"=",
"'-'",
".",
"join",
"(",
"(",
"feature_name",
",",
"feature_key",
")",
")",
"feature",
".",
"save_metadata",
"(",
"data_dir",
",",
"feature_name",
"=",
"feature_key",
")"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L508-L514
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
Tensor.encode_example
|
def encode_example(self, example_data):
  """See base class for details."""
  np_dtype = np.dtype(self._dtype.as_numpy_dtype)
  if not isinstance(example_data, np.ndarray):
    # Accept plain Python values/sequences by converting them up front.
    example_data = np.array(example_data, dtype=np_dtype)
  # Validate dtype and shape against the feature spec.
  if example_data.dtype != np_dtype:
    raise ValueError('Dtype {} do not match {}'.format(
        example_data.dtype, np_dtype))
  utils.assert_shape_match(example_data.shape, self._shape)
  if example_data.dtype == np.bool_:
    # tf.train.Example has no bool type, so booleans become integers.
    return example_data.astype(int)
  return example_data
|
python
|
def encode_example(self, example_data):
"""See base class for details."""
np_dtype = np.dtype(self._dtype.as_numpy_dtype)
# Convert to numpy if possible
if not isinstance(example_data, np.ndarray):
example_data = np.array(example_data, dtype=np_dtype)
# Ensure the shape and dtype match
if example_data.dtype != np_dtype:
raise ValueError('Dtype {} do not match {}'.format(
example_data.dtype, np_dtype))
utils.assert_shape_match(example_data.shape, self._shape)
# For booleans, convert to integer (tf.train.Example does not support bool)
if example_data.dtype == np.bool_:
example_data = example_data.astype(int)
return example_data
|
[
"def",
"encode_example",
"(",
"self",
",",
"example_data",
")",
":",
"np_dtype",
"=",
"np",
".",
"dtype",
"(",
"self",
".",
"_dtype",
".",
"as_numpy_dtype",
")",
"# Convert to numpy if possible",
"if",
"not",
"isinstance",
"(",
"example_data",
",",
"np",
".",
"ndarray",
")",
":",
"example_data",
"=",
"np",
".",
"array",
"(",
"example_data",
",",
"dtype",
"=",
"np_dtype",
")",
"# Ensure the shape and dtype match",
"if",
"example_data",
".",
"dtype",
"!=",
"np_dtype",
":",
"raise",
"ValueError",
"(",
"'Dtype {} do not match {}'",
".",
"format",
"(",
"example_data",
".",
"dtype",
",",
"np_dtype",
")",
")",
"utils",
".",
"assert_shape_match",
"(",
"example_data",
".",
"shape",
",",
"self",
".",
"_shape",
")",
"# For booleans, convert to integer (tf.train.Example does not support bool)",
"if",
"example_data",
".",
"dtype",
"==",
"np",
".",
"bool_",
":",
"example_data",
"=",
"example_data",
".",
"astype",
"(",
"int",
")",
"return",
"example_data"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L548-L562
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/feature.py
|
Tensor.decode_example
|
def decode_example(self, tfexample_data):
  """See base class for details."""
  # TODO(epot): Support dynamic shape
  decoded = tfexample_data
  if self.shape.count(None) < 2:
    # Restore the shape if possible. TF Example flattened it; at most one
    # unknown dimension can be recovered with a -1 placeholder.
    target_shape = [-1 if dim is None else dim for dim in self.shape]
    decoded = tf.reshape(decoded, target_shape)
  if decoded.dtype != self.dtype:
    # Undo the int64/float32 widening applied at serialization time.
    decoded = tf.dtypes.cast(decoded, self.dtype)
  return decoded
|
python
|
def decode_example(self, tfexample_data):
"""See base class for details."""
# TODO(epot): Support dynamic shape
if self.shape.count(None) < 2:
# Restore the shape if possible. TF Example flattened it.
shape = [-1 if i is None else i for i in self.shape]
tfexample_data = tf.reshape(tfexample_data, shape)
if tfexample_data.dtype != self.dtype:
tfexample_data = tf.dtypes.cast(tfexample_data, self.dtype)
return tfexample_data
|
[
"def",
"decode_example",
"(",
"self",
",",
"tfexample_data",
")",
":",
"# TODO(epot): Support dynamic shape",
"if",
"self",
".",
"shape",
".",
"count",
"(",
"None",
")",
"<",
"2",
":",
"# Restore the shape if possible. TF Example flattened it.",
"shape",
"=",
"[",
"-",
"1",
"if",
"i",
"is",
"None",
"else",
"i",
"for",
"i",
"in",
"self",
".",
"shape",
"]",
"tfexample_data",
"=",
"tf",
".",
"reshape",
"(",
"tfexample_data",
",",
"shape",
")",
"if",
"tfexample_data",
".",
"dtype",
"!=",
"self",
".",
"dtype",
":",
"tfexample_data",
"=",
"tf",
".",
"dtypes",
".",
"cast",
"(",
"tfexample_data",
",",
"self",
".",
"dtype",
")",
"return",
"tfexample_data"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L564-L573
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/celeba.py
|
CelebA._process_celeba_config_file
|
def _process_celeba_config_file(self, file_path):
"""Unpack the celeba config file.
The file starts with the number of lines, and a header.
Afterwards, there is a configuration for each file: one per line.
Args:
file_path: Path to the file with the configuration.
Returns:
keys: names of the attributes
values: map from the file name to the list of attribute values for
this file.
"""
with tf.io.gfile.GFile(file_path) as f:
data_raw = f.read()
lines = data_raw.split("\n")
keys = lines[1].strip().split()
values = {}
# Go over each line (skip the last one, as it is empty).
for line in lines[2:-1]:
row_values = line.strip().split()
# Each row start with the 'file_name' and then space-separated values.
values[row_values[0]] = [int(v) for v in row_values[1:]]
return keys, values
|
python
|
def _process_celeba_config_file(self, file_path):
"""Unpack the celeba config file.
The file starts with the number of lines, and a header.
Afterwards, there is a configuration for each file: one per line.
Args:
file_path: Path to the file with the configuration.
Returns:
keys: names of the attributes
values: map from the file name to the list of attribute values for
this file.
"""
with tf.io.gfile.GFile(file_path) as f:
data_raw = f.read()
lines = data_raw.split("\n")
keys = lines[1].strip().split()
values = {}
# Go over each line (skip the last one, as it is empty).
for line in lines[2:-1]:
row_values = line.strip().split()
# Each row start with the 'file_name' and then space-separated values.
values[row_values[0]] = [int(v) for v in row_values[1:]]
return keys, values
|
[
"def",
"_process_celeba_config_file",
"(",
"self",
",",
"file_path",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"file_path",
")",
"as",
"f",
":",
"data_raw",
"=",
"f",
".",
"read",
"(",
")",
"lines",
"=",
"data_raw",
".",
"split",
"(",
"\"\\n\"",
")",
"keys",
"=",
"lines",
"[",
"1",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"values",
"=",
"{",
"}",
"# Go over each line (skip the last one, as it is empty).",
"for",
"line",
"in",
"lines",
"[",
"2",
":",
"-",
"1",
"]",
":",
"row_values",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Each row start with the 'file_name' and then space-separated values.",
"values",
"[",
"row_values",
"[",
"0",
"]",
"]",
"=",
"[",
"int",
"(",
"v",
")",
"for",
"v",
"in",
"row_values",
"[",
"1",
":",
"]",
"]",
"return",
"keys",
",",
"values"
] |
Unpack the celeba config file.
The file starts with the number of lines, and a header.
Afterwards, there is a configuration for each file: one per line.
Args:
file_path: Path to the file with the configuration.
Returns:
keys: names of the attributes
values: map from the file name to the list of attribute values for
this file.
|
[
"Unpack",
"the",
"celeba",
"config",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/celeba.py#L150-L175
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/celeba.py
|
CelebA._generate_examples
|
def _generate_examples(self, file_id, extracted_dirs):
"""Yields examples."""
filedir = os.path.join(extracted_dirs["img_align_celeba"],
"img_align_celeba")
img_list_path = extracted_dirs["list_eval_partition"]
landmarks_path = extracted_dirs["landmarks_celeba"]
attr_path = extracted_dirs["list_attr_celeba"]
with tf.io.gfile.GFile(img_list_path) as f:
files = [
line.split()[0]
for line in f.readlines()
if int(line.split()[1]) == file_id
]
attributes = self._process_celeba_config_file(attr_path)
landmarks = self._process_celeba_config_file(landmarks_path)
for file_name in sorted(files):
path = os.path.join(filedir, file_name)
yield {
"image": path,
"landmarks": {
k: v for k, v in zip(landmarks[0], landmarks[1][file_name])
},
"attributes": {
# atributes value are either 1 or -1, so convert to bool
k: v > 0 for k, v in zip(attributes[0], attributes[1][file_name])
},
}
|
python
|
def _generate_examples(self, file_id, extracted_dirs):
"""Yields examples."""
filedir = os.path.join(extracted_dirs["img_align_celeba"],
"img_align_celeba")
img_list_path = extracted_dirs["list_eval_partition"]
landmarks_path = extracted_dirs["landmarks_celeba"]
attr_path = extracted_dirs["list_attr_celeba"]
with tf.io.gfile.GFile(img_list_path) as f:
files = [
line.split()[0]
for line in f.readlines()
if int(line.split()[1]) == file_id
]
attributes = self._process_celeba_config_file(attr_path)
landmarks = self._process_celeba_config_file(landmarks_path)
for file_name in sorted(files):
path = os.path.join(filedir, file_name)
yield {
"image": path,
"landmarks": {
k: v for k, v in zip(landmarks[0], landmarks[1][file_name])
},
"attributes": {
# atributes value are either 1 or -1, so convert to bool
k: v > 0 for k, v in zip(attributes[0], attributes[1][file_name])
},
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"file_id",
",",
"extracted_dirs",
")",
":",
"filedir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"extracted_dirs",
"[",
"\"img_align_celeba\"",
"]",
",",
"\"img_align_celeba\"",
")",
"img_list_path",
"=",
"extracted_dirs",
"[",
"\"list_eval_partition\"",
"]",
"landmarks_path",
"=",
"extracted_dirs",
"[",
"\"landmarks_celeba\"",
"]",
"attr_path",
"=",
"extracted_dirs",
"[",
"\"list_attr_celeba\"",
"]",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"img_list_path",
")",
"as",
"f",
":",
"files",
"=",
"[",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
"if",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"1",
"]",
")",
"==",
"file_id",
"]",
"attributes",
"=",
"self",
".",
"_process_celeba_config_file",
"(",
"attr_path",
")",
"landmarks",
"=",
"self",
".",
"_process_celeba_config_file",
"(",
"landmarks_path",
")",
"for",
"file_name",
"in",
"sorted",
"(",
"files",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filedir",
",",
"file_name",
")",
"yield",
"{",
"\"image\"",
":",
"path",
",",
"\"landmarks\"",
":",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"landmarks",
"[",
"0",
"]",
",",
"landmarks",
"[",
"1",
"]",
"[",
"file_name",
"]",
")",
"}",
",",
"\"attributes\"",
":",
"{",
"# atributes value are either 1 or -1, so convert to bool",
"k",
":",
"v",
">",
"0",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"attributes",
"[",
"0",
"]",
",",
"attributes",
"[",
"1",
"]",
"[",
"file_name",
"]",
")",
"}",
",",
"}"
] |
Yields examples.
|
[
"Yields",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/celeba.py#L177-L207
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/quickdraw.py
|
QuickdrawBitmap._generate_examples
|
def _generate_examples(self, file_paths):
"""Generate QuickDraw bitmap examples.
Given a list of file paths with data for each class label, generate examples
in a random order.
Args:
file_paths: (dict of {str: str}) the paths to files containing the data,
indexed by label.
Yields:
The QuickDraw examples, as defined in the dataset info features.
"""
for label, path in sorted(file_paths.items(), key=lambda x: x[0]):
with tf.io.gfile.GFile(path, "rb") as f:
class_images = np.load(f)
for np_image in class_images:
yield {
"image": np_image.reshape(_QUICKDRAW_IMAGE_SHAPE),
"label": label,
}
|
python
|
def _generate_examples(self, file_paths):
"""Generate QuickDraw bitmap examples.
Given a list of file paths with data for each class label, generate examples
in a random order.
Args:
file_paths: (dict of {str: str}) the paths to files containing the data,
indexed by label.
Yields:
The QuickDraw examples, as defined in the dataset info features.
"""
for label, path in sorted(file_paths.items(), key=lambda x: x[0]):
with tf.io.gfile.GFile(path, "rb") as f:
class_images = np.load(f)
for np_image in class_images:
yield {
"image": np_image.reshape(_QUICKDRAW_IMAGE_SHAPE),
"label": label,
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"file_paths",
")",
":",
"for",
"label",
",",
"path",
"in",
"sorted",
"(",
"file_paths",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"class_images",
"=",
"np",
".",
"load",
"(",
"f",
")",
"for",
"np_image",
"in",
"class_images",
":",
"yield",
"{",
"\"image\"",
":",
"np_image",
".",
"reshape",
"(",
"_QUICKDRAW_IMAGE_SHAPE",
")",
",",
"\"label\"",
":",
"label",
",",
"}"
] |
Generate QuickDraw bitmap examples.
Given a list of file paths with data for each class label, generate examples
in a random order.
Args:
file_paths: (dict of {str: str}) the paths to files containing the data,
indexed by label.
Yields:
The QuickDraw examples, as defined in the dataset info features.
|
[
"Generate",
"QuickDraw",
"bitmap",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/quickdraw.py#L97-L117
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/tf_compat.py
|
ensure_tf_install
|
def ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not "
"installed by default when you install TensorFlow Datasets. This is "
"so that users can decide whether to install the GPU-enabled "
"TensorFlow package. To use TensorFlow Datasets, please install the "
"most recent version of TensorFlow, by following instructions at "
"https://tensorflow.org/install.\n\n")
raise
tf_version = distutils.version.LooseVersion(tf.__version__)
v_1_12 = distutils.version.LooseVersion("1.12.0")
if tf_version < v_1_12:
raise ImportError(
"This version of TensorFlow Datasets requires TensorFlow "
"version >= {required}; Detected an installation of version {present}. "
"Please upgrade TensorFlow to proceed.".format(
required="1.12.0",
present=tf.__version__))
_patch_tf(tf)
|
python
|
def ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not "
"installed by default when you install TensorFlow Datasets. This is "
"so that users can decide whether to install the GPU-enabled "
"TensorFlow package. To use TensorFlow Datasets, please install the "
"most recent version of TensorFlow, by following instructions at "
"https://tensorflow.org/install.\n\n")
raise
tf_version = distutils.version.LooseVersion(tf.__version__)
v_1_12 = distutils.version.LooseVersion("1.12.0")
if tf_version < v_1_12:
raise ImportError(
"This version of TensorFlow Datasets requires TensorFlow "
"version >= {required}; Detected an installation of version {present}. "
"Please upgrade TensorFlow to proceed.".format(
required="1.12.0",
present=tf.__version__))
_patch_tf(tf)
|
[
"def",
"ensure_tf_install",
"(",
")",
":",
"# pylint: disable=g-statement-before-imports",
"try",
":",
"import",
"tensorflow",
"as",
"tf",
"except",
"ImportError",
":",
"# Print more informative error message, then reraise.",
"print",
"(",
"\"\\n\\nFailed to import TensorFlow. Please note that TensorFlow is not \"",
"\"installed by default when you install TensorFlow Datasets. This is \"",
"\"so that users can decide whether to install the GPU-enabled \"",
"\"TensorFlow package. To use TensorFlow Datasets, please install the \"",
"\"most recent version of TensorFlow, by following instructions at \"",
"\"https://tensorflow.org/install.\\n\\n\"",
")",
"raise",
"tf_version",
"=",
"distutils",
".",
"version",
".",
"LooseVersion",
"(",
"tf",
".",
"__version__",
")",
"v_1_12",
"=",
"distutils",
".",
"version",
".",
"LooseVersion",
"(",
"\"1.12.0\"",
")",
"if",
"tf_version",
"<",
"v_1_12",
":",
"raise",
"ImportError",
"(",
"\"This version of TensorFlow Datasets requires TensorFlow \"",
"\"version >= {required}; Detected an installation of version {present}. \"",
"\"Please upgrade TensorFlow to proceed.\"",
".",
"format",
"(",
"required",
"=",
"\"1.12.0\"",
",",
"present",
"=",
"tf",
".",
"__version__",
")",
")",
"_patch_tf",
"(",
"tf",
")"
] |
Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
|
[
"Attempt",
"to",
"import",
"tensorflow",
"and",
"ensure",
"its",
"version",
"is",
"sufficient",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L39-L67
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/tf_compat.py
|
_patch_tf
|
def _patch_tf(tf):
"""Patch TF to maintain compatibility across versions."""
global TF_PATCH
if TF_PATCH:
return
v_1_12 = distutils.version.LooseVersion("1.12.0")
v_1_13 = distutils.version.LooseVersion("1.13.0")
v_2 = distutils.version.LooseVersion("2.0.0")
tf_version = distutils.version.LooseVersion(tf.__version__)
if v_1_12 <= tf_version < v_1_13:
# TODO(b/123930850): remove when 1.13 is stable.
TF_PATCH = "tf1_12"
_patch_for_tf1_12(tf)
elif v_1_13 <= tf_version < v_2:
TF_PATCH = "tf1_13"
_patch_for_tf1_13(tf)
else:
TF_PATCH = "tf2"
_patch_for_tf2(tf)
|
python
|
def _patch_tf(tf):
"""Patch TF to maintain compatibility across versions."""
global TF_PATCH
if TF_PATCH:
return
v_1_12 = distutils.version.LooseVersion("1.12.0")
v_1_13 = distutils.version.LooseVersion("1.13.0")
v_2 = distutils.version.LooseVersion("2.0.0")
tf_version = distutils.version.LooseVersion(tf.__version__)
if v_1_12 <= tf_version < v_1_13:
# TODO(b/123930850): remove when 1.13 is stable.
TF_PATCH = "tf1_12"
_patch_for_tf1_12(tf)
elif v_1_13 <= tf_version < v_2:
TF_PATCH = "tf1_13"
_patch_for_tf1_13(tf)
else:
TF_PATCH = "tf2"
_patch_for_tf2(tf)
|
[
"def",
"_patch_tf",
"(",
"tf",
")",
":",
"global",
"TF_PATCH",
"if",
"TF_PATCH",
":",
"return",
"v_1_12",
"=",
"distutils",
".",
"version",
".",
"LooseVersion",
"(",
"\"1.12.0\"",
")",
"v_1_13",
"=",
"distutils",
".",
"version",
".",
"LooseVersion",
"(",
"\"1.13.0\"",
")",
"v_2",
"=",
"distutils",
".",
"version",
".",
"LooseVersion",
"(",
"\"2.0.0\"",
")",
"tf_version",
"=",
"distutils",
".",
"version",
".",
"LooseVersion",
"(",
"tf",
".",
"__version__",
")",
"if",
"v_1_12",
"<=",
"tf_version",
"<",
"v_1_13",
":",
"# TODO(b/123930850): remove when 1.13 is stable.",
"TF_PATCH",
"=",
"\"tf1_12\"",
"_patch_for_tf1_12",
"(",
"tf",
")",
"elif",
"v_1_13",
"<=",
"tf_version",
"<",
"v_2",
":",
"TF_PATCH",
"=",
"\"tf1_13\"",
"_patch_for_tf1_13",
"(",
"tf",
")",
"else",
":",
"TF_PATCH",
"=",
"\"tf2\"",
"_patch_for_tf2",
"(",
"tf",
")"
] |
Patch TF to maintain compatibility across versions.
|
[
"Patch",
"TF",
"to",
"maintain",
"compatibility",
"across",
"versions",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L70-L89
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/tf_compat.py
|
_patch_for_tf1_12
|
def _patch_for_tf1_12(tf):
"""Monkey patch tf 1.12 so tfds can use it."""
tf.io.gfile = tf.gfile
tf.io.gfile.copy = tf.gfile.Copy
tf.io.gfile.exists = tf.gfile.Exists
tf.io.gfile.glob = tf.gfile.Glob
tf.io.gfile.isdir = tf.gfile.IsDirectory
tf.io.gfile.listdir = tf.gfile.ListDirectory
tf.io.gfile.makedirs = tf.gfile.MakeDirs
tf.io.gfile.mkdir = tf.gfile.MkDir
tf.io.gfile.remove = tf.gfile.Remove
tf.io.gfile.rename = tf.gfile.Rename
tf.io.gfile.rmtree = tf.gfile.DeleteRecursively
tf.io.gfile.stat = tf.gfile.Stat
tf.io.gfile.walk = tf.gfile.Walk
tf.io.gfile.GFile = tf.gfile.GFile
tf.data.experimental = tf.contrib.data
tf.compat.v1 = types.ModuleType("tf.compat.v1")
tf.compat.v1.assert_greater = tf.assert_greater
tf.compat.v1.placeholder = tf.placeholder
tf.compat.v1.ConfigProto = tf.ConfigProto
tf.compat.v1.Session = tf.Session
tf.compat.v1.enable_eager_execution = tf.enable_eager_execution
tf.compat.v1.io = tf.io
tf.compat.v1.data = tf.data
tf.compat.v1.data.Dataset = tf.data.Dataset
tf.compat.v1.data.make_one_shot_iterator = (
lambda ds: ds.make_one_shot_iterator())
tf.compat.v1.train = tf.train
tf.compat.v1.global_variables_initializer = tf.global_variables_initializer
tf.compat.v1.test = tf.test
tf.compat.v1.test.get_temp_dir = tf.test.get_temp_dir
tf.nest = tf.contrib.framework.nest
|
python
|
def _patch_for_tf1_12(tf):
"""Monkey patch tf 1.12 so tfds can use it."""
tf.io.gfile = tf.gfile
tf.io.gfile.copy = tf.gfile.Copy
tf.io.gfile.exists = tf.gfile.Exists
tf.io.gfile.glob = tf.gfile.Glob
tf.io.gfile.isdir = tf.gfile.IsDirectory
tf.io.gfile.listdir = tf.gfile.ListDirectory
tf.io.gfile.makedirs = tf.gfile.MakeDirs
tf.io.gfile.mkdir = tf.gfile.MkDir
tf.io.gfile.remove = tf.gfile.Remove
tf.io.gfile.rename = tf.gfile.Rename
tf.io.gfile.rmtree = tf.gfile.DeleteRecursively
tf.io.gfile.stat = tf.gfile.Stat
tf.io.gfile.walk = tf.gfile.Walk
tf.io.gfile.GFile = tf.gfile.GFile
tf.data.experimental = tf.contrib.data
tf.compat.v1 = types.ModuleType("tf.compat.v1")
tf.compat.v1.assert_greater = tf.assert_greater
tf.compat.v1.placeholder = tf.placeholder
tf.compat.v1.ConfigProto = tf.ConfigProto
tf.compat.v1.Session = tf.Session
tf.compat.v1.enable_eager_execution = tf.enable_eager_execution
tf.compat.v1.io = tf.io
tf.compat.v1.data = tf.data
tf.compat.v1.data.Dataset = tf.data.Dataset
tf.compat.v1.data.make_one_shot_iterator = (
lambda ds: ds.make_one_shot_iterator())
tf.compat.v1.train = tf.train
tf.compat.v1.global_variables_initializer = tf.global_variables_initializer
tf.compat.v1.test = tf.test
tf.compat.v1.test.get_temp_dir = tf.test.get_temp_dir
tf.nest = tf.contrib.framework.nest
|
[
"def",
"_patch_for_tf1_12",
"(",
"tf",
")",
":",
"tf",
".",
"io",
".",
"gfile",
"=",
"tf",
".",
"gfile",
"tf",
".",
"io",
".",
"gfile",
".",
"copy",
"=",
"tf",
".",
"gfile",
".",
"Copy",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"=",
"tf",
".",
"gfile",
".",
"Exists",
"tf",
".",
"io",
".",
"gfile",
".",
"glob",
"=",
"tf",
".",
"gfile",
".",
"Glob",
"tf",
".",
"io",
".",
"gfile",
".",
"isdir",
"=",
"tf",
".",
"gfile",
".",
"IsDirectory",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"=",
"tf",
".",
"gfile",
".",
"ListDirectory",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"=",
"tf",
".",
"gfile",
".",
"MakeDirs",
"tf",
".",
"io",
".",
"gfile",
".",
"mkdir",
"=",
"tf",
".",
"gfile",
".",
"MkDir",
"tf",
".",
"io",
".",
"gfile",
".",
"remove",
"=",
"tf",
".",
"gfile",
".",
"Remove",
"tf",
".",
"io",
".",
"gfile",
".",
"rename",
"=",
"tf",
".",
"gfile",
".",
"Rename",
"tf",
".",
"io",
".",
"gfile",
".",
"rmtree",
"=",
"tf",
".",
"gfile",
".",
"DeleteRecursively",
"tf",
".",
"io",
".",
"gfile",
".",
"stat",
"=",
"tf",
".",
"gfile",
".",
"Stat",
"tf",
".",
"io",
".",
"gfile",
".",
"walk",
"=",
"tf",
".",
"gfile",
".",
"Walk",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"=",
"tf",
".",
"gfile",
".",
"GFile",
"tf",
".",
"data",
".",
"experimental",
"=",
"tf",
".",
"contrib",
".",
"data",
"tf",
".",
"compat",
".",
"v1",
"=",
"types",
".",
"ModuleType",
"(",
"\"tf.compat.v1\"",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_greater",
"=",
"tf",
".",
"assert_greater",
"tf",
".",
"compat",
".",
"v1",
".",
"placeholder",
"=",
"tf",
".",
"placeholder",
"tf",
".",
"compat",
".",
"v1",
".",
"ConfigProto",
"=",
"tf",
".",
"ConfigProto",
"tf",
".",
"compat",
".",
"v1",
".",
"Session",
"=",
"tf",
".",
"Session",
"tf",
".",
"compat",
".",
"v1",
".",
"enable_eager_execution",
"=",
"tf",
".",
"enable_eager_execution",
"tf",
".",
"compat",
".",
"v1",
".",
"io",
"=",
"tf",
".",
"io",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
"=",
"tf",
".",
"data",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"Dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"=",
"(",
"lambda",
"ds",
":",
"ds",
".",
"make_one_shot_iterator",
"(",
")",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"train",
"=",
"tf",
".",
"train",
"tf",
".",
"compat",
".",
"v1",
".",
"global_variables_initializer",
"=",
"tf",
".",
"global_variables_initializer",
"tf",
".",
"compat",
".",
"v1",
".",
"test",
"=",
"tf",
".",
"test",
"tf",
".",
"compat",
".",
"v1",
".",
"test",
".",
"get_temp_dir",
"=",
"tf",
".",
"test",
".",
"get_temp_dir",
"tf",
".",
"nest",
"=",
"tf",
".",
"contrib",
".",
"framework",
".",
"nest"
] |
Monkey patch tf 1.12 so tfds can use it.
|
[
"Monkey",
"patch",
"tf",
"1",
".",
"12",
"so",
"tfds",
"can",
"use",
"it",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L100-L132
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/tf_compat.py
|
_patch_for_tf1_13
|
def _patch_for_tf1_13(tf):
"""Monkey patch tf 1.13 so tfds can use it."""
if not hasattr(tf.io.gfile, "GFile"):
tf.io.gfile.GFile = tf.gfile.GFile
if not hasattr(tf, "nest"):
tf.nest = tf.contrib.framework.nest
if not hasattr(tf.compat, "v2"):
tf.compat.v2 = types.ModuleType("tf.compat.v2")
tf.compat.v2.data = types.ModuleType("tf.compat.v2.data")
from tensorflow.python.data.ops import dataset_ops
tf.compat.v2.data.Dataset = dataset_ops.DatasetV2
if not hasattr(tf.compat.v2.data.Dataset, "output_shapes"):
from tensorflow.python.data.ops import dataset_ops
if hasattr(dataset_ops, "get_legacy_output_shapes"):
tf.compat.v2.data.Dataset.output_shapes = property(
dataset_ops.get_legacy_output_shapes)
tf.compat.v2.data.Dataset.output_types = property(
dataset_ops.get_legacy_output_types)
|
python
|
def _patch_for_tf1_13(tf):
"""Monkey patch tf 1.13 so tfds can use it."""
if not hasattr(tf.io.gfile, "GFile"):
tf.io.gfile.GFile = tf.gfile.GFile
if not hasattr(tf, "nest"):
tf.nest = tf.contrib.framework.nest
if not hasattr(tf.compat, "v2"):
tf.compat.v2 = types.ModuleType("tf.compat.v2")
tf.compat.v2.data = types.ModuleType("tf.compat.v2.data")
from tensorflow.python.data.ops import dataset_ops
tf.compat.v2.data.Dataset = dataset_ops.DatasetV2
if not hasattr(tf.compat.v2.data.Dataset, "output_shapes"):
from tensorflow.python.data.ops import dataset_ops
if hasattr(dataset_ops, "get_legacy_output_shapes"):
tf.compat.v2.data.Dataset.output_shapes = property(
dataset_ops.get_legacy_output_shapes)
tf.compat.v2.data.Dataset.output_types = property(
dataset_ops.get_legacy_output_types)
|
[
"def",
"_patch_for_tf1_13",
"(",
"tf",
")",
":",
"if",
"not",
"hasattr",
"(",
"tf",
".",
"io",
".",
"gfile",
",",
"\"GFile\"",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"=",
"tf",
".",
"gfile",
".",
"GFile",
"if",
"not",
"hasattr",
"(",
"tf",
",",
"\"nest\"",
")",
":",
"tf",
".",
"nest",
"=",
"tf",
".",
"contrib",
".",
"framework",
".",
"nest",
"if",
"not",
"hasattr",
"(",
"tf",
".",
"compat",
",",
"\"v2\"",
")",
":",
"tf",
".",
"compat",
".",
"v2",
"=",
"types",
".",
"ModuleType",
"(",
"\"tf.compat.v2\"",
")",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
"=",
"types",
".",
"ModuleType",
"(",
"\"tf.compat.v2.data\"",
")",
"from",
"tensorflow",
".",
"python",
".",
"data",
".",
"ops",
"import",
"dataset_ops",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
".",
"Dataset",
"=",
"dataset_ops",
".",
"DatasetV2",
"if",
"not",
"hasattr",
"(",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
".",
"Dataset",
",",
"\"output_shapes\"",
")",
":",
"from",
"tensorflow",
".",
"python",
".",
"data",
".",
"ops",
"import",
"dataset_ops",
"if",
"hasattr",
"(",
"dataset_ops",
",",
"\"get_legacy_output_shapes\"",
")",
":",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
".",
"Dataset",
".",
"output_shapes",
"=",
"property",
"(",
"dataset_ops",
".",
"get_legacy_output_shapes",
")",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
".",
"Dataset",
".",
"output_types",
"=",
"property",
"(",
"dataset_ops",
".",
"get_legacy_output_types",
")"
] |
Monkey patch tf 1.13 so tfds can use it.
|
[
"Monkey",
"patch",
"tf",
"1",
".",
"13",
"so",
"tfds",
"can",
"use",
"it",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L135-L152
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/tf_compat.py
|
is_dataset
|
def is_dataset(ds):
"""Whether ds is a Dataset. Compatible across TF versions."""
import tensorflow as tf
from tensorflow_datasets.core.utils import py_utils
dataset_types = [tf.data.Dataset]
v1_ds = py_utils.rgetattr(tf, "compat.v1.data.Dataset", None)
v2_ds = py_utils.rgetattr(tf, "compat.v2.data.Dataset", None)
if v1_ds is not None:
dataset_types.append(v1_ds)
if v2_ds is not None:
dataset_types.append(v2_ds)
return isinstance(ds, tuple(dataset_types))
|
python
|
def is_dataset(ds):
"""Whether ds is a Dataset. Compatible across TF versions."""
import tensorflow as tf
from tensorflow_datasets.core.utils import py_utils
dataset_types = [tf.data.Dataset]
v1_ds = py_utils.rgetattr(tf, "compat.v1.data.Dataset", None)
v2_ds = py_utils.rgetattr(tf, "compat.v2.data.Dataset", None)
if v1_ds is not None:
dataset_types.append(v1_ds)
if v2_ds is not None:
dataset_types.append(v2_ds)
return isinstance(ds, tuple(dataset_types))
|
[
"def",
"is_dataset",
"(",
"ds",
")",
":",
"import",
"tensorflow",
"as",
"tf",
"from",
"tensorflow_datasets",
".",
"core",
".",
"utils",
"import",
"py_utils",
"dataset_types",
"=",
"[",
"tf",
".",
"data",
".",
"Dataset",
"]",
"v1_ds",
"=",
"py_utils",
".",
"rgetattr",
"(",
"tf",
",",
"\"compat.v1.data.Dataset\"",
",",
"None",
")",
"v2_ds",
"=",
"py_utils",
".",
"rgetattr",
"(",
"tf",
",",
"\"compat.v2.data.Dataset\"",
",",
"None",
")",
"if",
"v1_ds",
"is",
"not",
"None",
":",
"dataset_types",
".",
"append",
"(",
"v1_ds",
")",
"if",
"v2_ds",
"is",
"not",
"None",
":",
"dataset_types",
".",
"append",
"(",
"v2_ds",
")",
"return",
"isinstance",
"(",
"ds",
",",
"tuple",
"(",
"dataset_types",
")",
")"
] |
Whether ds is a Dataset. Compatible across TF versions.
|
[
"Whether",
"ds",
"is",
"a",
"Dataset",
".",
"Compatible",
"across",
"TF",
"versions",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L155-L166
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/ted_multi.py
|
TedMultiTranslate._generate_examples
|
def _generate_examples(self, data_file):
"""This function returns the examples in the raw (text) form."""
with tf.io.gfile.GFile(data_file) as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
# Everything in the row except for 'talk_name' will be a translation.
# Missing/incomplete translations will contain the string "__NULL__" or
# "_ _ NULL _ _".
yield {
'translations': {
lang: text
for lang, text in six.iteritems(row)
if lang != 'talk_name' and _is_translation_complete(text)
},
'talk_name': row['talk_name']
}
|
python
|
def _generate_examples(self, data_file):
"""This function returns the examples in the raw (text) form."""
with tf.io.gfile.GFile(data_file) as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
# Everything in the row except for 'talk_name' will be a translation.
# Missing/incomplete translations will contain the string "__NULL__" or
# "_ _ NULL _ _".
yield {
'translations': {
lang: text
for lang, text in six.iteritems(row)
if lang != 'talk_name' and _is_translation_complete(text)
},
'talk_name': row['talk_name']
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"data_file",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"data_file",
")",
"as",
"f",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"f",
",",
"delimiter",
"=",
"'\\t'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_NONE",
")",
"for",
"row",
"in",
"reader",
":",
"# Everything in the row except for 'talk_name' will be a translation.",
"# Missing/incomplete translations will contain the string \"__NULL__\" or",
"# \"_ _ NULL _ _\".",
"yield",
"{",
"'translations'",
":",
"{",
"lang",
":",
"text",
"for",
"lang",
",",
"text",
"in",
"six",
".",
"iteritems",
"(",
"row",
")",
"if",
"lang",
"!=",
"'talk_name'",
"and",
"_is_translation_complete",
"(",
"text",
")",
"}",
",",
"'talk_name'",
":",
"row",
"[",
"'talk_name'",
"]",
"}"
] |
This function returns the examples in the raw (text) form.
|
[
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/ted_multi.py#L108-L123
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/multi_nli.py
|
MultiNLI._generate_examples
|
def _generate_examples(self, filepath):
"""Generate mnli examples.
Args:
filepath: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
"""
for idx, line in enumerate(tf.io.gfile.GFile(filepath, "rb")):
if idx == 0:
continue # skip header
line = tf.compat.as_text(line.strip())
split_line = line.split("\t")
# Examples not marked with a three out of five consensus are marked with
# "-" and should not be used in standard evaluations.
if split_line[0] == "-":
continue
# Works for both splits even though dev has some extra human labels.
yield {
"premise": split_line[5],
"hypothesis": split_line[6],
"label": split_line[0]
}
|
python
|
def _generate_examples(self, filepath):
"""Generate mnli examples.
Args:
filepath: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
"""
for idx, line in enumerate(tf.io.gfile.GFile(filepath, "rb")):
if idx == 0:
continue # skip header
line = tf.compat.as_text(line.strip())
split_line = line.split("\t")
# Examples not marked with a three out of five consensus are marked with
# "-" and should not be used in standard evaluations.
if split_line[0] == "-":
continue
# Works for both splits even though dev has some extra human labels.
yield {
"premise": split_line[5],
"hypothesis": split_line[6],
"label": split_line[0]
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"for",
"idx",
",",
"line",
"in",
"enumerate",
"(",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
",",
"\"rb\"",
")",
")",
":",
"if",
"idx",
"==",
"0",
":",
"continue",
"# skip header",
"line",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"line",
".",
"strip",
"(",
")",
")",
"split_line",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"# Examples not marked with a three out of five consensus are marked with",
"# \"-\" and should not be used in standard evaluations.",
"if",
"split_line",
"[",
"0",
"]",
"==",
"\"-\"",
":",
"continue",
"# Works for both splits even though dev has some extra human labels.",
"yield",
"{",
"\"premise\"",
":",
"split_line",
"[",
"5",
"]",
",",
"\"hypothesis\"",
":",
"split_line",
"[",
"6",
"]",
",",
"\"label\"",
":",
"split_line",
"[",
"0",
"]",
"}"
] |
Generate mnli examples.
Args:
filepath: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
|
[
"Generate",
"mnli",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/multi_nli.py#L148-L171
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/image_folder.py
|
ImageLabelFolder._split_generators
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators from the folder names."""
# At data creation time, parse the folder to deduce number of splits,
# labels, image size,
# The splits correspond to the high level folders
split_names = list_folders(dl_manager.manual_dir)
# Extract all label names and associated images
split_label_images = {} # dict[split_name][label_name] = list(img_paths)
for split_name in split_names:
split_dir = os.path.join(dl_manager.manual_dir, split_name)
split_label_images[split_name] = {
label_name: list_imgs(os.path.join(split_dir, label_name))
for label_name in list_folders(split_dir)
}
# Merge all label names from all splits to get the final list of labels
# Sorted list for determinism
labels = [split.keys() for split in split_label_images.values()]
labels = list(sorted(set(itertools.chain(*labels))))
# Could improve the automated encoding format detection
# Extract the list of all image paths
image_paths = [
image_paths
for label_images in split_label_images.values()
for image_paths in label_images.values()
]
if any(f.lower().endswith(".png") for f in itertools.chain(*image_paths)):
encoding_format = "png"
else:
encoding_format = "jpeg"
# Update the info.features. Those info will be automatically resored when
# the dataset is re-created
self.info.features["image"].set_encoding_format(encoding_format)
self.info.features["label"].names = labels
def num_examples(label_images):
return sum(len(imgs) for imgs in label_images.values())
# Define the splits
return [
tfds.core.SplitGenerator(
name=split_name,
# The number of shards is a dynamic function of the total
# number of images (between 0-10)
num_shards=min(10, max(num_examples(label_images) // 1000, 1)),
gen_kwargs=dict(label_images=label_images,),
) for split_name, label_images in split_label_images.items()
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators from the folder names."""
# At data creation time, parse the folder to deduce number of splits,
# labels, image size,
# The splits correspond to the high level folders
split_names = list_folders(dl_manager.manual_dir)
# Extract all label names and associated images
split_label_images = {} # dict[split_name][label_name] = list(img_paths)
for split_name in split_names:
split_dir = os.path.join(dl_manager.manual_dir, split_name)
split_label_images[split_name] = {
label_name: list_imgs(os.path.join(split_dir, label_name))
for label_name in list_folders(split_dir)
}
# Merge all label names from all splits to get the final list of labels
# Sorted list for determinism
labels = [split.keys() for split in split_label_images.values()]
labels = list(sorted(set(itertools.chain(*labels))))
# Could improve the automated encoding format detection
# Extract the list of all image paths
image_paths = [
image_paths
for label_images in split_label_images.values()
for image_paths in label_images.values()
]
if any(f.lower().endswith(".png") for f in itertools.chain(*image_paths)):
encoding_format = "png"
else:
encoding_format = "jpeg"
# Update the info.features. Those info will be automatically resored when
# the dataset is re-created
self.info.features["image"].set_encoding_format(encoding_format)
self.info.features["label"].names = labels
def num_examples(label_images):
return sum(len(imgs) for imgs in label_images.values())
# Define the splits
return [
tfds.core.SplitGenerator(
name=split_name,
# The number of shards is a dynamic function of the total
# number of images (between 0-10)
num_shards=min(10, max(num_examples(label_images) // 1000, 1)),
gen_kwargs=dict(label_images=label_images,),
) for split_name, label_images in split_label_images.items()
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"# At data creation time, parse the folder to deduce number of splits,",
"# labels, image size,",
"# The splits correspond to the high level folders",
"split_names",
"=",
"list_folders",
"(",
"dl_manager",
".",
"manual_dir",
")",
"# Extract all label names and associated images",
"split_label_images",
"=",
"{",
"}",
"# dict[split_name][label_name] = list(img_paths)",
"for",
"split_name",
"in",
"split_names",
":",
"split_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_manager",
".",
"manual_dir",
",",
"split_name",
")",
"split_label_images",
"[",
"split_name",
"]",
"=",
"{",
"label_name",
":",
"list_imgs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"split_dir",
",",
"label_name",
")",
")",
"for",
"label_name",
"in",
"list_folders",
"(",
"split_dir",
")",
"}",
"# Merge all label names from all splits to get the final list of labels",
"# Sorted list for determinism",
"labels",
"=",
"[",
"split",
".",
"keys",
"(",
")",
"for",
"split",
"in",
"split_label_images",
".",
"values",
"(",
")",
"]",
"labels",
"=",
"list",
"(",
"sorted",
"(",
"set",
"(",
"itertools",
".",
"chain",
"(",
"*",
"labels",
")",
")",
")",
")",
"# Could improve the automated encoding format detection",
"# Extract the list of all image paths",
"image_paths",
"=",
"[",
"image_paths",
"for",
"label_images",
"in",
"split_label_images",
".",
"values",
"(",
")",
"for",
"image_paths",
"in",
"label_images",
".",
"values",
"(",
")",
"]",
"if",
"any",
"(",
"f",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".png\"",
")",
"for",
"f",
"in",
"itertools",
".",
"chain",
"(",
"*",
"image_paths",
")",
")",
":",
"encoding_format",
"=",
"\"png\"",
"else",
":",
"encoding_format",
"=",
"\"jpeg\"",
"# Update the info.features. Those info will be automatically resored when",
"# the dataset is re-created",
"self",
".",
"info",
".",
"features",
"[",
"\"image\"",
"]",
".",
"set_encoding_format",
"(",
"encoding_format",
")",
"self",
".",
"info",
".",
"features",
"[",
"\"label\"",
"]",
".",
"names",
"=",
"labels",
"def",
"num_examples",
"(",
"label_images",
")",
":",
"return",
"sum",
"(",
"len",
"(",
"imgs",
")",
"for",
"imgs",
"in",
"label_images",
".",
"values",
"(",
")",
")",
"# Define the splits",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"split_name",
",",
"# The number of shards is a dynamic function of the total",
"# number of images (between 0-10)",
"num_shards",
"=",
"min",
"(",
"10",
",",
"max",
"(",
"num_examples",
"(",
"label_images",
")",
"//",
"1000",
",",
"1",
")",
")",
",",
"gen_kwargs",
"=",
"dict",
"(",
"label_images",
"=",
"label_images",
",",
")",
",",
")",
"for",
"split_name",
",",
"label_images",
"in",
"split_label_images",
".",
"items",
"(",
")",
"]"
] |
Returns SplitGenerators from the folder names.
|
[
"Returns",
"SplitGenerators",
"from",
"the",
"folder",
"names",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/image_folder.py#L103-L154
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/image_folder.py
|
ImageLabelFolder._generate_examples
|
def _generate_examples(self, label_images):
"""Generate example for each image in the dict."""
for label, image_paths in label_images.items():
for image_path in image_paths:
yield {
"image": image_path,
"label": label,
}
|
python
|
def _generate_examples(self, label_images):
"""Generate example for each image in the dict."""
for label, image_paths in label_images.items():
for image_path in image_paths:
yield {
"image": image_path,
"label": label,
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"label_images",
")",
":",
"for",
"label",
",",
"image_paths",
"in",
"label_images",
".",
"items",
"(",
")",
":",
"for",
"image_path",
"in",
"image_paths",
":",
"yield",
"{",
"\"image\"",
":",
"image_path",
",",
"\"label\"",
":",
"label",
",",
"}"
] |
Generate example for each image in the dict.
|
[
"Generate",
"example",
"for",
"each",
"image",
"in",
"the",
"dict",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/image_folder.py#L156-L164
|
train
|
tensorflow/datasets
|
tensorflow_datasets/scripts/create_new_dataset.py
|
create_dataset_file
|
def create_dataset_file(root_dir, data):
"""Create a new dataset from a template."""
file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}.py')
context = (
_HEADER + _DATASET_DEFAULT_IMPORTS + _CITATION
+ _DESCRIPTION + _DATASET_DEFAULTS
)
with gfile.GFile(file_path.format(**data), 'w') as f:
f.write(context.format(**data))
|
python
|
def create_dataset_file(root_dir, data):
"""Create a new dataset from a template."""
file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}.py')
context = (
_HEADER + _DATASET_DEFAULT_IMPORTS + _CITATION
+ _DESCRIPTION + _DATASET_DEFAULTS
)
with gfile.GFile(file_path.format(**data), 'w') as f:
f.write(context.format(**data))
|
[
"def",
"create_dataset_file",
"(",
"root_dir",
",",
"data",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'{dataset_type}'",
",",
"'{dataset_name}.py'",
")",
"context",
"=",
"(",
"_HEADER",
"+",
"_DATASET_DEFAULT_IMPORTS",
"+",
"_CITATION",
"+",
"_DESCRIPTION",
"+",
"_DATASET_DEFAULTS",
")",
"with",
"gfile",
".",
"GFile",
"(",
"file_path",
".",
"format",
"(",
"*",
"*",
"data",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"context",
".",
"format",
"(",
"*",
"*",
"data",
")",
")"
] |
Create a new dataset from a template.
|
[
"Create",
"a",
"new",
"dataset",
"from",
"a",
"template",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/create_new_dataset.py#L155-L164
|
train
|
tensorflow/datasets
|
tensorflow_datasets/scripts/create_new_dataset.py
|
add_the_init
|
def add_the_init(root_dir, data):
"""Append the new dataset file to the __init__.py."""
init_file = os.path.join(root_dir, '{dataset_type}', '__init__.py')
context = (
'from tensorflow_datasets.{dataset_type}.{dataset_name} import '
'{dataset_cls} # {TODO} Sort alphabetically\n'
)
with gfile.GFile(init_file.format(**data), 'a') as f:
f.write(context.format(**data))
|
python
|
def add_the_init(root_dir, data):
"""Append the new dataset file to the __init__.py."""
init_file = os.path.join(root_dir, '{dataset_type}', '__init__.py')
context = (
'from tensorflow_datasets.{dataset_type}.{dataset_name} import '
'{dataset_cls} # {TODO} Sort alphabetically\n'
)
with gfile.GFile(init_file.format(**data), 'a') as f:
f.write(context.format(**data))
|
[
"def",
"add_the_init",
"(",
"root_dir",
",",
"data",
")",
":",
"init_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'{dataset_type}'",
",",
"'__init__.py'",
")",
"context",
"=",
"(",
"'from tensorflow_datasets.{dataset_type}.{dataset_name} import '",
"'{dataset_cls} # {TODO} Sort alphabetically\\n'",
")",
"with",
"gfile",
".",
"GFile",
"(",
"init_file",
".",
"format",
"(",
"*",
"*",
"data",
")",
",",
"'a'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"context",
".",
"format",
"(",
"*",
"*",
"data",
")",
")"
] |
Append the new dataset file to the __init__.py.
|
[
"Append",
"the",
"new",
"dataset",
"file",
"to",
"the",
"__init__",
".",
"py",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/create_new_dataset.py#L167-L175
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/svhn.py
|
SvhnCropped._generate_examples
|
def _generate_examples(self, filepath):
"""Generate examples as dicts.
Args:
filepath: `str` path of the file to process.
Yields:
Generator yielding the next samples
"""
with tf.io.gfile.GFile(filepath, "rb") as f:
data = tfds.core.lazy_imports.scipy.io.loadmat(f)
# Maybe should shuffle ?
assert np.max(data["y"]) <= 10 # Sanity check
assert np.min(data["y"]) > 0
for image, label in zip(np.rollaxis(data["X"], -1), data["y"]):
yield {
"image": image,
"label": label % 10, # digit 0 is saved as 0 (instead of 10)
}
|
python
|
def _generate_examples(self, filepath):
"""Generate examples as dicts.
Args:
filepath: `str` path of the file to process.
Yields:
Generator yielding the next samples
"""
with tf.io.gfile.GFile(filepath, "rb") as f:
data = tfds.core.lazy_imports.scipy.io.loadmat(f)
# Maybe should shuffle ?
assert np.max(data["y"]) <= 10 # Sanity check
assert np.min(data["y"]) > 0
for image, label in zip(np.rollaxis(data["X"], -1), data["y"]):
yield {
"image": image,
"label": label % 10, # digit 0 is saved as 0 (instead of 10)
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
",",
"\"rb\"",
")",
"as",
"f",
":",
"data",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"scipy",
".",
"io",
".",
"loadmat",
"(",
"f",
")",
"# Maybe should shuffle ?",
"assert",
"np",
".",
"max",
"(",
"data",
"[",
"\"y\"",
"]",
")",
"<=",
"10",
"# Sanity check",
"assert",
"np",
".",
"min",
"(",
"data",
"[",
"\"y\"",
"]",
")",
">",
"0",
"for",
"image",
",",
"label",
"in",
"zip",
"(",
"np",
".",
"rollaxis",
"(",
"data",
"[",
"\"X\"",
"]",
",",
"-",
"1",
")",
",",
"data",
"[",
"\"y\"",
"]",
")",
":",
"yield",
"{",
"\"image\"",
":",
"image",
",",
"\"label\"",
":",
"label",
"%",
"10",
",",
"# digit 0 is saved as 0 (instead of 10)",
"}"
] |
Generate examples as dicts.
Args:
filepath: `str` path of the file to process.
Yields:
Generator yielding the next samples
|
[
"Generate",
"examples",
"as",
"dicts",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/svhn.py#L92-L113
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/chexpert.py
|
Chexpert._split_generators
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
path = dl_manager.manual_dir
train_path = os.path.join(path, _TRAIN_DIR)
val_path = os.path.join(path, _VALIDATION_DIR)
if not tf.io.gfile.exists(train_path) or not tf.io.gfile.exists(val_path):
msg = ("You must download the dataset folder from CheXpert"
"website manually and place it into %s." % path)
raise AssertionError(msg)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=100,
gen_kwargs={
"imgs_path": path, # Relative img path is provided in csv
"csv_path": os.path.join(path, _TRAIN_LABELS_FNAME)
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=10,
gen_kwargs={
"imgs_path": path,
"csv_path": os.path.join(path, _VALIDATION_LABELS_FNAME)
},
),
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
path = dl_manager.manual_dir
train_path = os.path.join(path, _TRAIN_DIR)
val_path = os.path.join(path, _VALIDATION_DIR)
if not tf.io.gfile.exists(train_path) or not tf.io.gfile.exists(val_path):
msg = ("You must download the dataset folder from CheXpert"
"website manually and place it into %s." % path)
raise AssertionError(msg)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=100,
gen_kwargs={
"imgs_path": path, # Relative img path is provided in csv
"csv_path": os.path.join(path, _TRAIN_LABELS_FNAME)
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=10,
gen_kwargs={
"imgs_path": path,
"csv_path": os.path.join(path, _VALIDATION_LABELS_FNAME)
},
),
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"path",
"=",
"dl_manager",
".",
"manual_dir",
"train_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"_TRAIN_DIR",
")",
"val_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"_VALIDATION_DIR",
")",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"train_path",
")",
"or",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"val_path",
")",
":",
"msg",
"=",
"(",
"\"You must download the dataset folder from CheXpert\"",
"\"website manually and place it into %s.\"",
"%",
"path",
")",
"raise",
"AssertionError",
"(",
"msg",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
",",
"num_shards",
"=",
"100",
",",
"gen_kwargs",
"=",
"{",
"\"imgs_path\"",
":",
"path",
",",
"# Relative img path is provided in csv",
"\"csv_path\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"_TRAIN_LABELS_FNAME",
")",
"}",
",",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"VALIDATION",
",",
"num_shards",
"=",
"10",
",",
"gen_kwargs",
"=",
"{",
"\"imgs_path\"",
":",
"path",
",",
"\"csv_path\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"_VALIDATION_LABELS_FNAME",
")",
"}",
",",
")",
",",
"]"
] |
Returns SplitGenerators.
|
[
"Returns",
"SplitGenerators",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/chexpert.py#L93-L121
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/chexpert.py
|
Chexpert._generate_examples
|
def _generate_examples(self, imgs_path, csv_path):
"""Yields examples."""
with tf.io.gfile.GFile(csv_path) as csv_f:
reader = csv.DictReader(csv_f)
# Get keys for each label from csv
label_keys = reader.fieldnames[5:]
data = []
for row in reader:
# Get image based on indicated path in csv
name = row["Path"]
labels = [_LABELS[row[key]] for key in label_keys]
data.append((name, labels))
for name, labels in data:
yield {
"name": name,
"image": os.path.join(imgs_path, name),
"label": labels
}
|
python
|
def _generate_examples(self, imgs_path, csv_path):
"""Yields examples."""
with tf.io.gfile.GFile(csv_path) as csv_f:
reader = csv.DictReader(csv_f)
# Get keys for each label from csv
label_keys = reader.fieldnames[5:]
data = []
for row in reader:
# Get image based on indicated path in csv
name = row["Path"]
labels = [_LABELS[row[key]] for key in label_keys]
data.append((name, labels))
for name, labels in data:
yield {
"name": name,
"image": os.path.join(imgs_path, name),
"label": labels
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"imgs_path",
",",
"csv_path",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"csv_path",
")",
"as",
"csv_f",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"csv_f",
")",
"# Get keys for each label from csv",
"label_keys",
"=",
"reader",
".",
"fieldnames",
"[",
"5",
":",
"]",
"data",
"=",
"[",
"]",
"for",
"row",
"in",
"reader",
":",
"# Get image based on indicated path in csv",
"name",
"=",
"row",
"[",
"\"Path\"",
"]",
"labels",
"=",
"[",
"_LABELS",
"[",
"row",
"[",
"key",
"]",
"]",
"for",
"key",
"in",
"label_keys",
"]",
"data",
".",
"append",
"(",
"(",
"name",
",",
"labels",
")",
")",
"for",
"name",
",",
"labels",
"in",
"data",
":",
"yield",
"{",
"\"name\"",
":",
"name",
",",
"\"image\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"imgs_path",
",",
"name",
")",
",",
"\"label\"",
":",
"labels",
"}"
] |
Yields examples.
|
[
"Yields",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/chexpert.py#L123-L141
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/imagenet2012_corrupted.py
|
_make_builder_configs
|
def _make_builder_configs():
"""Construct a list of BuilderConfigs.
Construct a list of 60 Imagenet2012CorruptedConfig objects, corresponding to
the 12 corruption types, with each type having 5 severities.
Returns:
A list of 60 Imagenet2012CorruptedConfig objects.
"""
config_list = []
for each_corruption in TYPE_LIST:
for each_severity in range(1, 6):
name_str = each_corruption + '_' + str(each_severity)
version_str = '0.0.1'
description_str = 'corruption type = ' + each_corruption + ', severity = '
description_str += str(each_severity)
config_list.append(
Imagenet2012CorruptedConfig(
name=name_str,
version=version_str,
description=description_str,
corruption_type=each_corruption,
severity=each_severity,
))
return config_list
|
python
|
def _make_builder_configs():
"""Construct a list of BuilderConfigs.
Construct a list of 60 Imagenet2012CorruptedConfig objects, corresponding to
the 12 corruption types, with each type having 5 severities.
Returns:
A list of 60 Imagenet2012CorruptedConfig objects.
"""
config_list = []
for each_corruption in TYPE_LIST:
for each_severity in range(1, 6):
name_str = each_corruption + '_' + str(each_severity)
version_str = '0.0.1'
description_str = 'corruption type = ' + each_corruption + ', severity = '
description_str += str(each_severity)
config_list.append(
Imagenet2012CorruptedConfig(
name=name_str,
version=version_str,
description=description_str,
corruption_type=each_corruption,
severity=each_severity,
))
return config_list
|
[
"def",
"_make_builder_configs",
"(",
")",
":",
"config_list",
"=",
"[",
"]",
"for",
"each_corruption",
"in",
"TYPE_LIST",
":",
"for",
"each_severity",
"in",
"range",
"(",
"1",
",",
"6",
")",
":",
"name_str",
"=",
"each_corruption",
"+",
"'_'",
"+",
"str",
"(",
"each_severity",
")",
"version_str",
"=",
"'0.0.1'",
"description_str",
"=",
"'corruption type = '",
"+",
"each_corruption",
"+",
"', severity = '",
"description_str",
"+=",
"str",
"(",
"each_severity",
")",
"config_list",
".",
"append",
"(",
"Imagenet2012CorruptedConfig",
"(",
"name",
"=",
"name_str",
",",
"version",
"=",
"version_str",
",",
"description",
"=",
"description_str",
",",
"corruption_type",
"=",
"each_corruption",
",",
"severity",
"=",
"each_severity",
",",
")",
")",
"return",
"config_list"
] |
Construct a list of BuilderConfigs.
Construct a list of 60 Imagenet2012CorruptedConfig objects, corresponding to
the 12 corruption types, with each type having 5 severities.
Returns:
A list of 60 Imagenet2012CorruptedConfig objects.
|
[
"Construct",
"a",
"list",
"of",
"BuilderConfigs",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet2012_corrupted.py#L83-L107
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/imagenet2012_corrupted.py
|
Imagenet2012Corrupted._split_generators
|
def _split_generators(self, dl_manager):
"""Return the validation split of ImageNet2012.
Args:
dl_manager: download manager object.
Returns:
validation split.
"""
splits = super(Imagenet2012Corrupted, self)._split_generators(dl_manager)
validation = splits[1]
return [validation]
|
python
|
def _split_generators(self, dl_manager):
"""Return the validation split of ImageNet2012.
Args:
dl_manager: download manager object.
Returns:
validation split.
"""
splits = super(Imagenet2012Corrupted, self)._split_generators(dl_manager)
validation = splits[1]
return [validation]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"splits",
"=",
"super",
"(",
"Imagenet2012Corrupted",
",",
"self",
")",
".",
"_split_generators",
"(",
"dl_manager",
")",
"validation",
"=",
"splits",
"[",
"1",
"]",
"return",
"[",
"validation",
"]"
] |
Return the validation split of ImageNet2012.
Args:
dl_manager: download manager object.
Returns:
validation split.
|
[
"Return",
"the",
"validation",
"split",
"of",
"ImageNet2012",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet2012_corrupted.py#L134-L145
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/imagenet2012_corrupted.py
|
Imagenet2012Corrupted._generate_examples_validation
|
def _generate_examples_validation(self, archive, labels):
"""Generate corrupted imagenet validation data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
archive: an iterator for the raw dataset.
labels: a dictionary that maps the file names to imagenet labels.
Yields:
dictionary with the file name, an image file objective, and label of each
imagenet validation data.
"""
# Get the current random seeds.
numpy_st0 = np.random.get_state()
# Set new random seeds.
np.random.seed(135)
logging.warning('Overwriting cv2 RNG seed.')
tfds.core.lazy_imports.cv2.setRNGSeed(357)
for example in super(Imagenet2012Corrupted,
self)._generate_examples_validation(archive, labels):
with tf.Graph().as_default():
tf_img = tf.image.decode_jpeg(example['image'].read(), channels=3)
image_np = tfds.as_numpy(tf_img)
example['image'] = self._get_corrupted_example(image_np)
yield example
# Reset the seeds back to their original values.
np.random.set_state(numpy_st0)
|
python
|
def _generate_examples_validation(self, archive, labels):
"""Generate corrupted imagenet validation data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
archive: an iterator for the raw dataset.
labels: a dictionary that maps the file names to imagenet labels.
Yields:
dictionary with the file name, an image file objective, and label of each
imagenet validation data.
"""
# Get the current random seeds.
numpy_st0 = np.random.get_state()
# Set new random seeds.
np.random.seed(135)
logging.warning('Overwriting cv2 RNG seed.')
tfds.core.lazy_imports.cv2.setRNGSeed(357)
for example in super(Imagenet2012Corrupted,
self)._generate_examples_validation(archive, labels):
with tf.Graph().as_default():
tf_img = tf.image.decode_jpeg(example['image'].read(), channels=3)
image_np = tfds.as_numpy(tf_img)
example['image'] = self._get_corrupted_example(image_np)
yield example
# Reset the seeds back to their original values.
np.random.set_state(numpy_st0)
|
[
"def",
"_generate_examples_validation",
"(",
"self",
",",
"archive",
",",
"labels",
")",
":",
"# Get the current random seeds.",
"numpy_st0",
"=",
"np",
".",
"random",
".",
"get_state",
"(",
")",
"# Set new random seeds.",
"np",
".",
"random",
".",
"seed",
"(",
"135",
")",
"logging",
".",
"warning",
"(",
"'Overwriting cv2 RNG seed.'",
")",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
".",
"setRNGSeed",
"(",
"357",
")",
"for",
"example",
"in",
"super",
"(",
"Imagenet2012Corrupted",
",",
"self",
")",
".",
"_generate_examples_validation",
"(",
"archive",
",",
"labels",
")",
":",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"tf_img",
"=",
"tf",
".",
"image",
".",
"decode_jpeg",
"(",
"example",
"[",
"'image'",
"]",
".",
"read",
"(",
")",
",",
"channels",
"=",
"3",
")",
"image_np",
"=",
"tfds",
".",
"as_numpy",
"(",
"tf_img",
")",
"example",
"[",
"'image'",
"]",
"=",
"self",
".",
"_get_corrupted_example",
"(",
"image_np",
")",
"yield",
"example",
"# Reset the seeds back to their original values.",
"np",
".",
"random",
".",
"set_state",
"(",
"numpy_st0",
")"
] |
Generate corrupted imagenet validation data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
archive: an iterator for the raw dataset.
labels: a dictionary that maps the file names to imagenet labels.
Yields:
dictionary with the file name, an image file objective, and label of each
imagenet validation data.
|
[
"Generate",
"corrupted",
"imagenet",
"validation",
"data",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet2012_corrupted.py#L147-L175
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/imagenet2012_corrupted.py
|
Imagenet2012Corrupted._get_corrupted_example
|
def _get_corrupted_example(self, x):
"""Return corrupted images.
Args:
x: numpy array, uncorrupted image.
Returns:
numpy array, corrupted images.
"""
corruption_type = self.builder_config.corruption_type
severity = self.builder_config.severity
return {
'gaussian_noise': corruptions.gaussian_noise,
'shot_noise': corruptions.shot_noise,
'impulse_noise': corruptions.impulse_noise,
'defocus_blur': corruptions.defocus_blur,
'frosted_glass_blur': corruptions.frosted_glass_blur,
'zoom_blur': corruptions.zoom_blur,
'fog': corruptions.fog,
'brightness': corruptions.brightness,
'contrast': corruptions.contrast,
'elastic': corruptions.elastic,
'pixelate': corruptions.pixelate,
'jpeg_compression': corruptions.jpeg_compression,
}[corruption_type](x, severity)
|
python
|
def _get_corrupted_example(self, x):
"""Return corrupted images.
Args:
x: numpy array, uncorrupted image.
Returns:
numpy array, corrupted images.
"""
corruption_type = self.builder_config.corruption_type
severity = self.builder_config.severity
return {
'gaussian_noise': corruptions.gaussian_noise,
'shot_noise': corruptions.shot_noise,
'impulse_noise': corruptions.impulse_noise,
'defocus_blur': corruptions.defocus_blur,
'frosted_glass_blur': corruptions.frosted_glass_blur,
'zoom_blur': corruptions.zoom_blur,
'fog': corruptions.fog,
'brightness': corruptions.brightness,
'contrast': corruptions.contrast,
'elastic': corruptions.elastic,
'pixelate': corruptions.pixelate,
'jpeg_compression': corruptions.jpeg_compression,
}[corruption_type](x, severity)
|
[
"def",
"_get_corrupted_example",
"(",
"self",
",",
"x",
")",
":",
"corruption_type",
"=",
"self",
".",
"builder_config",
".",
"corruption_type",
"severity",
"=",
"self",
".",
"builder_config",
".",
"severity",
"return",
"{",
"'gaussian_noise'",
":",
"corruptions",
".",
"gaussian_noise",
",",
"'shot_noise'",
":",
"corruptions",
".",
"shot_noise",
",",
"'impulse_noise'",
":",
"corruptions",
".",
"impulse_noise",
",",
"'defocus_blur'",
":",
"corruptions",
".",
"defocus_blur",
",",
"'frosted_glass_blur'",
":",
"corruptions",
".",
"frosted_glass_blur",
",",
"'zoom_blur'",
":",
"corruptions",
".",
"zoom_blur",
",",
"'fog'",
":",
"corruptions",
".",
"fog",
",",
"'brightness'",
":",
"corruptions",
".",
"brightness",
",",
"'contrast'",
":",
"corruptions",
".",
"contrast",
",",
"'elastic'",
":",
"corruptions",
".",
"elastic",
",",
"'pixelate'",
":",
"corruptions",
".",
"pixelate",
",",
"'jpeg_compression'",
":",
"corruptions",
".",
"jpeg_compression",
",",
"}",
"[",
"corruption_type",
"]",
"(",
"x",
",",
"severity",
")"
] |
Return corrupted images.
Args:
x: numpy array, uncorrupted image.
Returns:
numpy array, corrupted images.
|
[
"Return",
"corrupted",
"images",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet2012_corrupted.py#L177-L202
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/tf_utils.py
|
assert_shape_match
|
def assert_shape_match(shape1, shape2):
"""Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None)
"""
shape1 = tf.TensorShape(shape1)
shape2 = tf.TensorShape(shape2)
if shape1.ndims is None or shape2.ndims is None:
raise ValueError('Shapes must have known rank. Got %s and %s.' %
(shape1.ndims, shape2.ndims))
shape1.assert_same_rank(shape2)
shape1.assert_is_compatible_with(shape2)
|
python
|
def assert_shape_match(shape1, shape2):
"""Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None)
"""
shape1 = tf.TensorShape(shape1)
shape2 = tf.TensorShape(shape2)
if shape1.ndims is None or shape2.ndims is None:
raise ValueError('Shapes must have known rank. Got %s and %s.' %
(shape1.ndims, shape2.ndims))
shape1.assert_same_rank(shape2)
shape1.assert_is_compatible_with(shape2)
|
[
"def",
"assert_shape_match",
"(",
"shape1",
",",
"shape2",
")",
":",
"shape1",
"=",
"tf",
".",
"TensorShape",
"(",
"shape1",
")",
"shape2",
"=",
"tf",
".",
"TensorShape",
"(",
"shape2",
")",
"if",
"shape1",
".",
"ndims",
"is",
"None",
"or",
"shape2",
".",
"ndims",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Shapes must have known rank. Got %s and %s.'",
"%",
"(",
"shape1",
".",
"ndims",
",",
"shape2",
".",
"ndims",
")",
")",
"shape1",
".",
"assert_same_rank",
"(",
"shape2",
")",
"shape1",
".",
"assert_is_compatible_with",
"(",
"shape2",
")"
] |
Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None)
|
[
"Ensure",
"the",
"shape1",
"match",
"the",
"pattern",
"given",
"by",
"shape2",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L132-L148
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/tf_utils.py
|
raw_nogpu_session
|
def raw_nogpu_session(graph=None):
"""tf.Session, hiding GPUs."""
config = tf.compat.v1.ConfigProto(device_count={'GPU': 0})
return tf.compat.v1.Session(config=config, graph=graph)
|
python
|
def raw_nogpu_session(graph=None):
"""tf.Session, hiding GPUs."""
config = tf.compat.v1.ConfigProto(device_count={'GPU': 0})
return tf.compat.v1.Session(config=config, graph=graph)
|
[
"def",
"raw_nogpu_session",
"(",
"graph",
"=",
"None",
")",
":",
"config",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"ConfigProto",
"(",
"device_count",
"=",
"{",
"'GPU'",
":",
"0",
"}",
")",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"Session",
"(",
"config",
"=",
"config",
",",
"graph",
"=",
"graph",
")"
] |
tf.Session, hiding GPUs.
|
[
"tf",
".",
"Session",
"hiding",
"GPUs",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L161-L164
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/tf_utils.py
|
maybe_with_graph
|
def maybe_with_graph(graph=None, create_if_none=True):
"""Eager-compatible Graph().as_default() yielding the graph."""
if tf.executing_eagerly():
yield None
else:
if graph is None and create_if_none:
graph = tf.Graph()
if graph is None:
yield None
else:
with graph.as_default():
yield graph
|
python
|
def maybe_with_graph(graph=None, create_if_none=True):
"""Eager-compatible Graph().as_default() yielding the graph."""
if tf.executing_eagerly():
yield None
else:
if graph is None and create_if_none:
graph = tf.Graph()
if graph is None:
yield None
else:
with graph.as_default():
yield graph
|
[
"def",
"maybe_with_graph",
"(",
"graph",
"=",
"None",
",",
"create_if_none",
"=",
"True",
")",
":",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"yield",
"None",
"else",
":",
"if",
"graph",
"is",
"None",
"and",
"create_if_none",
":",
"graph",
"=",
"tf",
".",
"Graph",
"(",
")",
"if",
"graph",
"is",
"None",
":",
"yield",
"None",
"else",
":",
"with",
"graph",
".",
"as_default",
"(",
")",
":",
"yield",
"graph"
] |
Eager-compatible Graph().as_default() yielding the graph.
|
[
"Eager",
"-",
"compatible",
"Graph",
"()",
".",
"as_default",
"()",
"yielding",
"the",
"graph",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L168-L180
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/tf_utils.py
|
TFGraphRunner.run
|
def run(self, fct, input_):
"""Execute the given TensorFlow function."""
# TF 2.0
if tf.executing_eagerly():
return fct(input_).numpy()
# TF 1.0
else:
# Should compile the function if this is the first time encountered
if not isinstance(input_, np.ndarray):
input_ = np.array(input_)
run_args = RunArgs(fct=fct, input=input_)
signature = self._build_signature(run_args)
if signature not in self._graph_run_cache:
graph_run = self._build_graph_run(run_args)
self._graph_run_cache[signature] = graph_run
else:
graph_run = self._graph_run_cache[signature]
# Then execute the cached graph
return graph_run.session.run(
graph_run.output,
feed_dict={graph_run.placeholder: input_},
)
|
python
|
def run(self, fct, input_):
"""Execute the given TensorFlow function."""
# TF 2.0
if tf.executing_eagerly():
return fct(input_).numpy()
# TF 1.0
else:
# Should compile the function if this is the first time encountered
if not isinstance(input_, np.ndarray):
input_ = np.array(input_)
run_args = RunArgs(fct=fct, input=input_)
signature = self._build_signature(run_args)
if signature not in self._graph_run_cache:
graph_run = self._build_graph_run(run_args)
self._graph_run_cache[signature] = graph_run
else:
graph_run = self._graph_run_cache[signature]
# Then execute the cached graph
return graph_run.session.run(
graph_run.output,
feed_dict={graph_run.placeholder: input_},
)
|
[
"def",
"run",
"(",
"self",
",",
"fct",
",",
"input_",
")",
":",
"# TF 2.0",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"return",
"fct",
"(",
"input_",
")",
".",
"numpy",
"(",
")",
"# TF 1.0",
"else",
":",
"# Should compile the function if this is the first time encountered",
"if",
"not",
"isinstance",
"(",
"input_",
",",
"np",
".",
"ndarray",
")",
":",
"input_",
"=",
"np",
".",
"array",
"(",
"input_",
")",
"run_args",
"=",
"RunArgs",
"(",
"fct",
"=",
"fct",
",",
"input",
"=",
"input_",
")",
"signature",
"=",
"self",
".",
"_build_signature",
"(",
"run_args",
")",
"if",
"signature",
"not",
"in",
"self",
".",
"_graph_run_cache",
":",
"graph_run",
"=",
"self",
".",
"_build_graph_run",
"(",
"run_args",
")",
"self",
".",
"_graph_run_cache",
"[",
"signature",
"]",
"=",
"graph_run",
"else",
":",
"graph_run",
"=",
"self",
".",
"_graph_run_cache",
"[",
"signature",
"]",
"# Then execute the cached graph",
"return",
"graph_run",
".",
"session",
".",
"run",
"(",
"graph_run",
".",
"output",
",",
"feed_dict",
"=",
"{",
"graph_run",
".",
"placeholder",
":",
"input_",
"}",
",",
")"
] |
Execute the given TensorFlow function.
|
[
"Execute",
"the",
"given",
"TensorFlow",
"function",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L70-L92
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/tf_utils.py
|
TFGraphRunner._build_graph_run
|
def _build_graph_run(self, run_args):
"""Create a new graph for the given args."""
# Could try to use tfe.py_func(fct) but this would require knowing
# information about the signature of the function.
# Create a new graph:
with tf.Graph().as_default() as g:
# Create placeholder
input_ = run_args.input
placeholder = tf.compat.v1.placeholder(
dtype=input_.dtype, shape=input_.shape)
output = run_args.fct(placeholder)
return GraphRun(
session=raw_nogpu_session(g),
graph=g,
placeholder=placeholder,
output=output,
)
|
python
|
def _build_graph_run(self, run_args):
"""Create a new graph for the given args."""
# Could try to use tfe.py_func(fct) but this would require knowing
# information about the signature of the function.
# Create a new graph:
with tf.Graph().as_default() as g:
# Create placeholder
input_ = run_args.input
placeholder = tf.compat.v1.placeholder(
dtype=input_.dtype, shape=input_.shape)
output = run_args.fct(placeholder)
return GraphRun(
session=raw_nogpu_session(g),
graph=g,
placeholder=placeholder,
output=output,
)
|
[
"def",
"_build_graph_run",
"(",
"self",
",",
"run_args",
")",
":",
"# Could try to use tfe.py_func(fct) but this would require knowing",
"# information about the signature of the function.",
"# Create a new graph:",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
"as",
"g",
":",
"# Create placeholder",
"input_",
"=",
"run_args",
".",
"input",
"placeholder",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"placeholder",
"(",
"dtype",
"=",
"input_",
".",
"dtype",
",",
"shape",
"=",
"input_",
".",
"shape",
")",
"output",
"=",
"run_args",
".",
"fct",
"(",
"placeholder",
")",
"return",
"GraphRun",
"(",
"session",
"=",
"raw_nogpu_session",
"(",
"g",
")",
",",
"graph",
"=",
"g",
",",
"placeholder",
"=",
"placeholder",
",",
"output",
"=",
"output",
",",
")"
] |
Create a new graph for the given args.
|
[
"Create",
"a",
"new",
"graph",
"for",
"the",
"given",
"args",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L94-L111
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/tf_utils.py
|
TFGraphRunner._build_signature
|
def _build_signature(self, run_args):
"""Create a unique signature for each fct/inputs."""
return (id(run_args.fct), run_args.input.dtype, run_args.input.shape)
|
python
|
def _build_signature(self, run_args):
"""Create a unique signature for each fct/inputs."""
return (id(run_args.fct), run_args.input.dtype, run_args.input.shape)
|
[
"def",
"_build_signature",
"(",
"self",
",",
"run_args",
")",
":",
"return",
"(",
"id",
"(",
"run_args",
".",
"fct",
")",
",",
"run_args",
".",
"input",
".",
"dtype",
",",
"run_args",
".",
"input",
".",
"shape",
")"
] |
Create a unique signature for each fct/inputs.
|
[
"Create",
"a",
"unique",
"signature",
"for",
"each",
"fct",
"/",
"inputs",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L113-L115
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/video_feature.py
|
Video.encode_example
|
def encode_example(self, video_or_path_or_fobj):
"""Converts the given image into a dict convertible to tf example."""
if isinstance(video_or_path_or_fobj, six.string_types):
if not os.path.isfile(video_or_path_or_fobj):
_, video_temp_path = tempfile.mkstemp()
try:
tf.gfile.Copy(video_or_path_or_fobj, video_temp_path, overwrite=True)
encoded_video = self._ffmpeg_decode(video_temp_path)
finally:
os.unlink(video_temp_path)
else:
encoded_video = self._ffmpeg_decode(video_or_path_or_fobj)
elif hasattr(video_or_path_or_fobj, 'read'):
encoded_video = self._ffmpeg_decode(video_or_path_or_fobj)
else:
encoded_video = video_or_path_or_fobj
return super(Video, self).encode_example(encoded_video)
|
python
|
def encode_example(self, video_or_path_or_fobj):
"""Converts the given image into a dict convertible to tf example."""
if isinstance(video_or_path_or_fobj, six.string_types):
if not os.path.isfile(video_or_path_or_fobj):
_, video_temp_path = tempfile.mkstemp()
try:
tf.gfile.Copy(video_or_path_or_fobj, video_temp_path, overwrite=True)
encoded_video = self._ffmpeg_decode(video_temp_path)
finally:
os.unlink(video_temp_path)
else:
encoded_video = self._ffmpeg_decode(video_or_path_or_fobj)
elif hasattr(video_or_path_or_fobj, 'read'):
encoded_video = self._ffmpeg_decode(video_or_path_or_fobj)
else:
encoded_video = video_or_path_or_fobj
return super(Video, self).encode_example(encoded_video)
|
[
"def",
"encode_example",
"(",
"self",
",",
"video_or_path_or_fobj",
")",
":",
"if",
"isinstance",
"(",
"video_or_path_or_fobj",
",",
"six",
".",
"string_types",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"video_or_path_or_fobj",
")",
":",
"_",
",",
"video_temp_path",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"try",
":",
"tf",
".",
"gfile",
".",
"Copy",
"(",
"video_or_path_or_fobj",
",",
"video_temp_path",
",",
"overwrite",
"=",
"True",
")",
"encoded_video",
"=",
"self",
".",
"_ffmpeg_decode",
"(",
"video_temp_path",
")",
"finally",
":",
"os",
".",
"unlink",
"(",
"video_temp_path",
")",
"else",
":",
"encoded_video",
"=",
"self",
".",
"_ffmpeg_decode",
"(",
"video_or_path_or_fobj",
")",
"elif",
"hasattr",
"(",
"video_or_path_or_fobj",
",",
"'read'",
")",
":",
"encoded_video",
"=",
"self",
".",
"_ffmpeg_decode",
"(",
"video_or_path_or_fobj",
")",
"else",
":",
"encoded_video",
"=",
"video_or_path_or_fobj",
"return",
"super",
"(",
"Video",
",",
"self",
")",
".",
"encode_example",
"(",
"encoded_video",
")"
] |
Converts the given image into a dict convertible to tf example.
|
[
"Converts",
"the",
"given",
"image",
"into",
"a",
"dict",
"convertible",
"to",
"tf",
"example",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/video_feature.py#L148-L164
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/rock_paper_scissors.py
|
RockPaperScissors._generate_examples
|
def _generate_examples(self, archive):
"""Generate rock, paper or scissors images and labels given the directory path.
Args:
archive: object that iterates over the zip.
Yields:
The image path and its corresponding label.
"""
for fname, fobj in archive:
res = _NAME_RE.match(fname)
if not res: # if anything other than .png; skip
continue
label = res.group(2).lower()
yield {
"image": fobj,
"label": label,
}
|
python
|
def _generate_examples(self, archive):
"""Generate rock, paper or scissors images and labels given the directory path.
Args:
archive: object that iterates over the zip.
Yields:
The image path and its corresponding label.
"""
for fname, fobj in archive:
res = _NAME_RE.match(fname)
if not res: # if anything other than .png; skip
continue
label = res.group(2).lower()
yield {
"image": fobj,
"label": label,
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"archive",
")",
":",
"for",
"fname",
",",
"fobj",
"in",
"archive",
":",
"res",
"=",
"_NAME_RE",
".",
"match",
"(",
"fname",
")",
"if",
"not",
"res",
":",
"# if anything other than .png; skip",
"continue",
"label",
"=",
"res",
".",
"group",
"(",
"2",
")",
".",
"lower",
"(",
")",
"yield",
"{",
"\"image\"",
":",
"fobj",
",",
"\"label\"",
":",
"label",
",",
"}"
] |
Generate rock, paper or scissors images and labels given the directory path.
Args:
archive: object that iterates over the zip.
Yields:
The image path and its corresponding label.
|
[
"Generate",
"rock",
"paper",
"or",
"scissors",
"images",
"and",
"labels",
"given",
"the",
"directory",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/rock_paper_scissors.py#L82-L100
|
train
|
tensorflow/datasets
|
tensorflow_datasets/structured/titanic.py
|
Titanic._generate_examples
|
def _generate_examples(self, file_path):
"""Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target
"""
with tf.io.gfile.GFile(file_path) as f:
raw_data = csv.DictReader(f)
for row in raw_data:
survive_val = row.pop("survived")
yield {
"survived": convert_to_label(survive_val, _SURVIVED_DICT),
"features": {
name: FEATURE_DICT[name][1](value)
for name, value in row.items()
}
}
|
python
|
def _generate_examples(self, file_path):
"""Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target
"""
with tf.io.gfile.GFile(file_path) as f:
raw_data = csv.DictReader(f)
for row in raw_data:
survive_val = row.pop("survived")
yield {
"survived": convert_to_label(survive_val, _SURVIVED_DICT),
"features": {
name: FEATURE_DICT[name][1](value)
for name, value in row.items()
}
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"file_path",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"file_path",
")",
"as",
"f",
":",
"raw_data",
"=",
"csv",
".",
"DictReader",
"(",
"f",
")",
"for",
"row",
"in",
"raw_data",
":",
"survive_val",
"=",
"row",
".",
"pop",
"(",
"\"survived\"",
")",
"yield",
"{",
"\"survived\"",
":",
"convert_to_label",
"(",
"survive_val",
",",
"_SURVIVED_DICT",
")",
",",
"\"features\"",
":",
"{",
"name",
":",
"FEATURE_DICT",
"[",
"name",
"]",
"[",
"1",
"]",
"(",
"value",
")",
"for",
"name",
",",
"value",
"in",
"row",
".",
"items",
"(",
")",
"}",
"}"
] |
Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target
|
[
"Generate",
"features",
"and",
"target",
"given",
"the",
"directory",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/structured/titanic.py#L130-L150
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/text_encoder.py
|
pad_decr
|
def pad_decr(ids):
"""Strip ID 0 and decrement ids by 1."""
if len(ids) < 1:
return list(ids)
if not any(ids):
return [] # all padding.
idx = -1
while not ids[idx]:
idx -= 1
if idx == -1:
ids = ids
else:
ids = ids[:idx + 1]
return [i - 1 for i in ids]
|
python
|
def pad_decr(ids):
"""Strip ID 0 and decrement ids by 1."""
if len(ids) < 1:
return list(ids)
if not any(ids):
return [] # all padding.
idx = -1
while not ids[idx]:
idx -= 1
if idx == -1:
ids = ids
else:
ids = ids[:idx + 1]
return [i - 1 for i in ids]
|
[
"def",
"pad_decr",
"(",
"ids",
")",
":",
"if",
"len",
"(",
"ids",
")",
"<",
"1",
":",
"return",
"list",
"(",
"ids",
")",
"if",
"not",
"any",
"(",
"ids",
")",
":",
"return",
"[",
"]",
"# all padding.",
"idx",
"=",
"-",
"1",
"while",
"not",
"ids",
"[",
"idx",
"]",
":",
"idx",
"-=",
"1",
"if",
"idx",
"==",
"-",
"1",
":",
"ids",
"=",
"ids",
"else",
":",
"ids",
"=",
"ids",
"[",
":",
"idx",
"+",
"1",
"]",
"return",
"[",
"i",
"-",
"1",
"for",
"i",
"in",
"ids",
"]"
] |
Strip ID 0 and decrement ids by 1.
|
[
"Strip",
"ID",
"0",
"and",
"decrement",
"ids",
"by",
"1",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L426-L439
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/text_encoder.py
|
_prepare_reserved_tokens
|
def _prepare_reserved_tokens(reserved_tokens):
"""Prepare reserved tokens and a regex for splitting them out of strings."""
reserved_tokens = [tf.compat.as_text(tok) for tok in reserved_tokens or []]
dups = _find_duplicates(reserved_tokens)
if dups:
raise ValueError("Duplicates found in tokens: %s" % dups)
reserved_tokens_re = _make_reserved_tokens_re(reserved_tokens)
return reserved_tokens, reserved_tokens_re
|
python
|
def _prepare_reserved_tokens(reserved_tokens):
"""Prepare reserved tokens and a regex for splitting them out of strings."""
reserved_tokens = [tf.compat.as_text(tok) for tok in reserved_tokens or []]
dups = _find_duplicates(reserved_tokens)
if dups:
raise ValueError("Duplicates found in tokens: %s" % dups)
reserved_tokens_re = _make_reserved_tokens_re(reserved_tokens)
return reserved_tokens, reserved_tokens_re
|
[
"def",
"_prepare_reserved_tokens",
"(",
"reserved_tokens",
")",
":",
"reserved_tokens",
"=",
"[",
"tf",
".",
"compat",
".",
"as_text",
"(",
"tok",
")",
"for",
"tok",
"in",
"reserved_tokens",
"or",
"[",
"]",
"]",
"dups",
"=",
"_find_duplicates",
"(",
"reserved_tokens",
")",
"if",
"dups",
":",
"raise",
"ValueError",
"(",
"\"Duplicates found in tokens: %s\"",
"%",
"dups",
")",
"reserved_tokens_re",
"=",
"_make_reserved_tokens_re",
"(",
"reserved_tokens",
")",
"return",
"reserved_tokens",
",",
"reserved_tokens_re"
] |
Prepare reserved tokens and a regex for splitting them out of strings.
|
[
"Prepare",
"reserved",
"tokens",
"and",
"a",
"regex",
"for",
"splitting",
"them",
"out",
"of",
"strings",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L447-L454
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/text_encoder.py
|
_make_reserved_tokens_re
|
def _make_reserved_tokens_re(reserved_tokens):
"""Constructs compiled regex to parse out reserved tokens."""
if not reserved_tokens:
return None
escaped_tokens = [_re_escape(rt) for rt in reserved_tokens]
pattern = "(%s)" % "|".join(escaped_tokens)
reserved_tokens_re = _re_compile(pattern)
return reserved_tokens_re
|
python
|
def _make_reserved_tokens_re(reserved_tokens):
"""Constructs compiled regex to parse out reserved tokens."""
if not reserved_tokens:
return None
escaped_tokens = [_re_escape(rt) for rt in reserved_tokens]
pattern = "(%s)" % "|".join(escaped_tokens)
reserved_tokens_re = _re_compile(pattern)
return reserved_tokens_re
|
[
"def",
"_make_reserved_tokens_re",
"(",
"reserved_tokens",
")",
":",
"if",
"not",
"reserved_tokens",
":",
"return",
"None",
"escaped_tokens",
"=",
"[",
"_re_escape",
"(",
"rt",
")",
"for",
"rt",
"in",
"reserved_tokens",
"]",
"pattern",
"=",
"\"(%s)\"",
"%",
"\"|\"",
".",
"join",
"(",
"escaped_tokens",
")",
"reserved_tokens_re",
"=",
"_re_compile",
"(",
"pattern",
")",
"return",
"reserved_tokens_re"
] |
Constructs compiled regex to parse out reserved tokens.
|
[
"Constructs",
"compiled",
"regex",
"to",
"parse",
"out",
"reserved",
"tokens",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L463-L470
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/text_encoder.py
|
write_lines_to_file
|
def write_lines_to_file(cls_name, filename, lines, metadata_dict):
"""Writes lines to file prepended by header and metadata."""
metadata_dict = metadata_dict or {}
header_line = "%s%s" % (_HEADER_PREFIX, cls_name)
metadata_line = "%s%s" % (_METADATA_PREFIX,
json.dumps(metadata_dict, sort_keys=True))
with tf.io.gfile.GFile(filename, "wb") as f:
for line in [header_line, metadata_line]:
f.write(tf.compat.as_bytes(line))
f.write(tf.compat.as_bytes("\n"))
if lines:
f.write(tf.compat.as_bytes("\n".join(lines)))
f.write(tf.compat.as_bytes("\n"))
|
python
|
def write_lines_to_file(cls_name, filename, lines, metadata_dict):
"""Writes lines to file prepended by header and metadata."""
metadata_dict = metadata_dict or {}
header_line = "%s%s" % (_HEADER_PREFIX, cls_name)
metadata_line = "%s%s" % (_METADATA_PREFIX,
json.dumps(metadata_dict, sort_keys=True))
with tf.io.gfile.GFile(filename, "wb") as f:
for line in [header_line, metadata_line]:
f.write(tf.compat.as_bytes(line))
f.write(tf.compat.as_bytes("\n"))
if lines:
f.write(tf.compat.as_bytes("\n".join(lines)))
f.write(tf.compat.as_bytes("\n"))
|
[
"def",
"write_lines_to_file",
"(",
"cls_name",
",",
"filename",
",",
"lines",
",",
"metadata_dict",
")",
":",
"metadata_dict",
"=",
"metadata_dict",
"or",
"{",
"}",
"header_line",
"=",
"\"%s%s\"",
"%",
"(",
"_HEADER_PREFIX",
",",
"cls_name",
")",
"metadata_line",
"=",
"\"%s%s\"",
"%",
"(",
"_METADATA_PREFIX",
",",
"json",
".",
"dumps",
"(",
"metadata_dict",
",",
"sort_keys",
"=",
"True",
")",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filename",
",",
"\"wb\"",
")",
"as",
"f",
":",
"for",
"line",
"in",
"[",
"header_line",
",",
"metadata_line",
"]",
":",
"f",
".",
"write",
"(",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"line",
")",
")",
"f",
".",
"write",
"(",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"\"\\n\"",
")",
")",
"if",
"lines",
":",
"f",
".",
"write",
"(",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
")",
")",
"f",
".",
"write",
"(",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"\"\\n\"",
")",
")"
] |
Writes lines to file prepended by header and metadata.
|
[
"Writes",
"lines",
"to",
"file",
"prepended",
"by",
"header",
"and",
"metadata",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L492-L504
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/text_encoder.py
|
read_lines_from_file
|
def read_lines_from_file(cls_name, filename):
"""Read lines from file, parsing out header and metadata."""
with tf.io.gfile.GFile(filename, "rb") as f:
lines = [tf.compat.as_text(line)[:-1] for line in f]
header_line = "%s%s" % (_HEADER_PREFIX, cls_name)
if lines[0] != header_line:
raise ValueError("File {fname} does not seem to have been created from "
"{name}.save_to_file.".format(
fname=filename, name=cls_name))
metadata_dict = json.loads(lines[1][len(_METADATA_PREFIX):])
return lines[2:], metadata_dict
|
python
|
def read_lines_from_file(cls_name, filename):
"""Read lines from file, parsing out header and metadata."""
with tf.io.gfile.GFile(filename, "rb") as f:
lines = [tf.compat.as_text(line)[:-1] for line in f]
header_line = "%s%s" % (_HEADER_PREFIX, cls_name)
if lines[0] != header_line:
raise ValueError("File {fname} does not seem to have been created from "
"{name}.save_to_file.".format(
fname=filename, name=cls_name))
metadata_dict = json.loads(lines[1][len(_METADATA_PREFIX):])
return lines[2:], metadata_dict
|
[
"def",
"read_lines_from_file",
"(",
"cls_name",
",",
"filename",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"f",
":",
"lines",
"=",
"[",
"tf",
".",
"compat",
".",
"as_text",
"(",
"line",
")",
"[",
":",
"-",
"1",
"]",
"for",
"line",
"in",
"f",
"]",
"header_line",
"=",
"\"%s%s\"",
"%",
"(",
"_HEADER_PREFIX",
",",
"cls_name",
")",
"if",
"lines",
"[",
"0",
"]",
"!=",
"header_line",
":",
"raise",
"ValueError",
"(",
"\"File {fname} does not seem to have been created from \"",
"\"{name}.save_to_file.\"",
".",
"format",
"(",
"fname",
"=",
"filename",
",",
"name",
"=",
"cls_name",
")",
")",
"metadata_dict",
"=",
"json",
".",
"loads",
"(",
"lines",
"[",
"1",
"]",
"[",
"len",
"(",
"_METADATA_PREFIX",
")",
":",
"]",
")",
"return",
"lines",
"[",
"2",
":",
"]",
",",
"metadata_dict"
] |
Read lines from file, parsing out header and metadata.
|
[
"Read",
"lines",
"from",
"file",
"parsing",
"out",
"header",
"and",
"metadata",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L507-L517
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/text_encoder.py
|
Tokenizer.tokenize
|
def tokenize(self, s):
"""Splits a string into tokens."""
s = tf.compat.as_text(s)
if self.reserved_tokens:
# First split out the reserved tokens
substrs = self._reserved_tokens_re.split(s)
else:
substrs = [s]
toks = []
for substr in substrs:
if substr in self.reserved_tokens:
toks.append(substr)
else:
toks.extend(self._alphanum_re.split(substr))
# Filter out empty strings
toks = [t for t in toks if t]
return toks
|
python
|
def tokenize(self, s):
"""Splits a string into tokens."""
s = tf.compat.as_text(s)
if self.reserved_tokens:
# First split out the reserved tokens
substrs = self._reserved_tokens_re.split(s)
else:
substrs = [s]
toks = []
for substr in substrs:
if substr in self.reserved_tokens:
toks.append(substr)
else:
toks.extend(self._alphanum_re.split(substr))
# Filter out empty strings
toks = [t for t in toks if t]
return toks
|
[
"def",
"tokenize",
"(",
"self",
",",
"s",
")",
":",
"s",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"s",
")",
"if",
"self",
".",
"reserved_tokens",
":",
"# First split out the reserved tokens",
"substrs",
"=",
"self",
".",
"_reserved_tokens_re",
".",
"split",
"(",
"s",
")",
"else",
":",
"substrs",
"=",
"[",
"s",
"]",
"toks",
"=",
"[",
"]",
"for",
"substr",
"in",
"substrs",
":",
"if",
"substr",
"in",
"self",
".",
"reserved_tokens",
":",
"toks",
".",
"append",
"(",
"substr",
")",
"else",
":",
"toks",
".",
"extend",
"(",
"self",
".",
"_alphanum_re",
".",
"split",
"(",
"substr",
")",
")",
"# Filter out empty strings",
"toks",
"=",
"[",
"t",
"for",
"t",
"in",
"toks",
"if",
"t",
"]",
"return",
"toks"
] |
Splits a string into tokens.
|
[
"Splits",
"a",
"string",
"into",
"tokens",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L378-L397
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/splits.py
|
slice_to_percent_mask
|
def slice_to_percent_mask(slice_value):
"""Convert a python slice [15:50] into a list[bool] mask of 100 elements."""
if slice_value is None:
slice_value = slice(None)
# Select only the elements of the slice
selected = set(list(range(100))[slice_value])
# Create the binary mask
return [i in selected for i in range(100)]
|
python
|
def slice_to_percent_mask(slice_value):
"""Convert a python slice [15:50] into a list[bool] mask of 100 elements."""
if slice_value is None:
slice_value = slice(None)
# Select only the elements of the slice
selected = set(list(range(100))[slice_value])
# Create the binary mask
return [i in selected for i in range(100)]
|
[
"def",
"slice_to_percent_mask",
"(",
"slice_value",
")",
":",
"if",
"slice_value",
"is",
"None",
":",
"slice_value",
"=",
"slice",
"(",
"None",
")",
"# Select only the elements of the slice",
"selected",
"=",
"set",
"(",
"list",
"(",
"range",
"(",
"100",
")",
")",
"[",
"slice_value",
"]",
")",
"# Create the binary mask",
"return",
"[",
"i",
"in",
"selected",
"for",
"i",
"in",
"range",
"(",
"100",
")",
"]"
] |
Convert a python slice [15:50] into a list[bool] mask of 100 elements.
|
[
"Convert",
"a",
"python",
"slice",
"[",
"15",
":",
"50",
"]",
"into",
"a",
"list",
"[",
"bool",
"]",
"mask",
"of",
"100",
"elements",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L479-L486
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/splits.py
|
get_shard_id2num_examples
|
def get_shard_id2num_examples(num_shards, total_num_examples):
"""Return the mapping shard_id=>num_examples, assuming round-robin."""
# TODO(b/130353071): This has the strong assumption that the shards have
# been written in a round-robin fashion. This assumption does not hold, for
# instance, with Beam generation. The mapping shard_id=>num_examples
# should be computed during generation.
# Minimum number of example per shards
num_example_in_shard = total_num_examples // num_shards
shard_id2num_examples = [num_example_in_shard for _ in range(num_shards)]
# If there are remaining examples, we add them to the first shards
for shard_id in range(total_num_examples % num_shards):
shard_id2num_examples[shard_id] += 1
return shard_id2num_examples
|
python
|
def get_shard_id2num_examples(num_shards, total_num_examples):
"""Return the mapping shard_id=>num_examples, assuming round-robin."""
# TODO(b/130353071): This has the strong assumption that the shards have
# been written in a round-robin fashion. This assumption does not hold, for
# instance, with Beam generation. The mapping shard_id=>num_examples
# should be computed during generation.
# Minimum number of example per shards
num_example_in_shard = total_num_examples // num_shards
shard_id2num_examples = [num_example_in_shard for _ in range(num_shards)]
# If there are remaining examples, we add them to the first shards
for shard_id in range(total_num_examples % num_shards):
shard_id2num_examples[shard_id] += 1
return shard_id2num_examples
|
[
"def",
"get_shard_id2num_examples",
"(",
"num_shards",
",",
"total_num_examples",
")",
":",
"# TODO(b/130353071): This has the strong assumption that the shards have",
"# been written in a round-robin fashion. This assumption does not hold, for",
"# instance, with Beam generation. The mapping shard_id=>num_examples",
"# should be computed during generation.",
"# Minimum number of example per shards",
"num_example_in_shard",
"=",
"total_num_examples",
"//",
"num_shards",
"shard_id2num_examples",
"=",
"[",
"num_example_in_shard",
"for",
"_",
"in",
"range",
"(",
"num_shards",
")",
"]",
"# If there are remaining examples, we add them to the first shards",
"for",
"shard_id",
"in",
"range",
"(",
"total_num_examples",
"%",
"num_shards",
")",
":",
"shard_id2num_examples",
"[",
"shard_id",
"]",
"+=",
"1",
"return",
"shard_id2num_examples"
] |
Return the mapping shard_id=>num_examples, assuming round-robin.
|
[
"Return",
"the",
"mapping",
"shard_id",
"=",
">",
"num_examples",
"assuming",
"round",
"-",
"robin",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L489-L502
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/splits.py
|
compute_mask_offsets
|
def compute_mask_offsets(shard_id2num_examples):
"""Return the list of offsets associated with each shards.
Args:
shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples
Returns:
mask_offsets: `list[int]`, offset to skip for each of the shard
"""
total_num_examples = sum(shard_id2num_examples)
mask_offsets = []
total_num_examples = 0
for num_examples_in_shard in shard_id2num_examples:
# The offset (nb of examples to skip in the next shard) correspond to the
# number of examples remaining in the current shard
mask_offsets.append(total_num_examples % 100)
total_num_examples += num_examples_in_shard
return mask_offsets
|
python
|
def compute_mask_offsets(shard_id2num_examples):
"""Return the list of offsets associated with each shards.
Args:
shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples
Returns:
mask_offsets: `list[int]`, offset to skip for each of the shard
"""
total_num_examples = sum(shard_id2num_examples)
mask_offsets = []
total_num_examples = 0
for num_examples_in_shard in shard_id2num_examples:
# The offset (nb of examples to skip in the next shard) correspond to the
# number of examples remaining in the current shard
mask_offsets.append(total_num_examples % 100)
total_num_examples += num_examples_in_shard
return mask_offsets
|
[
"def",
"compute_mask_offsets",
"(",
"shard_id2num_examples",
")",
":",
"total_num_examples",
"=",
"sum",
"(",
"shard_id2num_examples",
")",
"mask_offsets",
"=",
"[",
"]",
"total_num_examples",
"=",
"0",
"for",
"num_examples_in_shard",
"in",
"shard_id2num_examples",
":",
"# The offset (nb of examples to skip in the next shard) correspond to the",
"# number of examples remaining in the current shard",
"mask_offsets",
".",
"append",
"(",
"total_num_examples",
"%",
"100",
")",
"total_num_examples",
"+=",
"num_examples_in_shard",
"return",
"mask_offsets"
] |
Return the list of offsets associated with each shards.
Args:
shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples
Returns:
mask_offsets: `list[int]`, offset to skip for each of the shard
|
[
"Return",
"the",
"list",
"of",
"offsets",
"associated",
"with",
"each",
"shards",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L505-L524
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/splits.py
|
check_splits_equals
|
def check_splits_equals(splits1, splits2):
"""Check that the two split dicts have the same names and num_shards."""
if set(splits1) ^ set(splits2): # Name intersection should be null
return False
for _, (split1, split2) in utils.zip_dict(splits1, splits2):
if split1.num_shards != split2.num_shards:
return False
return True
|
python
|
def check_splits_equals(splits1, splits2):
"""Check that the two split dicts have the same names and num_shards."""
if set(splits1) ^ set(splits2): # Name intersection should be null
return False
for _, (split1, split2) in utils.zip_dict(splits1, splits2):
if split1.num_shards != split2.num_shards:
return False
return True
|
[
"def",
"check_splits_equals",
"(",
"splits1",
",",
"splits2",
")",
":",
"if",
"set",
"(",
"splits1",
")",
"^",
"set",
"(",
"splits2",
")",
":",
"# Name intersection should be null",
"return",
"False",
"for",
"_",
",",
"(",
"split1",
",",
"split2",
")",
"in",
"utils",
".",
"zip_dict",
"(",
"splits1",
",",
"splits2",
")",
":",
"if",
"split1",
".",
"num_shards",
"!=",
"split2",
".",
"num_shards",
":",
"return",
"False",
"return",
"True"
] |
Check that the two split dicts have the same names and num_shards.
|
[
"Check",
"that",
"the",
"two",
"split",
"dicts",
"have",
"the",
"same",
"names",
"and",
"num_shards",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L573-L580
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/splits.py
|
SplitDict.add
|
def add(self, split_info):
"""Add the split info."""
if split_info.name in self:
raise ValueError("Split {} already present".format(split_info.name))
# TODO(epot): Make sure this works with Named splits correctly.
super(SplitDict, self).__setitem__(split_info.name, split_info)
|
python
|
def add(self, split_info):
"""Add the split info."""
if split_info.name in self:
raise ValueError("Split {} already present".format(split_info.name))
# TODO(epot): Make sure this works with Named splits correctly.
super(SplitDict, self).__setitem__(split_info.name, split_info)
|
[
"def",
"add",
"(",
"self",
",",
"split_info",
")",
":",
"if",
"split_info",
".",
"name",
"in",
"self",
":",
"raise",
"ValueError",
"(",
"\"Split {} already present\"",
".",
"format",
"(",
"split_info",
".",
"name",
")",
")",
"# TODO(epot): Make sure this works with Named splits correctly.",
"super",
"(",
"SplitDict",
",",
"self",
")",
".",
"__setitem__",
"(",
"split_info",
".",
"name",
",",
"split_info",
")"
] |
Add the split info.
|
[
"Add",
"the",
"split",
"info",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L542-L547
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/splits.py
|
SplitDict.from_proto
|
def from_proto(cls, repeated_split_infos):
"""Returns a new SplitDict initialized from the `repeated_split_infos`."""
split_dict = cls()
for split_info_proto in repeated_split_infos:
split_info = SplitInfo()
split_info.CopyFrom(split_info_proto)
split_dict.add(split_info)
return split_dict
|
python
|
def from_proto(cls, repeated_split_infos):
"""Returns a new SplitDict initialized from the `repeated_split_infos`."""
split_dict = cls()
for split_info_proto in repeated_split_infos:
split_info = SplitInfo()
split_info.CopyFrom(split_info_proto)
split_dict.add(split_info)
return split_dict
|
[
"def",
"from_proto",
"(",
"cls",
",",
"repeated_split_infos",
")",
":",
"split_dict",
"=",
"cls",
"(",
")",
"for",
"split_info_proto",
"in",
"repeated_split_infos",
":",
"split_info",
"=",
"SplitInfo",
"(",
")",
"split_info",
".",
"CopyFrom",
"(",
"split_info_proto",
")",
"split_dict",
".",
"add",
"(",
"split_info",
")",
"return",
"split_dict"
] |
Returns a new SplitDict initialized from the `repeated_split_infos`.
|
[
"Returns",
"a",
"new",
"SplitDict",
"initialized",
"from",
"the",
"repeated_split_infos",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L550-L557
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/splits.py
|
SplitDict.to_proto
|
def to_proto(self):
"""Returns a list of SplitInfo protos that we have."""
# Return the proto.SplitInfo, sorted by name
return sorted((s.get_proto() for s in self.values()), key=lambda s: s.name)
|
python
|
def to_proto(self):
"""Returns a list of SplitInfo protos that we have."""
# Return the proto.SplitInfo, sorted by name
return sorted((s.get_proto() for s in self.values()), key=lambda s: s.name)
|
[
"def",
"to_proto",
"(",
"self",
")",
":",
"# Return the proto.SplitInfo, sorted by name",
"return",
"sorted",
"(",
"(",
"s",
".",
"get_proto",
"(",
")",
"for",
"s",
"in",
"self",
".",
"values",
"(",
")",
")",
",",
"key",
"=",
"lambda",
"s",
":",
"s",
".",
"name",
")"
] |
Returns a list of SplitInfo protos that we have.
|
[
"Returns",
"a",
"list",
"of",
"SplitInfo",
"protos",
"that",
"we",
"have",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L559-L562
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/squad.py
|
Squad._generate_examples
|
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
squad = json.load(f)
for article in squad["data"]:
if "title" in article:
title = article["title"].strip()
else:
title = ""
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
example = {
"title": title,
"context": context,
"question": question,
"id": id_,
"answer_starts": answer_starts,
"answers": answers,
}
yield {
"question": example["question"],
# TODO(b/121176753): return all the answers.
"first_answer": example["answers"][0],
"context": example["context"]
}
|
python
|
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
squad = json.load(f)
for article in squad["data"]:
if "title" in article:
title = article["title"].strip()
else:
title = ""
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
example = {
"title": title,
"context": context,
"question": question,
"id": id_,
"answer_starts": answer_starts,
"answers": answers,
}
yield {
"question": example["question"],
# TODO(b/121176753): return all the answers.
"first_answer": example["answers"][0],
"context": example["context"]
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"logging",
".",
"info",
"(",
"\"generating examples from = %s\"",
",",
"filepath",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
")",
"as",
"f",
":",
"squad",
"=",
"json",
".",
"load",
"(",
"f",
")",
"for",
"article",
"in",
"squad",
"[",
"\"data\"",
"]",
":",
"if",
"\"title\"",
"in",
"article",
":",
"title",
"=",
"article",
"[",
"\"title\"",
"]",
".",
"strip",
"(",
")",
"else",
":",
"title",
"=",
"\"\"",
"for",
"paragraph",
"in",
"article",
"[",
"\"paragraphs\"",
"]",
":",
"context",
"=",
"paragraph",
"[",
"\"context\"",
"]",
".",
"strip",
"(",
")",
"for",
"qa",
"in",
"paragraph",
"[",
"\"qas\"",
"]",
":",
"question",
"=",
"qa",
"[",
"\"question\"",
"]",
".",
"strip",
"(",
")",
"id_",
"=",
"qa",
"[",
"\"id\"",
"]",
"answer_starts",
"=",
"[",
"answer",
"[",
"\"answer_start\"",
"]",
"for",
"answer",
"in",
"qa",
"[",
"\"answers\"",
"]",
"]",
"answers",
"=",
"[",
"answer",
"[",
"\"text\"",
"]",
".",
"strip",
"(",
")",
"for",
"answer",
"in",
"qa",
"[",
"\"answers\"",
"]",
"]",
"# Features currently used are \"context\", \"question\", and \"answers\".",
"# Others are extracted here for the ease of future expansions.",
"example",
"=",
"{",
"\"title\"",
":",
"title",
",",
"\"context\"",
":",
"context",
",",
"\"question\"",
":",
"question",
",",
"\"id\"",
":",
"id_",
",",
"\"answer_starts\"",
":",
"answer_starts",
",",
"\"answers\"",
":",
"answers",
",",
"}",
"yield",
"{",
"\"question\"",
":",
"example",
"[",
"\"question\"",
"]",
",",
"# TODO(b/121176753): return all the answers.",
"\"first_answer\"",
":",
"example",
"[",
"\"answers\"",
"]",
"[",
"0",
"]",
",",
"\"context\"",
":",
"example",
"[",
"\"context\"",
"]",
"}"
] |
This function returns the examples in the raw (text) form.
|
[
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/squad.py#L164-L198
|
train
|
tensorflow/datasets
|
tensorflow_datasets/translate/para_crawl.py
|
ParaCrawl._generate_examples
|
def _generate_examples(self, data_file):
"""This function returns the examples in the raw (text) form."""
target_language = self.builder_config.target_language
with tf.io.gfile.GFile(data_file) as f:
for i, line in enumerate(f):
line_parts = line.strip().split("\t")
if len(line_parts) != 2:
raise ValueError(("Wrong data format in line {}. The line '{}' does "
"not have exactly one delimiter.").format(i, line))
source, target = line_parts[0].strip(), line_parts[1].strip()
yield {"en": source, target_language: target}
|
python
|
def _generate_examples(self, data_file):
"""This function returns the examples in the raw (text) form."""
target_language = self.builder_config.target_language
with tf.io.gfile.GFile(data_file) as f:
for i, line in enumerate(f):
line_parts = line.strip().split("\t")
if len(line_parts) != 2:
raise ValueError(("Wrong data format in line {}. The line '{}' does "
"not have exactly one delimiter.").format(i, line))
source, target = line_parts[0].strip(), line_parts[1].strip()
yield {"en": source, target_language: target}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"data_file",
")",
":",
"target_language",
"=",
"self",
".",
"builder_config",
".",
"target_language",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"data_file",
")",
"as",
"f",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"f",
")",
":",
"line_parts",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"if",
"len",
"(",
"line_parts",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"(",
"\"Wrong data format in line {}. The line '{}' does \"",
"\"not have exactly one delimiter.\"",
")",
".",
"format",
"(",
"i",
",",
"line",
")",
")",
"source",
",",
"target",
"=",
"line_parts",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"line_parts",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"yield",
"{",
"\"en\"",
":",
"source",
",",
"target_language",
":",
"target",
"}"
] |
This function returns the examples in the raw (text) form.
|
[
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/para_crawl.py#L148-L160
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/util.py
|
build_synchronize_decorator
|
def build_synchronize_decorator():
"""Returns a decorator which prevents concurrent calls to functions.
Usage:
synchronized = build_synchronize_decorator()
@synchronized
def read_value():
...
@synchronized
def write_value(x):
...
Returns:
make_threadsafe (fct): The decorator which lock all functions to which it
is applied under a same lock
"""
lock = threading.Lock()
def lock_decorator(fn):
@functools.wraps(fn)
def lock_decorated(*args, **kwargs):
with lock:
return fn(*args, **kwargs)
return lock_decorated
return lock_decorator
|
python
|
def build_synchronize_decorator():
"""Returns a decorator which prevents concurrent calls to functions.
Usage:
synchronized = build_synchronize_decorator()
@synchronized
def read_value():
...
@synchronized
def write_value(x):
...
Returns:
make_threadsafe (fct): The decorator which lock all functions to which it
is applied under a same lock
"""
lock = threading.Lock()
def lock_decorator(fn):
@functools.wraps(fn)
def lock_decorated(*args, **kwargs):
with lock:
return fn(*args, **kwargs)
return lock_decorated
return lock_decorator
|
[
"def",
"build_synchronize_decorator",
"(",
")",
":",
"lock",
"=",
"threading",
".",
"Lock",
"(",
")",
"def",
"lock_decorator",
"(",
"fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"lock_decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"lock",
":",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"lock_decorated",
"return",
"lock_decorator"
] |
Returns a decorator which prevents concurrent calls to functions.
Usage:
synchronized = build_synchronize_decorator()
@synchronized
def read_value():
...
@synchronized
def write_value(x):
...
Returns:
make_threadsafe (fct): The decorator which lock all functions to which it
is applied under a same lock
|
[
"Returns",
"a",
"decorator",
"which",
"prevents",
"concurrent",
"calls",
"to",
"functions",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/util.py#L72-L101
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/util.py
|
get_file_name
|
def get_file_name(url):
"""Returns file name of file at given url."""
return os.path.basename(urllib.parse.urlparse(url).path) or 'unknown_name'
|
python
|
def get_file_name(url):
"""Returns file name of file at given url."""
return os.path.basename(urllib.parse.urlparse(url).path) or 'unknown_name'
|
[
"def",
"get_file_name",
"(",
"url",
")",
":",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
".",
"path",
")",
"or",
"'unknown_name'"
] |
Returns file name of file at given url.
|
[
"Returns",
"file",
"name",
"of",
"file",
"at",
"given",
"url",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/util.py#L104-L106
|
train
|
tensorflow/datasets
|
tensorflow_datasets/audio/librispeech.py
|
_make_builder_configs
|
def _make_builder_configs():
"""Make built-in Librispeech BuilderConfigs.
Uses 4 text encodings (plain text, bytes, subwords with 8k vocab, subwords
with 32k vocab) crossed with the data subsets (clean100, clean360, all).
Returns:
`list<tfds.audio.LibrispeechConfig>`
"""
text_encoder_configs = [
None,
tfds.features.text.TextEncoderConfig(
name="bytes", encoder=tfds.features.text.ByteTextEncoder()),
tfds.features.text.TextEncoderConfig(
name="subwords8k",
encoder_cls=tfds.features.text.SubwordTextEncoder,
vocab_size=2**13),
tfds.features.text.TextEncoderConfig(
name="subwords32k",
encoder_cls=tfds.features.text.SubwordTextEncoder,
vocab_size=2**15),
]
version = "0.1.0"
configs = []
for text_encoder_config in text_encoder_configs:
for data in _DATA_OPTIONS:
config = LibrispeechConfig(
version=version, text_encoder_config=text_encoder_config, data=data)
configs.append(config)
return configs
|
python
|
def _make_builder_configs():
"""Make built-in Librispeech BuilderConfigs.
Uses 4 text encodings (plain text, bytes, subwords with 8k vocab, subwords
with 32k vocab) crossed with the data subsets (clean100, clean360, all).
Returns:
`list<tfds.audio.LibrispeechConfig>`
"""
text_encoder_configs = [
None,
tfds.features.text.TextEncoderConfig(
name="bytes", encoder=tfds.features.text.ByteTextEncoder()),
tfds.features.text.TextEncoderConfig(
name="subwords8k",
encoder_cls=tfds.features.text.SubwordTextEncoder,
vocab_size=2**13),
tfds.features.text.TextEncoderConfig(
name="subwords32k",
encoder_cls=tfds.features.text.SubwordTextEncoder,
vocab_size=2**15),
]
version = "0.1.0"
configs = []
for text_encoder_config in text_encoder_configs:
for data in _DATA_OPTIONS:
config = LibrispeechConfig(
version=version, text_encoder_config=text_encoder_config, data=data)
configs.append(config)
return configs
|
[
"def",
"_make_builder_configs",
"(",
")",
":",
"text_encoder_configs",
"=",
"[",
"None",
",",
"tfds",
".",
"features",
".",
"text",
".",
"TextEncoderConfig",
"(",
"name",
"=",
"\"bytes\"",
",",
"encoder",
"=",
"tfds",
".",
"features",
".",
"text",
".",
"ByteTextEncoder",
"(",
")",
")",
",",
"tfds",
".",
"features",
".",
"text",
".",
"TextEncoderConfig",
"(",
"name",
"=",
"\"subwords8k\"",
",",
"encoder_cls",
"=",
"tfds",
".",
"features",
".",
"text",
".",
"SubwordTextEncoder",
",",
"vocab_size",
"=",
"2",
"**",
"13",
")",
",",
"tfds",
".",
"features",
".",
"text",
".",
"TextEncoderConfig",
"(",
"name",
"=",
"\"subwords32k\"",
",",
"encoder_cls",
"=",
"tfds",
".",
"features",
".",
"text",
".",
"SubwordTextEncoder",
",",
"vocab_size",
"=",
"2",
"**",
"15",
")",
",",
"]",
"version",
"=",
"\"0.1.0\"",
"configs",
"=",
"[",
"]",
"for",
"text_encoder_config",
"in",
"text_encoder_configs",
":",
"for",
"data",
"in",
"_DATA_OPTIONS",
":",
"config",
"=",
"LibrispeechConfig",
"(",
"version",
"=",
"version",
",",
"text_encoder_config",
"=",
"text_encoder_config",
",",
"data",
"=",
"data",
")",
"configs",
".",
"append",
"(",
"config",
")",
"return",
"configs"
] |
Make built-in Librispeech BuilderConfigs.
Uses 4 text encodings (plain text, bytes, subwords with 8k vocab, subwords
with 32k vocab) crossed with the data subsets (clean100, clean360, all).
Returns:
`list<tfds.audio.LibrispeechConfig>`
|
[
"Make",
"built",
"-",
"in",
"Librispeech",
"BuilderConfigs",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/audio/librispeech.py#L130-L159
|
train
|
tensorflow/datasets
|
tensorflow_datasets/audio/librispeech.py
|
_walk_librispeech_dir
|
def _walk_librispeech_dir(directory):
"""Walk a Librispeech directory and yield examples."""
directory = os.path.join(directory, "LibriSpeech")
for path, _, files in tf.io.gfile.walk(directory):
if not files:
continue
transcript_file = [f for f in files if f.endswith(".txt")]
if not transcript_file:
continue
assert len(transcript_file) == 1
transcript_file, = transcript_file
transcripts = {}
with tf.io.gfile.GFile(os.path.join(path, transcript_file)) as f:
for line in f:
line = line.strip()
key, transcript = line.split(" ", 1)
transcripts[key] = transcript
audio_files = [f for f in files if not f.endswith(".txt")]
for audio_file in audio_files:
assert audio_file.endswith(".flac")
key = audio_file[:-len(".flac")]
transcript = transcripts[key]
speaker_id, chapter_id = [int(el) for el in key.split("-")[:2]]
yield LibrispeechExample(
speaker_id=speaker_id,
chapter_id=chapter_id,
audio_file=os.path.join(path, audio_file),
transcript=transcript)
|
python
|
def _walk_librispeech_dir(directory):
"""Walk a Librispeech directory and yield examples."""
directory = os.path.join(directory, "LibriSpeech")
for path, _, files in tf.io.gfile.walk(directory):
if not files:
continue
transcript_file = [f for f in files if f.endswith(".txt")]
if not transcript_file:
continue
assert len(transcript_file) == 1
transcript_file, = transcript_file
transcripts = {}
with tf.io.gfile.GFile(os.path.join(path, transcript_file)) as f:
for line in f:
line = line.strip()
key, transcript = line.split(" ", 1)
transcripts[key] = transcript
audio_files = [f for f in files if not f.endswith(".txt")]
for audio_file in audio_files:
assert audio_file.endswith(".flac")
key = audio_file[:-len(".flac")]
transcript = transcripts[key]
speaker_id, chapter_id = [int(el) for el in key.split("-")[:2]]
yield LibrispeechExample(
speaker_id=speaker_id,
chapter_id=chapter_id,
audio_file=os.path.join(path, audio_file),
transcript=transcript)
|
[
"def",
"_walk_librispeech_dir",
"(",
"directory",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"LibriSpeech\"",
")",
"for",
"path",
",",
"_",
",",
"files",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"walk",
"(",
"directory",
")",
":",
"if",
"not",
"files",
":",
"continue",
"transcript_file",
"=",
"[",
"f",
"for",
"f",
"in",
"files",
"if",
"f",
".",
"endswith",
"(",
"\".txt\"",
")",
"]",
"if",
"not",
"transcript_file",
":",
"continue",
"assert",
"len",
"(",
"transcript_file",
")",
"==",
"1",
"transcript_file",
",",
"=",
"transcript_file",
"transcripts",
"=",
"{",
"}",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"transcript_file",
")",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"key",
",",
"transcript",
"=",
"line",
".",
"split",
"(",
"\" \"",
",",
"1",
")",
"transcripts",
"[",
"key",
"]",
"=",
"transcript",
"audio_files",
"=",
"[",
"f",
"for",
"f",
"in",
"files",
"if",
"not",
"f",
".",
"endswith",
"(",
"\".txt\"",
")",
"]",
"for",
"audio_file",
"in",
"audio_files",
":",
"assert",
"audio_file",
".",
"endswith",
"(",
"\".flac\"",
")",
"key",
"=",
"audio_file",
"[",
":",
"-",
"len",
"(",
"\".flac\"",
")",
"]",
"transcript",
"=",
"transcripts",
"[",
"key",
"]",
"speaker_id",
",",
"chapter_id",
"=",
"[",
"int",
"(",
"el",
")",
"for",
"el",
"in",
"key",
".",
"split",
"(",
"\"-\"",
")",
"[",
":",
"2",
"]",
"]",
"yield",
"LibrispeechExample",
"(",
"speaker_id",
"=",
"speaker_id",
",",
"chapter_id",
"=",
"chapter_id",
",",
"audio_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"audio_file",
")",
",",
"transcript",
"=",
"transcript",
")"
] |
Walk a Librispeech directory and yield examples.
|
[
"Walk",
"a",
"Librispeech",
"directory",
"and",
"yield",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/audio/librispeech.py#L237-L265
|
train
|
tensorflow/datasets
|
tensorflow_datasets/audio/librispeech.py
|
LibrispeechConfig.download_urls
|
def download_urls(self):
"""Returns download urls for this config."""
urls = {
tfds.Split.TRAIN: ["train_clean100"],
tfds.Split.VALIDATION: ["dev_clean"],
tfds.Split.TEST: ["test_clean"],
}
if self.data in ["all", "clean360"]:
urls[tfds.Split.TRAIN].append("train_clean360")
if self.data == "all":
urls[tfds.Split.TRAIN].extend(["train_clean360", "train_other500"])
urls[tfds.Split.VALIDATION].append("dev_other")
urls[tfds.Split.TEST].append("test_other")
urls = {
split: [_DL_URLS[name] for name in names
] for split, names in urls.items()
}
return urls
|
python
|
def download_urls(self):
"""Returns download urls for this config."""
urls = {
tfds.Split.TRAIN: ["train_clean100"],
tfds.Split.VALIDATION: ["dev_clean"],
tfds.Split.TEST: ["test_clean"],
}
if self.data in ["all", "clean360"]:
urls[tfds.Split.TRAIN].append("train_clean360")
if self.data == "all":
urls[tfds.Split.TRAIN].extend(["train_clean360", "train_other500"])
urls[tfds.Split.VALIDATION].append("dev_other")
urls[tfds.Split.TEST].append("test_other")
urls = {
split: [_DL_URLS[name] for name in names
] for split, names in urls.items()
}
return urls
|
[
"def",
"download_urls",
"(",
"self",
")",
":",
"urls",
"=",
"{",
"tfds",
".",
"Split",
".",
"TRAIN",
":",
"[",
"\"train_clean100\"",
"]",
",",
"tfds",
".",
"Split",
".",
"VALIDATION",
":",
"[",
"\"dev_clean\"",
"]",
",",
"tfds",
".",
"Split",
".",
"TEST",
":",
"[",
"\"test_clean\"",
"]",
",",
"}",
"if",
"self",
".",
"data",
"in",
"[",
"\"all\"",
",",
"\"clean360\"",
"]",
":",
"urls",
"[",
"tfds",
".",
"Split",
".",
"TRAIN",
"]",
".",
"append",
"(",
"\"train_clean360\"",
")",
"if",
"self",
".",
"data",
"==",
"\"all\"",
":",
"urls",
"[",
"tfds",
".",
"Split",
".",
"TRAIN",
"]",
".",
"extend",
"(",
"[",
"\"train_clean360\"",
",",
"\"train_other500\"",
"]",
")",
"urls",
"[",
"tfds",
".",
"Split",
".",
"VALIDATION",
"]",
".",
"append",
"(",
"\"dev_other\"",
")",
"urls",
"[",
"tfds",
".",
"Split",
".",
"TEST",
"]",
".",
"append",
"(",
"\"test_other\"",
")",
"urls",
"=",
"{",
"split",
":",
"[",
"_DL_URLS",
"[",
"name",
"]",
"for",
"name",
"in",
"names",
"]",
"for",
"split",
",",
"names",
"in",
"urls",
".",
"items",
"(",
")",
"}",
"return",
"urls"
] |
Returns download urls for this config.
|
[
"Returns",
"download",
"urls",
"for",
"this",
"config",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/audio/librispeech.py#L109-L127
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/class_label_feature.py
|
ClassLabel.str2int
|
def str2int(self, str_value):
"""Conversion class name string => integer."""
str_value = tf.compat.as_text(str_value)
if self._str2int:
return self._str2int[str_value]
# No names provided, try to integerize
failed_parse = False
try:
int_value = int(str_value)
except ValueError:
failed_parse = True
if failed_parse or not 0 <= int_value < self._num_classes:
raise ValueError("Invalid string class label %s" % str_value)
return int_value
|
python
|
def str2int(self, str_value):
"""Conversion class name string => integer."""
str_value = tf.compat.as_text(str_value)
if self._str2int:
return self._str2int[str_value]
# No names provided, try to integerize
failed_parse = False
try:
int_value = int(str_value)
except ValueError:
failed_parse = True
if failed_parse or not 0 <= int_value < self._num_classes:
raise ValueError("Invalid string class label %s" % str_value)
return int_value
|
[
"def",
"str2int",
"(",
"self",
",",
"str_value",
")",
":",
"str_value",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"str_value",
")",
"if",
"self",
".",
"_str2int",
":",
"return",
"self",
".",
"_str2int",
"[",
"str_value",
"]",
"# No names provided, try to integerize",
"failed_parse",
"=",
"False",
"try",
":",
"int_value",
"=",
"int",
"(",
"str_value",
")",
"except",
"ValueError",
":",
"failed_parse",
"=",
"True",
"if",
"failed_parse",
"or",
"not",
"0",
"<=",
"int_value",
"<",
"self",
".",
"_num_classes",
":",
"raise",
"ValueError",
"(",
"\"Invalid string class label %s\"",
"%",
"str_value",
")",
"return",
"int_value"
] |
Conversion class name string => integer.
|
[
"Conversion",
"class",
"name",
"string",
"=",
">",
"integer",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/class_label_feature.py#L99-L113
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/class_label_feature.py
|
ClassLabel.int2str
|
def int2str(self, int_value):
"""Conversion integer => class name string."""
if self._int2str:
# Maybe should support batched np array/eager tensors, to allow things
# like
# out_ids = model(inputs)
# labels = cifar10.info.features['label'].int2str(out_ids)
return self._int2str[int_value]
# No names provided, return str(int)
if not 0 <= int_value < self._num_classes:
raise ValueError("Invalid integer class label %d" % int_value)
return tf.compat.as_text(str(int_value))
|
python
|
def int2str(self, int_value):
"""Conversion integer => class name string."""
if self._int2str:
# Maybe should support batched np array/eager tensors, to allow things
# like
# out_ids = model(inputs)
# labels = cifar10.info.features['label'].int2str(out_ids)
return self._int2str[int_value]
# No names provided, return str(int)
if not 0 <= int_value < self._num_classes:
raise ValueError("Invalid integer class label %d" % int_value)
return tf.compat.as_text(str(int_value))
|
[
"def",
"int2str",
"(",
"self",
",",
"int_value",
")",
":",
"if",
"self",
".",
"_int2str",
":",
"# Maybe should support batched np array/eager tensors, to allow things",
"# like",
"# out_ids = model(inputs)",
"# labels = cifar10.info.features['label'].int2str(out_ids)",
"return",
"self",
".",
"_int2str",
"[",
"int_value",
"]",
"# No names provided, return str(int)",
"if",
"not",
"0",
"<=",
"int_value",
"<",
"self",
".",
"_num_classes",
":",
"raise",
"ValueError",
"(",
"\"Invalid integer class label %d\"",
"%",
"int_value",
")",
"return",
"tf",
".",
"compat",
".",
"as_text",
"(",
"str",
"(",
"int_value",
")",
")"
] |
Conversion integer => class name string.
|
[
"Conversion",
"integer",
"=",
">",
"class",
"name",
"string",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/class_label_feature.py#L115-L127
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/class_label_feature.py
|
ClassLabel.save_metadata
|
def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Save names if defined
if self._str2int is not None:
names_filepath = _get_names_filepath(data_dir, feature_name)
_write_names_to_file(names_filepath, self.names)
|
python
|
def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Save names if defined
if self._str2int is not None:
names_filepath = _get_names_filepath(data_dir, feature_name)
_write_names_to_file(names_filepath, self.names)
|
[
"def",
"save_metadata",
"(",
"self",
",",
"data_dir",
",",
"feature_name",
"=",
"None",
")",
":",
"# Save names if defined",
"if",
"self",
".",
"_str2int",
"is",
"not",
"None",
":",
"names_filepath",
"=",
"_get_names_filepath",
"(",
"data_dir",
",",
"feature_name",
")",
"_write_names_to_file",
"(",
"names_filepath",
",",
"self",
".",
"names",
")"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/class_label_feature.py#L152-L157
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/class_label_feature.py
|
ClassLabel.load_metadata
|
def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
names_filepath = _get_names_filepath(data_dir, feature_name)
if tf.io.gfile.exists(names_filepath):
self.names = _load_names_from_file(names_filepath)
|
python
|
def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
names_filepath = _get_names_filepath(data_dir, feature_name)
if tf.io.gfile.exists(names_filepath):
self.names = _load_names_from_file(names_filepath)
|
[
"def",
"load_metadata",
"(",
"self",
",",
"data_dir",
",",
"feature_name",
"=",
"None",
")",
":",
"# Restore names if defined",
"names_filepath",
"=",
"_get_names_filepath",
"(",
"data_dir",
",",
"feature_name",
")",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"names_filepath",
")",
":",
"self",
".",
"names",
"=",
"_load_names_from_file",
"(",
"names_filepath",
")"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/class_label_feature.py#L159-L164
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
_token_counts_from_generator
|
def _token_counts_from_generator(generator, max_chars, reserved_tokens):
"""Builds token counts from generator."""
reserved_tokens = list(reserved_tokens) + [_UNDERSCORE_REPLACEMENT]
tokenizer = text_encoder.Tokenizer(
alphanum_only=False, reserved_tokens=reserved_tokens)
num_chars = 0
token_counts = collections.defaultdict(int)
for s in generator:
s = tf.compat.as_text(s)
if max_chars and (num_chars + len(s)) >= max_chars:
s = s[:(max_chars - num_chars)]
tokens = tokenizer.tokenize(s)
tokens = _prepare_tokens_for_encode(tokens)
for t in tokens:
token_counts[t] += 1
if max_chars:
num_chars += len(s)
if num_chars > max_chars:
break
return token_counts
|
python
|
def _token_counts_from_generator(generator, max_chars, reserved_tokens):
"""Builds token counts from generator."""
reserved_tokens = list(reserved_tokens) + [_UNDERSCORE_REPLACEMENT]
tokenizer = text_encoder.Tokenizer(
alphanum_only=False, reserved_tokens=reserved_tokens)
num_chars = 0
token_counts = collections.defaultdict(int)
for s in generator:
s = tf.compat.as_text(s)
if max_chars and (num_chars + len(s)) >= max_chars:
s = s[:(max_chars - num_chars)]
tokens = tokenizer.tokenize(s)
tokens = _prepare_tokens_for_encode(tokens)
for t in tokens:
token_counts[t] += 1
if max_chars:
num_chars += len(s)
if num_chars > max_chars:
break
return token_counts
|
[
"def",
"_token_counts_from_generator",
"(",
"generator",
",",
"max_chars",
",",
"reserved_tokens",
")",
":",
"reserved_tokens",
"=",
"list",
"(",
"reserved_tokens",
")",
"+",
"[",
"_UNDERSCORE_REPLACEMENT",
"]",
"tokenizer",
"=",
"text_encoder",
".",
"Tokenizer",
"(",
"alphanum_only",
"=",
"False",
",",
"reserved_tokens",
"=",
"reserved_tokens",
")",
"num_chars",
"=",
"0",
"token_counts",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"s",
"in",
"generator",
":",
"s",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"s",
")",
"if",
"max_chars",
"and",
"(",
"num_chars",
"+",
"len",
"(",
"s",
")",
")",
">=",
"max_chars",
":",
"s",
"=",
"s",
"[",
":",
"(",
"max_chars",
"-",
"num_chars",
")",
"]",
"tokens",
"=",
"tokenizer",
".",
"tokenize",
"(",
"s",
")",
"tokens",
"=",
"_prepare_tokens_for_encode",
"(",
"tokens",
")",
"for",
"t",
"in",
"tokens",
":",
"token_counts",
"[",
"t",
"]",
"+=",
"1",
"if",
"max_chars",
":",
"num_chars",
"+=",
"len",
"(",
"s",
")",
"if",
"num_chars",
">",
"max_chars",
":",
"break",
"return",
"token_counts"
] |
Builds token counts from generator.
|
[
"Builds",
"token",
"counts",
"from",
"generator",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L388-L407
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
_validate_build_arguments
|
def _validate_build_arguments(max_subword_length, reserved_tokens,
target_vocab_size):
"""Validate arguments for SubwordTextEncoder.build_from_corpus."""
if max_subword_length <= 0:
raise ValueError(
"max_subword_length must be > 0. Note that memory and compute for "
"building the vocabulary scale quadratically in the length of the "
"longest token.")
for t in reserved_tokens:
if t.endswith("_") or not text_encoder.is_mixed_alphanum(t):
raise ValueError(
"Reserved tokens must not end with _ and they must contain a mix "
"of alphanumeric and non-alphanumeric characters. For example, "
"'<EOS>'.")
# Minimum vocab size = bytes + pad + 1
minimum_vocab_size = text_encoder.NUM_BYTES + 1 + 1
if target_vocab_size < minimum_vocab_size:
raise ValueError("target_vocab_size must be >= %d. Got %d" %
(minimum_vocab_size, target_vocab_size))
|
python
|
def _validate_build_arguments(max_subword_length, reserved_tokens,
target_vocab_size):
"""Validate arguments for SubwordTextEncoder.build_from_corpus."""
if max_subword_length <= 0:
raise ValueError(
"max_subword_length must be > 0. Note that memory and compute for "
"building the vocabulary scale quadratically in the length of the "
"longest token.")
for t in reserved_tokens:
if t.endswith("_") or not text_encoder.is_mixed_alphanum(t):
raise ValueError(
"Reserved tokens must not end with _ and they must contain a mix "
"of alphanumeric and non-alphanumeric characters. For example, "
"'<EOS>'.")
# Minimum vocab size = bytes + pad + 1
minimum_vocab_size = text_encoder.NUM_BYTES + 1 + 1
if target_vocab_size < minimum_vocab_size:
raise ValueError("target_vocab_size must be >= %d. Got %d" %
(minimum_vocab_size, target_vocab_size))
|
[
"def",
"_validate_build_arguments",
"(",
"max_subword_length",
",",
"reserved_tokens",
",",
"target_vocab_size",
")",
":",
"if",
"max_subword_length",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"max_subword_length must be > 0. Note that memory and compute for \"",
"\"building the vocabulary scale quadratically in the length of the \"",
"\"longest token.\"",
")",
"for",
"t",
"in",
"reserved_tokens",
":",
"if",
"t",
".",
"endswith",
"(",
"\"_\"",
")",
"or",
"not",
"text_encoder",
".",
"is_mixed_alphanum",
"(",
"t",
")",
":",
"raise",
"ValueError",
"(",
"\"Reserved tokens must not end with _ and they must contain a mix \"",
"\"of alphanumeric and non-alphanumeric characters. For example, \"",
"\"'<EOS>'.\"",
")",
"# Minimum vocab size = bytes + pad + 1",
"minimum_vocab_size",
"=",
"text_encoder",
".",
"NUM_BYTES",
"+",
"1",
"+",
"1",
"if",
"target_vocab_size",
"<",
"minimum_vocab_size",
":",
"raise",
"ValueError",
"(",
"\"target_vocab_size must be >= %d. Got %d\"",
"%",
"(",
"minimum_vocab_size",
",",
"target_vocab_size",
")",
")"
] |
Validate arguments for SubwordTextEncoder.build_from_corpus.
|
[
"Validate",
"arguments",
"for",
"SubwordTextEncoder",
".",
"build_from_corpus",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L410-L428
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
_prepare_tokens_for_encode
|
def _prepare_tokens_for_encode(tokens):
"""Prepare tokens for encoding.
Tokens followed by a single space have "_" appended and the single space token
is dropped.
If a token is _UNDERSCORE_REPLACEMENT, it is broken up into 2 tokens.
Args:
tokens: `list<str>`, tokens to prepare.
Returns:
`list<str>` prepared tokens.
"""
prepared_tokens = []
def _prepare_token(t, next_t):
skip_next = False
t = _escape(t)
# If next token is a single space, add _ suffix to token and skip the
# empty space.
if next_t == " ":
t += "_"
skip_next = True
return t, skip_next
next_tokens = tokens[1:] + [None]
skip_single_token = False
for token, next_token in zip(tokens, next_tokens):
if skip_single_token:
skip_single_token = False
continue
# If the user-supplied string contains the underscore replacement string,
# break it into 2 tokens and encode those separately.
if token == _UNDERSCORE_REPLACEMENT:
t1, t2 = _UNDERSCORE_REPLACEMENT[:2], _UNDERSCORE_REPLACEMENT[2:]
t1, _ = _prepare_token(t1, None)
t2, _ = _prepare_token(t2, next_token)
prepared_tokens.append(t1)
prepared_tokens.append(t2)
continue
token, skip_single_token = _prepare_token(token, next_token)
prepared_tokens.append(token)
return prepared_tokens
|
python
|
def _prepare_tokens_for_encode(tokens):
"""Prepare tokens for encoding.
Tokens followed by a single space have "_" appended and the single space token
is dropped.
If a token is _UNDERSCORE_REPLACEMENT, it is broken up into 2 tokens.
Args:
tokens: `list<str>`, tokens to prepare.
Returns:
`list<str>` prepared tokens.
"""
prepared_tokens = []
def _prepare_token(t, next_t):
skip_next = False
t = _escape(t)
# If next token is a single space, add _ suffix to token and skip the
# empty space.
if next_t == " ":
t += "_"
skip_next = True
return t, skip_next
next_tokens = tokens[1:] + [None]
skip_single_token = False
for token, next_token in zip(tokens, next_tokens):
if skip_single_token:
skip_single_token = False
continue
# If the user-supplied string contains the underscore replacement string,
# break it into 2 tokens and encode those separately.
if token == _UNDERSCORE_REPLACEMENT:
t1, t2 = _UNDERSCORE_REPLACEMENT[:2], _UNDERSCORE_REPLACEMENT[2:]
t1, _ = _prepare_token(t1, None)
t2, _ = _prepare_token(t2, next_token)
prepared_tokens.append(t1)
prepared_tokens.append(t2)
continue
token, skip_single_token = _prepare_token(token, next_token)
prepared_tokens.append(token)
return prepared_tokens
|
[
"def",
"_prepare_tokens_for_encode",
"(",
"tokens",
")",
":",
"prepared_tokens",
"=",
"[",
"]",
"def",
"_prepare_token",
"(",
"t",
",",
"next_t",
")",
":",
"skip_next",
"=",
"False",
"t",
"=",
"_escape",
"(",
"t",
")",
"# If next token is a single space, add _ suffix to token and skip the",
"# empty space.",
"if",
"next_t",
"==",
"\" \"",
":",
"t",
"+=",
"\"_\"",
"skip_next",
"=",
"True",
"return",
"t",
",",
"skip_next",
"next_tokens",
"=",
"tokens",
"[",
"1",
":",
"]",
"+",
"[",
"None",
"]",
"skip_single_token",
"=",
"False",
"for",
"token",
",",
"next_token",
"in",
"zip",
"(",
"tokens",
",",
"next_tokens",
")",
":",
"if",
"skip_single_token",
":",
"skip_single_token",
"=",
"False",
"continue",
"# If the user-supplied string contains the underscore replacement string,",
"# break it into 2 tokens and encode those separately.",
"if",
"token",
"==",
"_UNDERSCORE_REPLACEMENT",
":",
"t1",
",",
"t2",
"=",
"_UNDERSCORE_REPLACEMENT",
"[",
":",
"2",
"]",
",",
"_UNDERSCORE_REPLACEMENT",
"[",
"2",
":",
"]",
"t1",
",",
"_",
"=",
"_prepare_token",
"(",
"t1",
",",
"None",
")",
"t2",
",",
"_",
"=",
"_prepare_token",
"(",
"t2",
",",
"next_token",
")",
"prepared_tokens",
".",
"append",
"(",
"t1",
")",
"prepared_tokens",
".",
"append",
"(",
"t2",
")",
"continue",
"token",
",",
"skip_single_token",
"=",
"_prepare_token",
"(",
"token",
",",
"next_token",
")",
"prepared_tokens",
".",
"append",
"(",
"token",
")",
"return",
"prepared_tokens"
] |
Prepare tokens for encoding.
Tokens followed by a single space have "_" appended and the single space token
is dropped.
If a token is _UNDERSCORE_REPLACEMENT, it is broken up into 2 tokens.
Args:
tokens: `list<str>`, tokens to prepare.
Returns:
`list<str>` prepared tokens.
|
[
"Prepare",
"tokens",
"for",
"encoding",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L451-L496
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder.encode
|
def encode(self, s):
"""Encodes text into a list of integers."""
s = tf.compat.as_text(s)
tokens = self._tokenizer.tokenize(s)
tokens = _prepare_tokens_for_encode(tokens)
ids = []
for token in tokens:
ids.extend(self._token_to_ids(token))
return text_encoder.pad_incr(ids)
|
python
|
def encode(self, s):
"""Encodes text into a list of integers."""
s = tf.compat.as_text(s)
tokens = self._tokenizer.tokenize(s)
tokens = _prepare_tokens_for_encode(tokens)
ids = []
for token in tokens:
ids.extend(self._token_to_ids(token))
return text_encoder.pad_incr(ids)
|
[
"def",
"encode",
"(",
"self",
",",
"s",
")",
":",
"s",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"s",
")",
"tokens",
"=",
"self",
".",
"_tokenizer",
".",
"tokenize",
"(",
"s",
")",
"tokens",
"=",
"_prepare_tokens_for_encode",
"(",
"tokens",
")",
"ids",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"ids",
".",
"extend",
"(",
"self",
".",
"_token_to_ids",
"(",
"token",
")",
")",
"return",
"text_encoder",
".",
"pad_incr",
"(",
"ids",
")"
] |
Encodes text into a list of integers.
|
[
"Encodes",
"text",
"into",
"a",
"list",
"of",
"integers",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L80-L88
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder.decode
|
def decode(self, ids):
"""Decodes a list of integers into text."""
ids = text_encoder.pad_decr(ids)
subword_ids = ids
del ids
subwords = []
# Some ids correspond to bytes. Because unicode characters are composed of
# possibly multiple bytes, we attempt to decode contiguous lists of bytes
# all together. Invalid byte sequences are replaced with the unicode
# replacement (i.e. unknown) character U+FFFD.
prev_bytes = []
def consume_prev_bytes():
if prev_bytes:
bytestr = b"".join(prev_bytes)
bytes_text = bytestr.decode("utf-8", "replace")
subwords.append(bytes_text)
return []
for subword_id in subword_ids:
subword = self._id_to_subword(subword_id)
if isinstance(subword, six.binary_type):
# Byte-encoded
prev_bytes.append(subword)
else:
# If there were bytes previously, convert to unicode.
prev_bytes = consume_prev_bytes()
trimmed, add_space = _trim_underscore_and_tell(subword)
subwords.append(trimmed)
if add_space:
subwords.append(" ")
# If there were trailing bytes, convert to unicode.
prev_bytes = consume_prev_bytes()
return tf.compat.as_text("".join(subwords))
|
python
|
def decode(self, ids):
"""Decodes a list of integers into text."""
ids = text_encoder.pad_decr(ids)
subword_ids = ids
del ids
subwords = []
# Some ids correspond to bytes. Because unicode characters are composed of
# possibly multiple bytes, we attempt to decode contiguous lists of bytes
# all together. Invalid byte sequences are replaced with the unicode
# replacement (i.e. unknown) character U+FFFD.
prev_bytes = []
def consume_prev_bytes():
if prev_bytes:
bytestr = b"".join(prev_bytes)
bytes_text = bytestr.decode("utf-8", "replace")
subwords.append(bytes_text)
return []
for subword_id in subword_ids:
subword = self._id_to_subword(subword_id)
if isinstance(subword, six.binary_type):
# Byte-encoded
prev_bytes.append(subword)
else:
# If there were bytes previously, convert to unicode.
prev_bytes = consume_prev_bytes()
trimmed, add_space = _trim_underscore_and_tell(subword)
subwords.append(trimmed)
if add_space:
subwords.append(" ")
# If there were trailing bytes, convert to unicode.
prev_bytes = consume_prev_bytes()
return tf.compat.as_text("".join(subwords))
|
[
"def",
"decode",
"(",
"self",
",",
"ids",
")",
":",
"ids",
"=",
"text_encoder",
".",
"pad_decr",
"(",
"ids",
")",
"subword_ids",
"=",
"ids",
"del",
"ids",
"subwords",
"=",
"[",
"]",
"# Some ids correspond to bytes. Because unicode characters are composed of",
"# possibly multiple bytes, we attempt to decode contiguous lists of bytes",
"# all together. Invalid byte sequences are replaced with the unicode",
"# replacement (i.e. unknown) character U+FFFD.",
"prev_bytes",
"=",
"[",
"]",
"def",
"consume_prev_bytes",
"(",
")",
":",
"if",
"prev_bytes",
":",
"bytestr",
"=",
"b\"\"",
".",
"join",
"(",
"prev_bytes",
")",
"bytes_text",
"=",
"bytestr",
".",
"decode",
"(",
"\"utf-8\"",
",",
"\"replace\"",
")",
"subwords",
".",
"append",
"(",
"bytes_text",
")",
"return",
"[",
"]",
"for",
"subword_id",
"in",
"subword_ids",
":",
"subword",
"=",
"self",
".",
"_id_to_subword",
"(",
"subword_id",
")",
"if",
"isinstance",
"(",
"subword",
",",
"six",
".",
"binary_type",
")",
":",
"# Byte-encoded",
"prev_bytes",
".",
"append",
"(",
"subword",
")",
"else",
":",
"# If there were bytes previously, convert to unicode.",
"prev_bytes",
"=",
"consume_prev_bytes",
"(",
")",
"trimmed",
",",
"add_space",
"=",
"_trim_underscore_and_tell",
"(",
"subword",
")",
"subwords",
".",
"append",
"(",
"trimmed",
")",
"if",
"add_space",
":",
"subwords",
".",
"append",
"(",
"\" \"",
")",
"# If there were trailing bytes, convert to unicode.",
"prev_bytes",
"=",
"consume_prev_bytes",
"(",
")",
"return",
"tf",
".",
"compat",
".",
"as_text",
"(",
"\"\"",
".",
"join",
"(",
"subwords",
")",
")"
] |
Decodes a list of integers into text.
|
[
"Decodes",
"a",
"list",
"of",
"integers",
"into",
"text",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L90-L126
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder._token_to_ids
|
def _token_to_ids(self, token):
"""Convert a single token to a list of integer ids."""
# Check cache
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._token_to_ids_cache[cache_location]
if cache_key == token:
return cache_value
subwords = self._token_to_subwords(token)
ids = []
for subword in subwords:
if subword == _UNDERSCORE_REPLACEMENT:
ids.append(len(self._subwords) + ord("_"))
continue
subword_id = self._subword_to_id.get(subword)
if subword_id is None:
# Byte-encode
ids.extend(self._byte_encode(subword))
else:
ids.append(subword_id)
# Update cache
self._token_to_ids_cache[cache_location] = (token, ids)
return ids
|
python
|
def _token_to_ids(self, token):
"""Convert a single token to a list of integer ids."""
# Check cache
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._token_to_ids_cache[cache_location]
if cache_key == token:
return cache_value
subwords = self._token_to_subwords(token)
ids = []
for subword in subwords:
if subword == _UNDERSCORE_REPLACEMENT:
ids.append(len(self._subwords) + ord("_"))
continue
subword_id = self._subword_to_id.get(subword)
if subword_id is None:
# Byte-encode
ids.extend(self._byte_encode(subword))
else:
ids.append(subword_id)
# Update cache
self._token_to_ids_cache[cache_location] = (token, ids)
return ids
|
[
"def",
"_token_to_ids",
"(",
"self",
",",
"token",
")",
":",
"# Check cache",
"cache_location",
"=",
"hash",
"(",
"token",
")",
"%",
"self",
".",
"_cache_size",
"cache_key",
",",
"cache_value",
"=",
"self",
".",
"_token_to_ids_cache",
"[",
"cache_location",
"]",
"if",
"cache_key",
"==",
"token",
":",
"return",
"cache_value",
"subwords",
"=",
"self",
".",
"_token_to_subwords",
"(",
"token",
")",
"ids",
"=",
"[",
"]",
"for",
"subword",
"in",
"subwords",
":",
"if",
"subword",
"==",
"_UNDERSCORE_REPLACEMENT",
":",
"ids",
".",
"append",
"(",
"len",
"(",
"self",
".",
"_subwords",
")",
"+",
"ord",
"(",
"\"_\"",
")",
")",
"continue",
"subword_id",
"=",
"self",
".",
"_subword_to_id",
".",
"get",
"(",
"subword",
")",
"if",
"subword_id",
"is",
"None",
":",
"# Byte-encode",
"ids",
".",
"extend",
"(",
"self",
".",
"_byte_encode",
"(",
"subword",
")",
")",
"else",
":",
"ids",
".",
"append",
"(",
"subword_id",
")",
"# Update cache",
"self",
".",
"_token_to_ids_cache",
"[",
"cache_location",
"]",
"=",
"(",
"token",
",",
"ids",
")",
"return",
"ids"
] |
Convert a single token to a list of integer ids.
|
[
"Convert",
"a",
"single",
"token",
"to",
"a",
"list",
"of",
"integer",
"ids",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L140-L164
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder._byte_encode
|
def _byte_encode(self, token):
"""Encode a single token byte-wise into integer ids."""
# Vocab ids for all bytes follow ids for the subwords
offset = len(self._subwords)
if token == "_":
return [len(self._subwords) + ord(" ")]
return [i + offset for i in list(bytearray(tf.compat.as_bytes(token)))]
|
python
|
def _byte_encode(self, token):
"""Encode a single token byte-wise into integer ids."""
# Vocab ids for all bytes follow ids for the subwords
offset = len(self._subwords)
if token == "_":
return [len(self._subwords) + ord(" ")]
return [i + offset for i in list(bytearray(tf.compat.as_bytes(token)))]
|
[
"def",
"_byte_encode",
"(",
"self",
",",
"token",
")",
":",
"# Vocab ids for all bytes follow ids for the subwords",
"offset",
"=",
"len",
"(",
"self",
".",
"_subwords",
")",
"if",
"token",
"==",
"\"_\"",
":",
"return",
"[",
"len",
"(",
"self",
".",
"_subwords",
")",
"+",
"ord",
"(",
"\" \"",
")",
"]",
"return",
"[",
"i",
"+",
"offset",
"for",
"i",
"in",
"list",
"(",
"bytearray",
"(",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"token",
")",
")",
")",
"]"
] |
Encode a single token byte-wise into integer ids.
|
[
"Encode",
"a",
"single",
"token",
"byte",
"-",
"wise",
"into",
"integer",
"ids",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L166-L172
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder._id_to_subword
|
def _id_to_subword(self, subword_id):
"""Converts a subword integer ID to a subword string."""
if subword_id < 0 or subword_id >= (self.vocab_size - 1):
raise ValueError("Received id %d which is invalid. Ids must be within "
"[0, %d)." % (subword_id + 1, self.vocab_size))
if 0 <= subword_id < len(self._subwords):
# Subword
return self._subwords[subword_id]
else:
# Byte
offset = len(self._subwords)
subword_id -= offset
bytestr = bytes(bytearray([subword_id]))
return bytestr
|
python
|
def _id_to_subword(self, subword_id):
"""Converts a subword integer ID to a subword string."""
if subword_id < 0 or subword_id >= (self.vocab_size - 1):
raise ValueError("Received id %d which is invalid. Ids must be within "
"[0, %d)." % (subword_id + 1, self.vocab_size))
if 0 <= subword_id < len(self._subwords):
# Subword
return self._subwords[subword_id]
else:
# Byte
offset = len(self._subwords)
subword_id -= offset
bytestr = bytes(bytearray([subword_id]))
return bytestr
|
[
"def",
"_id_to_subword",
"(",
"self",
",",
"subword_id",
")",
":",
"if",
"subword_id",
"<",
"0",
"or",
"subword_id",
">=",
"(",
"self",
".",
"vocab_size",
"-",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"Received id %d which is invalid. Ids must be within \"",
"\"[0, %d).\"",
"%",
"(",
"subword_id",
"+",
"1",
",",
"self",
".",
"vocab_size",
")",
")",
"if",
"0",
"<=",
"subword_id",
"<",
"len",
"(",
"self",
".",
"_subwords",
")",
":",
"# Subword",
"return",
"self",
".",
"_subwords",
"[",
"subword_id",
"]",
"else",
":",
"# Byte",
"offset",
"=",
"len",
"(",
"self",
".",
"_subwords",
")",
"subword_id",
"-=",
"offset",
"bytestr",
"=",
"bytes",
"(",
"bytearray",
"(",
"[",
"subword_id",
"]",
")",
")",
"return",
"bytestr"
] |
Converts a subword integer ID to a subword string.
|
[
"Converts",
"a",
"subword",
"integer",
"ID",
"to",
"a",
"subword",
"string",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L174-L188
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder._token_to_subwords
|
def _token_to_subwords(self, token):
"""Greedily split token into subwords."""
subwords = []
start = 0
while start < len(token):
subword = None
for end in range(
min(len(token), start + self._max_subword_len), start, -1):
candidate = token[start:end]
if (candidate in self._subword_to_id or
candidate == _UNDERSCORE_REPLACEMENT):
subword = candidate
subwords.append(subword)
start = end
break
# No subword match found. Consume a single (unicode) character.
if subword is None:
subwords.append(token[start])
start += 1
return subwords
|
python
|
def _token_to_subwords(self, token):
"""Greedily split token into subwords."""
subwords = []
start = 0
while start < len(token):
subword = None
for end in range(
min(len(token), start + self._max_subword_len), start, -1):
candidate = token[start:end]
if (candidate in self._subword_to_id or
candidate == _UNDERSCORE_REPLACEMENT):
subword = candidate
subwords.append(subword)
start = end
break
# No subword match found. Consume a single (unicode) character.
if subword is None:
subwords.append(token[start])
start += 1
return subwords
|
[
"def",
"_token_to_subwords",
"(",
"self",
",",
"token",
")",
":",
"subwords",
"=",
"[",
"]",
"start",
"=",
"0",
"while",
"start",
"<",
"len",
"(",
"token",
")",
":",
"subword",
"=",
"None",
"for",
"end",
"in",
"range",
"(",
"min",
"(",
"len",
"(",
"token",
")",
",",
"start",
"+",
"self",
".",
"_max_subword_len",
")",
",",
"start",
",",
"-",
"1",
")",
":",
"candidate",
"=",
"token",
"[",
"start",
":",
"end",
"]",
"if",
"(",
"candidate",
"in",
"self",
".",
"_subword_to_id",
"or",
"candidate",
"==",
"_UNDERSCORE_REPLACEMENT",
")",
":",
"subword",
"=",
"candidate",
"subwords",
".",
"append",
"(",
"subword",
")",
"start",
"=",
"end",
"break",
"# No subword match found. Consume a single (unicode) character.",
"if",
"subword",
"is",
"None",
":",
"subwords",
".",
"append",
"(",
"token",
"[",
"start",
"]",
")",
"start",
"+=",
"1",
"return",
"subwords"
] |
Greedily split token into subwords.
|
[
"Greedily",
"split",
"token",
"into",
"subwords",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L190-L211
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder._init_from_list
|
def _init_from_list(self, subwords):
"""Initializes the encoder from a list of subwords."""
subwords = [tf.compat.as_text(s) for s in subwords if s]
self._subwords = subwords
# Note that internally everything is 0-indexed. Padding is dealt with at the
# end of encode and the beginning of decode.
self._subword_to_id = {s: i for i, s in enumerate(subwords)}
# We remember the maximum length of any subword to avoid having to
# check arbitrarily long strings.
self._max_subword_len = max(
len(_UNDERSCORE_REPLACEMENT), max([len(s) for s in subwords] or [1]))
# Initialize the cache
self._cache_size = 2**20
self._token_to_ids_cache = [(None, None)] * self._cache_size
# Setup tokenizer
# Reserved tokens are all tokens that are mixed alphanum and non-alphanum.
reserved_tokens = set([_UNDERSCORE_REPLACEMENT])
for t in self._subwords:
if text_encoder.is_mixed_alphanum(t):
reserved_tokens.add(t)
self._tokenizer = text_encoder.Tokenizer(
alphanum_only=False, reserved_tokens=reserved_tokens)
|
python
|
def _init_from_list(self, subwords):
"""Initializes the encoder from a list of subwords."""
subwords = [tf.compat.as_text(s) for s in subwords if s]
self._subwords = subwords
# Note that internally everything is 0-indexed. Padding is dealt with at the
# end of encode and the beginning of decode.
self._subword_to_id = {s: i for i, s in enumerate(subwords)}
# We remember the maximum length of any subword to avoid having to
# check arbitrarily long strings.
self._max_subword_len = max(
len(_UNDERSCORE_REPLACEMENT), max([len(s) for s in subwords] or [1]))
# Initialize the cache
self._cache_size = 2**20
self._token_to_ids_cache = [(None, None)] * self._cache_size
# Setup tokenizer
# Reserved tokens are all tokens that are mixed alphanum and non-alphanum.
reserved_tokens = set([_UNDERSCORE_REPLACEMENT])
for t in self._subwords:
if text_encoder.is_mixed_alphanum(t):
reserved_tokens.add(t)
self._tokenizer = text_encoder.Tokenizer(
alphanum_only=False, reserved_tokens=reserved_tokens)
|
[
"def",
"_init_from_list",
"(",
"self",
",",
"subwords",
")",
":",
"subwords",
"=",
"[",
"tf",
".",
"compat",
".",
"as_text",
"(",
"s",
")",
"for",
"s",
"in",
"subwords",
"if",
"s",
"]",
"self",
".",
"_subwords",
"=",
"subwords",
"# Note that internally everything is 0-indexed. Padding is dealt with at the",
"# end of encode and the beginning of decode.",
"self",
".",
"_subword_to_id",
"=",
"{",
"s",
":",
"i",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"subwords",
")",
"}",
"# We remember the maximum length of any subword to avoid having to",
"# check arbitrarily long strings.",
"self",
".",
"_max_subword_len",
"=",
"max",
"(",
"len",
"(",
"_UNDERSCORE_REPLACEMENT",
")",
",",
"max",
"(",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"subwords",
"]",
"or",
"[",
"1",
"]",
")",
")",
"# Initialize the cache",
"self",
".",
"_cache_size",
"=",
"2",
"**",
"20",
"self",
".",
"_token_to_ids_cache",
"=",
"[",
"(",
"None",
",",
"None",
")",
"]",
"*",
"self",
".",
"_cache_size",
"# Setup tokenizer",
"# Reserved tokens are all tokens that are mixed alphanum and non-alphanum.",
"reserved_tokens",
"=",
"set",
"(",
"[",
"_UNDERSCORE_REPLACEMENT",
"]",
")",
"for",
"t",
"in",
"self",
".",
"_subwords",
":",
"if",
"text_encoder",
".",
"is_mixed_alphanum",
"(",
"t",
")",
":",
"reserved_tokens",
".",
"add",
"(",
"t",
")",
"self",
".",
"_tokenizer",
"=",
"text_encoder",
".",
"Tokenizer",
"(",
"alphanum_only",
"=",
"False",
",",
"reserved_tokens",
"=",
"reserved_tokens",
")"
] |
Initializes the encoder from a list of subwords.
|
[
"Initializes",
"the",
"encoder",
"from",
"a",
"list",
"of",
"subwords",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L213-L237
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder.save_to_file
|
def save_to_file(self, filename_prefix):
"""Save the vocabulary to a file."""
# Wrap in single quotes to make it easier to see the full subword when
# it has spaces and make it easier to search with ctrl+f.
filename = self._filename(filename_prefix)
lines = ["'%s'" % s for s in self._subwords]
self._write_lines_to_file(filename, lines)
|
python
|
def save_to_file(self, filename_prefix):
"""Save the vocabulary to a file."""
# Wrap in single quotes to make it easier to see the full subword when
# it has spaces and make it easier to search with ctrl+f.
filename = self._filename(filename_prefix)
lines = ["'%s'" % s for s in self._subwords]
self._write_lines_to_file(filename, lines)
|
[
"def",
"save_to_file",
"(",
"self",
",",
"filename_prefix",
")",
":",
"# Wrap in single quotes to make it easier to see the full subword when",
"# it has spaces and make it easier to search with ctrl+f.",
"filename",
"=",
"self",
".",
"_filename",
"(",
"filename_prefix",
")",
"lines",
"=",
"[",
"\"'%s'\"",
"%",
"s",
"for",
"s",
"in",
"self",
".",
"_subwords",
"]",
"self",
".",
"_write_lines_to_file",
"(",
"filename",
",",
"lines",
")"
] |
Save the vocabulary to a file.
|
[
"Save",
"the",
"vocabulary",
"to",
"a",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L243-L249
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder.load_from_file
|
def load_from_file(cls, filename_prefix):
"""Extracts list of subwords from file."""
filename = cls._filename(filename_prefix)
lines, _ = cls._read_lines_from_file(filename)
# Strip wrapping single quotes
vocab_list = [line[1:-1] for line in lines]
return cls(vocab_list=vocab_list)
|
python
|
def load_from_file(cls, filename_prefix):
"""Extracts list of subwords from file."""
filename = cls._filename(filename_prefix)
lines, _ = cls._read_lines_from_file(filename)
# Strip wrapping single quotes
vocab_list = [line[1:-1] for line in lines]
return cls(vocab_list=vocab_list)
|
[
"def",
"load_from_file",
"(",
"cls",
",",
"filename_prefix",
")",
":",
"filename",
"=",
"cls",
".",
"_filename",
"(",
"filename_prefix",
")",
"lines",
",",
"_",
"=",
"cls",
".",
"_read_lines_from_file",
"(",
"filename",
")",
"# Strip wrapping single quotes",
"vocab_list",
"=",
"[",
"line",
"[",
"1",
":",
"-",
"1",
"]",
"for",
"line",
"in",
"lines",
"]",
"return",
"cls",
"(",
"vocab_list",
"=",
"vocab_list",
")"
] |
Extracts list of subwords from file.
|
[
"Extracts",
"list",
"of",
"subwords",
"from",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L252-L258
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text/subword_text_encoder.py
|
SubwordTextEncoder.build_from_corpus
|
def build_from_corpus(cls,
corpus_generator,
target_vocab_size,
max_subword_length=20,
max_corpus_chars=None,
reserved_tokens=None):
"""Builds a `SubwordTextEncoder` based on the `corpus_generator`.
Args:
corpus_generator: generator yielding `str`, from which subwords will be
constructed.
target_vocab_size: `int`, approximate size of the vocabulary to create.
max_subword_length: `int`, maximum length of a subword. Note that memory
and compute scale quadratically in the length of the longest token.
max_corpus_chars: `int`, the maximum number of characters to consume from
`corpus_generator` for the purposes of building the subword vocabulary.
reserved_tokens: `list<str>`, list of tokens that will always be treated
as whole tokens and not split up. Note that these must contain a mix of
alphanumeric and non-alphanumeric characters (e.g. "<EOS>") and not end
in an underscore.
Returns:
`SubwordTextEncoder`.
"""
reserved_tokens = reserved_tokens or []
_validate_build_arguments(
max_subword_length=max_subword_length,
reserved_tokens=reserved_tokens,
target_vocab_size=target_vocab_size)
token_counts = _token_counts_from_generator(
generator=corpus_generator,
max_chars=max_corpus_chars,
reserved_tokens=reserved_tokens)
# Binary search on the minimum token count to build a vocabulary with
# approximately the right size
def _binary_search(min_token_count, max_token_count):
"""Binary search min_token_count to build SubwordTextEncoder vocab."""
candidate_min = (min_token_count + max_token_count) // 2
logging.info("SubwordTextEncoder build: trying min_token_count %d",
candidate_min)
encoder = cls._build_from_token_counts(
token_counts=token_counts,
min_token_count=candidate_min,
reserved_tokens=reserved_tokens,
num_iterations=4,
max_subword_length=max_subword_length)
vocab_size = encoder.vocab_size
# Being within 1% of the target vocab size is ok
target_achieved = (
abs(vocab_size - target_vocab_size) * 100 < target_vocab_size)
if (target_achieved or min_token_count >= max_token_count or
candidate_min <= 1):
# Search complete
return encoder
# Recurse
if vocab_size > target_vocab_size:
next_encoder = _binary_search(candidate_min + 1, max_token_count)
else:
next_encoder = _binary_search(min_token_count, candidate_min - 1)
# Return the one that's closest to the target_vocab_size
if (abs(vocab_size - target_vocab_size) <
abs(next_encoder.vocab_size - target_vocab_size)):
return encoder
else:
return next_encoder
# Get min and max token counts.
min_token_count = max(min(token_counts.values()), 1)
max_token_count = max(token_counts.values())
# Another option could be to do a binary search over *ranks* of the tokens.
return _binary_search(min_token_count, max_token_count)
|
python
|
def build_from_corpus(cls,
corpus_generator,
target_vocab_size,
max_subword_length=20,
max_corpus_chars=None,
reserved_tokens=None):
"""Builds a `SubwordTextEncoder` based on the `corpus_generator`.
Args:
corpus_generator: generator yielding `str`, from which subwords will be
constructed.
target_vocab_size: `int`, approximate size of the vocabulary to create.
max_subword_length: `int`, maximum length of a subword. Note that memory
and compute scale quadratically in the length of the longest token.
max_corpus_chars: `int`, the maximum number of characters to consume from
`corpus_generator` for the purposes of building the subword vocabulary.
reserved_tokens: `list<str>`, list of tokens that will always be treated
as whole tokens and not split up. Note that these must contain a mix of
alphanumeric and non-alphanumeric characters (e.g. "<EOS>") and not end
in an underscore.
Returns:
`SubwordTextEncoder`.
"""
reserved_tokens = reserved_tokens or []
_validate_build_arguments(
max_subword_length=max_subword_length,
reserved_tokens=reserved_tokens,
target_vocab_size=target_vocab_size)
token_counts = _token_counts_from_generator(
generator=corpus_generator,
max_chars=max_corpus_chars,
reserved_tokens=reserved_tokens)
# Binary search on the minimum token count to build a vocabulary with
# approximately the right size
def _binary_search(min_token_count, max_token_count):
"""Binary search min_token_count to build SubwordTextEncoder vocab."""
candidate_min = (min_token_count + max_token_count) // 2
logging.info("SubwordTextEncoder build: trying min_token_count %d",
candidate_min)
encoder = cls._build_from_token_counts(
token_counts=token_counts,
min_token_count=candidate_min,
reserved_tokens=reserved_tokens,
num_iterations=4,
max_subword_length=max_subword_length)
vocab_size = encoder.vocab_size
# Being within 1% of the target vocab size is ok
target_achieved = (
abs(vocab_size - target_vocab_size) * 100 < target_vocab_size)
if (target_achieved or min_token_count >= max_token_count or
candidate_min <= 1):
# Search complete
return encoder
# Recurse
if vocab_size > target_vocab_size:
next_encoder = _binary_search(candidate_min + 1, max_token_count)
else:
next_encoder = _binary_search(min_token_count, candidate_min - 1)
# Return the one that's closest to the target_vocab_size
if (abs(vocab_size - target_vocab_size) <
abs(next_encoder.vocab_size - target_vocab_size)):
return encoder
else:
return next_encoder
# Get min and max token counts.
min_token_count = max(min(token_counts.values()), 1)
max_token_count = max(token_counts.values())
# Another option could be to do a binary search over *ranks* of the tokens.
return _binary_search(min_token_count, max_token_count)
|
[
"def",
"build_from_corpus",
"(",
"cls",
",",
"corpus_generator",
",",
"target_vocab_size",
",",
"max_subword_length",
"=",
"20",
",",
"max_corpus_chars",
"=",
"None",
",",
"reserved_tokens",
"=",
"None",
")",
":",
"reserved_tokens",
"=",
"reserved_tokens",
"or",
"[",
"]",
"_validate_build_arguments",
"(",
"max_subword_length",
"=",
"max_subword_length",
",",
"reserved_tokens",
"=",
"reserved_tokens",
",",
"target_vocab_size",
"=",
"target_vocab_size",
")",
"token_counts",
"=",
"_token_counts_from_generator",
"(",
"generator",
"=",
"corpus_generator",
",",
"max_chars",
"=",
"max_corpus_chars",
",",
"reserved_tokens",
"=",
"reserved_tokens",
")",
"# Binary search on the minimum token count to build a vocabulary with",
"# approximately the right size",
"def",
"_binary_search",
"(",
"min_token_count",
",",
"max_token_count",
")",
":",
"\"\"\"Binary search min_token_count to build SubwordTextEncoder vocab.\"\"\"",
"candidate_min",
"=",
"(",
"min_token_count",
"+",
"max_token_count",
")",
"//",
"2",
"logging",
".",
"info",
"(",
"\"SubwordTextEncoder build: trying min_token_count %d\"",
",",
"candidate_min",
")",
"encoder",
"=",
"cls",
".",
"_build_from_token_counts",
"(",
"token_counts",
"=",
"token_counts",
",",
"min_token_count",
"=",
"candidate_min",
",",
"reserved_tokens",
"=",
"reserved_tokens",
",",
"num_iterations",
"=",
"4",
",",
"max_subword_length",
"=",
"max_subword_length",
")",
"vocab_size",
"=",
"encoder",
".",
"vocab_size",
"# Being within 1% of the target vocab size is ok",
"target_achieved",
"=",
"(",
"abs",
"(",
"vocab_size",
"-",
"target_vocab_size",
")",
"*",
"100",
"<",
"target_vocab_size",
")",
"if",
"(",
"target_achieved",
"or",
"min_token_count",
">=",
"max_token_count",
"or",
"candidate_min",
"<=",
"1",
")",
":",
"# Search complete",
"return",
"encoder",
"# Recurse",
"if",
"vocab_size",
">",
"target_vocab_size",
":",
"next_encoder",
"=",
"_binary_search",
"(",
"candidate_min",
"+",
"1",
",",
"max_token_count",
")",
"else",
":",
"next_encoder",
"=",
"_binary_search",
"(",
"min_token_count",
",",
"candidate_min",
"-",
"1",
")",
"# Return the one that's closest to the target_vocab_size",
"if",
"(",
"abs",
"(",
"vocab_size",
"-",
"target_vocab_size",
")",
"<",
"abs",
"(",
"next_encoder",
".",
"vocab_size",
"-",
"target_vocab_size",
")",
")",
":",
"return",
"encoder",
"else",
":",
"return",
"next_encoder",
"# Get min and max token counts.",
"min_token_count",
"=",
"max",
"(",
"min",
"(",
"token_counts",
".",
"values",
"(",
")",
")",
",",
"1",
")",
"max_token_count",
"=",
"max",
"(",
"token_counts",
".",
"values",
"(",
")",
")",
"# Another option could be to do a binary search over *ranks* of the tokens.",
"return",
"_binary_search",
"(",
"min_token_count",
",",
"max_token_count",
")"
] |
Builds a `SubwordTextEncoder` based on the `corpus_generator`.
Args:
corpus_generator: generator yielding `str`, from which subwords will be
constructed.
target_vocab_size: `int`, approximate size of the vocabulary to create.
max_subword_length: `int`, maximum length of a subword. Note that memory
and compute scale quadratically in the length of the longest token.
max_corpus_chars: `int`, the maximum number of characters to consume from
`corpus_generator` for the purposes of building the subword vocabulary.
reserved_tokens: `list<str>`, list of tokens that will always be treated
as whole tokens and not split up. Note that these must contain a mix of
alphanumeric and non-alphanumeric characters (e.g. "<EOS>") and not end
in an underscore.
Returns:
`SubwordTextEncoder`.
|
[
"Builds",
"a",
"SubwordTextEncoder",
"based",
"on",
"the",
"corpus_generator",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L261-L336
|
train
|
tensorflow/datasets
|
tensorflow_datasets/structured/higgs.py
|
Higgs._generate_examples
|
def _generate_examples(self, file_path):
"""Generate features given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features, per row.
"""
fieldnames = [
'class_label', 'lepton_pT', 'lepton_eta', 'lepton_phi',
'missing_energy_magnitude', 'missing_energy_phi', 'jet_1_pt',
'jet_1_eta', 'jet_1_phi', 'jet_1_b-tag', 'jet_2_pt', 'jet_2_eta',
'jet_2_phi', 'jet_2_b-tag', 'jet_3_pt', 'jet_3_eta', 'jet_3_phi',
'jet_3_b-tag', 'jet_4_pt', 'jet_4_eta', 'jet_4_phi', 'jet_4_b-tag',
'm_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb'
]
with tf.io.gfile.GFile(file_path) as csvfile:
reader = csv.DictReader(csvfile, fieldnames=fieldnames)
for row in reader:
yield row
|
python
|
def _generate_examples(self, file_path):
"""Generate features given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features, per row.
"""
fieldnames = [
'class_label', 'lepton_pT', 'lepton_eta', 'lepton_phi',
'missing_energy_magnitude', 'missing_energy_phi', 'jet_1_pt',
'jet_1_eta', 'jet_1_phi', 'jet_1_b-tag', 'jet_2_pt', 'jet_2_eta',
'jet_2_phi', 'jet_2_b-tag', 'jet_3_pt', 'jet_3_eta', 'jet_3_phi',
'jet_3_b-tag', 'jet_4_pt', 'jet_4_eta', 'jet_4_phi', 'jet_4_b-tag',
'm_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb'
]
with tf.io.gfile.GFile(file_path) as csvfile:
reader = csv.DictReader(csvfile, fieldnames=fieldnames)
for row in reader:
yield row
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"file_path",
")",
":",
"fieldnames",
"=",
"[",
"'class_label'",
",",
"'lepton_pT'",
",",
"'lepton_eta'",
",",
"'lepton_phi'",
",",
"'missing_energy_magnitude'",
",",
"'missing_energy_phi'",
",",
"'jet_1_pt'",
",",
"'jet_1_eta'",
",",
"'jet_1_phi'",
",",
"'jet_1_b-tag'",
",",
"'jet_2_pt'",
",",
"'jet_2_eta'",
",",
"'jet_2_phi'",
",",
"'jet_2_b-tag'",
",",
"'jet_3_pt'",
",",
"'jet_3_eta'",
",",
"'jet_3_phi'",
",",
"'jet_3_b-tag'",
",",
"'jet_4_pt'",
",",
"'jet_4_eta'",
",",
"'jet_4_phi'",
",",
"'jet_4_b-tag'",
",",
"'m_jj'",
",",
"'m_jjj'",
",",
"'m_lv'",
",",
"'m_jlv'",
",",
"'m_bb'",
",",
"'m_wbb'",
",",
"'m_wwbb'",
"]",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"file_path",
")",
"as",
"csvfile",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"csvfile",
",",
"fieldnames",
"=",
"fieldnames",
")",
"for",
"row",
"in",
"reader",
":",
"yield",
"row"
] |
Generate features given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features, per row.
|
[
"Generate",
"features",
"given",
"the",
"directory",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/structured/higgs.py#L122-L144
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/cats_vs_dogs.py
|
CatsVsDogs._generate_examples
|
def _generate_examples(self, archive):
"""Generate Cats vs Dogs images and labels given a directory path."""
num_skipped = 0
for fname, fobj in archive:
res = _NAME_RE.match(fname)
if not res: # README file, ...
continue
label = res.group(1).lower()
if tf.compat.as_bytes("JFIF") not in fobj.peek(10):
num_skipped += 1
continue
yield {
"image": fobj,
"image/filename": fname,
"label": label,
}
if num_skipped != _NUM_CORRUPT_IMAGES:
raise ValueError("Expected %d corrupt images, but found %d" % (
_NUM_CORRUPT_IMAGES, num_skipped))
logging.warning("%d images were corrupted and were skipped", num_skipped)
|
python
|
def _generate_examples(self, archive):
"""Generate Cats vs Dogs images and labels given a directory path."""
num_skipped = 0
for fname, fobj in archive:
res = _NAME_RE.match(fname)
if not res: # README file, ...
continue
label = res.group(1).lower()
if tf.compat.as_bytes("JFIF") not in fobj.peek(10):
num_skipped += 1
continue
yield {
"image": fobj,
"image/filename": fname,
"label": label,
}
if num_skipped != _NUM_CORRUPT_IMAGES:
raise ValueError("Expected %d corrupt images, but found %d" % (
_NUM_CORRUPT_IMAGES, num_skipped))
logging.warning("%d images were corrupted and were skipped", num_skipped)
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"archive",
")",
":",
"num_skipped",
"=",
"0",
"for",
"fname",
",",
"fobj",
"in",
"archive",
":",
"res",
"=",
"_NAME_RE",
".",
"match",
"(",
"fname",
")",
"if",
"not",
"res",
":",
"# README file, ...",
"continue",
"label",
"=",
"res",
".",
"group",
"(",
"1",
")",
".",
"lower",
"(",
")",
"if",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"\"JFIF\"",
")",
"not",
"in",
"fobj",
".",
"peek",
"(",
"10",
")",
":",
"num_skipped",
"+=",
"1",
"continue",
"yield",
"{",
"\"image\"",
":",
"fobj",
",",
"\"image/filename\"",
":",
"fname",
",",
"\"label\"",
":",
"label",
",",
"}",
"if",
"num_skipped",
"!=",
"_NUM_CORRUPT_IMAGES",
":",
"raise",
"ValueError",
"(",
"\"Expected %d corrupt images, but found %d\"",
"%",
"(",
"_NUM_CORRUPT_IMAGES",
",",
"num_skipped",
")",
")",
"logging",
".",
"warning",
"(",
"\"%d images were corrupted and were skipped\"",
",",
"num_skipped",
")"
] |
Generate Cats vs Dogs images and labels given a directory path.
|
[
"Generate",
"Cats",
"vs",
"Dogs",
"images",
"and",
"labels",
"given",
"a",
"directory",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cats_vs_dogs.py#L87-L107
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/smallnorb.py
|
_load_chunk
|
def _load_chunk(dat_path, cat_path, info_path):
"""Loads a data chunk as specified by the paths.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Returns:
Tuple with the dat, cat, info_arrays.
"""
dat_array = read_binary_matrix(dat_path)
# Even if the image is gray scale, we need to add an extra channel dimension
# to be compatible with tfds.features.Image.
dat_array = np.expand_dims(dat_array, -1)
cat_array = read_binary_matrix(cat_path)
info_array = read_binary_matrix(info_path)
info_array = np.copy(info_array) # Make read-only buffer array writable.
# Azimuth values are 0, 2, 4, .., 34. We divide by 2 to get proper labels.
info_array[:, 2] = info_array[:, 2] / 2
return dat_array, cat_array, info_array
|
python
|
def _load_chunk(dat_path, cat_path, info_path):
"""Loads a data chunk as specified by the paths.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Returns:
Tuple with the dat, cat, info_arrays.
"""
dat_array = read_binary_matrix(dat_path)
# Even if the image is gray scale, we need to add an extra channel dimension
# to be compatible with tfds.features.Image.
dat_array = np.expand_dims(dat_array, -1)
cat_array = read_binary_matrix(cat_path)
info_array = read_binary_matrix(info_path)
info_array = np.copy(info_array) # Make read-only buffer array writable.
# Azimuth values are 0, 2, 4, .., 34. We divide by 2 to get proper labels.
info_array[:, 2] = info_array[:, 2] / 2
return dat_array, cat_array, info_array
|
[
"def",
"_load_chunk",
"(",
"dat_path",
",",
"cat_path",
",",
"info_path",
")",
":",
"dat_array",
"=",
"read_binary_matrix",
"(",
"dat_path",
")",
"# Even if the image is gray scale, we need to add an extra channel dimension",
"# to be compatible with tfds.features.Image.",
"dat_array",
"=",
"np",
".",
"expand_dims",
"(",
"dat_array",
",",
"-",
"1",
")",
"cat_array",
"=",
"read_binary_matrix",
"(",
"cat_path",
")",
"info_array",
"=",
"read_binary_matrix",
"(",
"info_path",
")",
"info_array",
"=",
"np",
".",
"copy",
"(",
"info_array",
")",
"# Make read-only buffer array writable.",
"# Azimuth values are 0, 2, 4, .., 34. We divide by 2 to get proper labels.",
"info_array",
"[",
":",
",",
"2",
"]",
"=",
"info_array",
"[",
":",
",",
"2",
"]",
"/",
"2",
"return",
"dat_array",
",",
"cat_array",
",",
"info_array"
] |
Loads a data chunk as specified by the paths.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Returns:
Tuple with the dat, cat, info_arrays.
|
[
"Loads",
"a",
"data",
"chunk",
"as",
"specified",
"by",
"the",
"paths",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L141-L164
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/smallnorb.py
|
read_binary_matrix
|
def read_binary_matrix(filename):
"""Reads and returns binary formatted matrix stored in filename.
The file format is described on the data set page:
https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/
Args:
filename: String with path to the file.
Returns:
Numpy array contained in the file.
"""
with tf.io.gfile.GFile(filename, "rb") as f:
s = f.read()
# Data is stored in little-endian byte order.
int32_dtype = np.dtype("int32").newbyteorder("<")
# The first 4 bytes contain a magic code that specifies the data type.
magic = int(np.frombuffer(s, dtype=int32_dtype, count=1))
if magic == 507333717:
data_dtype = np.dtype("uint8") # uint8 does not have a byte order.
elif magic == 507333716:
data_dtype = np.dtype("int32").newbyteorder("<")
else:
raise ValueError("Invalid magic value for data type!")
# The second 4 bytes contain an int32 with the number of dimensions of the
# stored array.
ndim = int(np.frombuffer(s, dtype=int32_dtype, count=1, offset=4))
# The next ndim x 4 bytes contain the shape of the array in int32.
dims = np.frombuffer(s, dtype=int32_dtype, count=ndim, offset=8)
# If the array has less than three dimensions, three int32 are still used to
# save the shape info (remaining int32 are simply set to 1). The shape info
# hence uses max(3, ndim) bytes.
bytes_used_for_shape_info = max(3, ndim) * 4
# The remaining bytes are the array.
data = np.frombuffer(
s, dtype=data_dtype, offset=8 + bytes_used_for_shape_info)
return data.reshape(tuple(dims))
|
python
|
def read_binary_matrix(filename):
"""Reads and returns binary formatted matrix stored in filename.
The file format is described on the data set page:
https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/
Args:
filename: String with path to the file.
Returns:
Numpy array contained in the file.
"""
with tf.io.gfile.GFile(filename, "rb") as f:
s = f.read()
# Data is stored in little-endian byte order.
int32_dtype = np.dtype("int32").newbyteorder("<")
# The first 4 bytes contain a magic code that specifies the data type.
magic = int(np.frombuffer(s, dtype=int32_dtype, count=1))
if magic == 507333717:
data_dtype = np.dtype("uint8") # uint8 does not have a byte order.
elif magic == 507333716:
data_dtype = np.dtype("int32").newbyteorder("<")
else:
raise ValueError("Invalid magic value for data type!")
# The second 4 bytes contain an int32 with the number of dimensions of the
# stored array.
ndim = int(np.frombuffer(s, dtype=int32_dtype, count=1, offset=4))
# The next ndim x 4 bytes contain the shape of the array in int32.
dims = np.frombuffer(s, dtype=int32_dtype, count=ndim, offset=8)
# If the array has less than three dimensions, three int32 are still used to
# save the shape info (remaining int32 are simply set to 1). The shape info
# hence uses max(3, ndim) bytes.
bytes_used_for_shape_info = max(3, ndim) * 4
# The remaining bytes are the array.
data = np.frombuffer(
s, dtype=data_dtype, offset=8 + bytes_used_for_shape_info)
return data.reshape(tuple(dims))
|
[
"def",
"read_binary_matrix",
"(",
"filename",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"f",
":",
"s",
"=",
"f",
".",
"read",
"(",
")",
"# Data is stored in little-endian byte order.",
"int32_dtype",
"=",
"np",
".",
"dtype",
"(",
"\"int32\"",
")",
".",
"newbyteorder",
"(",
"\"<\"",
")",
"# The first 4 bytes contain a magic code that specifies the data type.",
"magic",
"=",
"int",
"(",
"np",
".",
"frombuffer",
"(",
"s",
",",
"dtype",
"=",
"int32_dtype",
",",
"count",
"=",
"1",
")",
")",
"if",
"magic",
"==",
"507333717",
":",
"data_dtype",
"=",
"np",
".",
"dtype",
"(",
"\"uint8\"",
")",
"# uint8 does not have a byte order.",
"elif",
"magic",
"==",
"507333716",
":",
"data_dtype",
"=",
"np",
".",
"dtype",
"(",
"\"int32\"",
")",
".",
"newbyteorder",
"(",
"\"<\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid magic value for data type!\"",
")",
"# The second 4 bytes contain an int32 with the number of dimensions of the",
"# stored array.",
"ndim",
"=",
"int",
"(",
"np",
".",
"frombuffer",
"(",
"s",
",",
"dtype",
"=",
"int32_dtype",
",",
"count",
"=",
"1",
",",
"offset",
"=",
"4",
")",
")",
"# The next ndim x 4 bytes contain the shape of the array in int32.",
"dims",
"=",
"np",
".",
"frombuffer",
"(",
"s",
",",
"dtype",
"=",
"int32_dtype",
",",
"count",
"=",
"ndim",
",",
"offset",
"=",
"8",
")",
"# If the array has less than three dimensions, three int32 are still used to",
"# save the shape info (remaining int32 are simply set to 1). The shape info",
"# hence uses max(3, ndim) bytes.",
"bytes_used_for_shape_info",
"=",
"max",
"(",
"3",
",",
"ndim",
")",
"*",
"4",
"# The remaining bytes are the array.",
"data",
"=",
"np",
".",
"frombuffer",
"(",
"s",
",",
"dtype",
"=",
"data_dtype",
",",
"offset",
"=",
"8",
"+",
"bytes_used_for_shape_info",
")",
"return",
"data",
".",
"reshape",
"(",
"tuple",
"(",
"dims",
")",
")"
] |
Reads and returns binary formatted matrix stored in filename.
The file format is described on the data set page:
https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/
Args:
filename: String with path to the file.
Returns:
Numpy array contained in the file.
|
[
"Reads",
"and",
"returns",
"binary",
"formatted",
"matrix",
"stored",
"in",
"filename",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L167-L209
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/smallnorb.py
|
Smallnorb._split_generators
|
def _split_generators(self, dl_manager):
"""Returns splits."""
filenames = {
"training_dat": _TRAINING_URL_TEMPLATE.format(type="dat"),
"training_cat": _TRAINING_URL_TEMPLATE.format(type="cat"),
"training_info": _TRAINING_URL_TEMPLATE.format(type="info"),
"testing_dat": _TESTING_URL_TEMPLATE.format(type="dat"),
"testing_cat": _TESTING_URL_TEMPLATE.format(type="cat"),
"testing_info": _TESTING_URL_TEMPLATE.format(type="info"),
}
files = dl_manager.download_and_extract(filenames)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=1,
gen_kwargs=dict(
dat_path=files["training_dat"],
cat_path=files["training_cat"],
info_path=files["training_info"])),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs=dict(
dat_path=files["testing_dat"],
cat_path=files["testing_cat"],
info_path=files["testing_info"])),
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns splits."""
filenames = {
"training_dat": _TRAINING_URL_TEMPLATE.format(type="dat"),
"training_cat": _TRAINING_URL_TEMPLATE.format(type="cat"),
"training_info": _TRAINING_URL_TEMPLATE.format(type="info"),
"testing_dat": _TESTING_URL_TEMPLATE.format(type="dat"),
"testing_cat": _TESTING_URL_TEMPLATE.format(type="cat"),
"testing_info": _TESTING_URL_TEMPLATE.format(type="info"),
}
files = dl_manager.download_and_extract(filenames)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=1,
gen_kwargs=dict(
dat_path=files["training_dat"],
cat_path=files["training_cat"],
info_path=files["training_info"])),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs=dict(
dat_path=files["testing_dat"],
cat_path=files["testing_cat"],
info_path=files["testing_info"])),
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"filenames",
"=",
"{",
"\"training_dat\"",
":",
"_TRAINING_URL_TEMPLATE",
".",
"format",
"(",
"type",
"=",
"\"dat\"",
")",
",",
"\"training_cat\"",
":",
"_TRAINING_URL_TEMPLATE",
".",
"format",
"(",
"type",
"=",
"\"cat\"",
")",
",",
"\"training_info\"",
":",
"_TRAINING_URL_TEMPLATE",
".",
"format",
"(",
"type",
"=",
"\"info\"",
")",
",",
"\"testing_dat\"",
":",
"_TESTING_URL_TEMPLATE",
".",
"format",
"(",
"type",
"=",
"\"dat\"",
")",
",",
"\"testing_cat\"",
":",
"_TESTING_URL_TEMPLATE",
".",
"format",
"(",
"type",
"=",
"\"cat\"",
")",
",",
"\"testing_info\"",
":",
"_TESTING_URL_TEMPLATE",
".",
"format",
"(",
"type",
"=",
"\"info\"",
")",
",",
"}",
"files",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"filenames",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"dict",
"(",
"dat_path",
"=",
"files",
"[",
"\"training_dat\"",
"]",
",",
"cat_path",
"=",
"files",
"[",
"\"training_cat\"",
"]",
",",
"info_path",
"=",
"files",
"[",
"\"training_info\"",
"]",
")",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TEST",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"dict",
"(",
"dat_path",
"=",
"files",
"[",
"\"testing_dat\"",
"]",
",",
"cat_path",
"=",
"files",
"[",
"\"testing_cat\"",
"]",
",",
"info_path",
"=",
"files",
"[",
"\"testing_info\"",
"]",
")",
")",
",",
"]"
] |
Returns splits.
|
[
"Returns",
"splits",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L86-L114
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/smallnorb.py
|
Smallnorb._generate_examples
|
def _generate_examples(self, dat_path, cat_path, info_path):
"""Generate examples for the Smallnorb dataset.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Yields:
Dictionaries with images and the different labels.
"""
dat_arr, cat_arr, info_arr = _load_chunk(dat_path, cat_path, info_path)
for image, category, info_vec in moves.zip(dat_arr, cat_arr, info_arr):
yield {
"image": image[0],
"image2": image[1],
"label_category": category,
"instance": info_vec[0],
"label_elevation": info_vec[1],
"label_azimuth": info_vec[2],
"label_lighting": info_vec[3],
}
|
python
|
def _generate_examples(self, dat_path, cat_path, info_path):
"""Generate examples for the Smallnorb dataset.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Yields:
Dictionaries with images and the different labels.
"""
dat_arr, cat_arr, info_arr = _load_chunk(dat_path, cat_path, info_path)
for image, category, info_vec in moves.zip(dat_arr, cat_arr, info_arr):
yield {
"image": image[0],
"image2": image[1],
"label_category": category,
"instance": info_vec[0],
"label_elevation": info_vec[1],
"label_azimuth": info_vec[2],
"label_lighting": info_vec[3],
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"dat_path",
",",
"cat_path",
",",
"info_path",
")",
":",
"dat_arr",
",",
"cat_arr",
",",
"info_arr",
"=",
"_load_chunk",
"(",
"dat_path",
",",
"cat_path",
",",
"info_path",
")",
"for",
"image",
",",
"category",
",",
"info_vec",
"in",
"moves",
".",
"zip",
"(",
"dat_arr",
",",
"cat_arr",
",",
"info_arr",
")",
":",
"yield",
"{",
"\"image\"",
":",
"image",
"[",
"0",
"]",
",",
"\"image2\"",
":",
"image",
"[",
"1",
"]",
",",
"\"label_category\"",
":",
"category",
",",
"\"instance\"",
":",
"info_vec",
"[",
"0",
"]",
",",
"\"label_elevation\"",
":",
"info_vec",
"[",
"1",
"]",
",",
"\"label_azimuth\"",
":",
"info_vec",
"[",
"2",
"]",
",",
"\"label_lighting\"",
":",
"info_vec",
"[",
"3",
"]",
",",
"}"
] |
Generate examples for the Smallnorb dataset.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Yields:
Dictionaries with images and the different labels.
|
[
"Generate",
"examples",
"for",
"the",
"Smallnorb",
"dataset",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L116-L138
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.