Dataset fields: repo (string, 7-55 chars), path (string, 4-223 chars), func_name (string, 1-134 chars), original_string (string, 75-104k chars), language (1 class: python), code (string, 75-104k chars, identical to original_string), code_tokens (list, 19-28.4k tokens), docstring (string, 1-46.9k chars), docstring_tokens (list, 1-1.97k tokens), sha (string, 40 chars), url (string, 87-315 chars), partition (1 class: train).

Each entry below gives repo | path | func_name | language | partition | sha on one line and the source url on the next, followed by the function's code (the original_string/code field) with its docstring; code_tokens and docstring_tokens are token-level copies of the code and docstring fields.
tensorflow/datasets | tensorflow_datasets/core/file_format_adapter.py | _incomplete_files | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L218-L228

```python
def _incomplete_files(filenames):
  """Create temporary files for filenames and rename on exit."""
  tmp_files = [get_incomplete_path(f) for f in filenames]
  try:
    yield tmp_files
    for tmp, output in zip(tmp_files, filenames):
      tf.io.gfile.rename(tmp, output)
  finally:
    for tmp in tmp_files:
      if tf...
```
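The `finally` clause is truncated in this record. A minimal runnable sketch of what it plausibly does, assuming the cleanup mirrors `incomplete_dir` in the next entry and that `get_incomplete_path` simply appends an `.incomplete`-style suffix (both are assumptions, not confirmed by this record):

```python
import contextlib

import tensorflow as tf


def get_incomplete_path(filename):
  # Hypothetical helper: mark in-progress files with a suffix.
  return filename + ".incomplete"


@contextlib.contextmanager
def _incomplete_files(filenames):
  """Create temporary files for filenames and rename on exit."""
  tmp_files = [get_incomplete_path(f) for f in filenames]
  try:
    yield tmp_files
    for tmp, output in zip(tmp_files, filenames):
      tf.io.gfile.rename(tmp, output)
  finally:
    # Assumed completion: delete any temp file that was not renamed,
    # mirroring the cleanup in incomplete_dir below.
    for tmp in tmp_files:
      if tf.io.gfile.exists(tmp):
        tf.io.gfile.remove(tmp)
```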
tensorflow/datasets | tensorflow_datasets/core/file_format_adapter.py | incomplete_dir | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L232-L241

```python
def incomplete_dir(dirname):
  """Create temporary dir for dirname and rename on exit."""
  tmp_dir = get_incomplete_path(dirname)
  tf.io.gfile.makedirs(tmp_dir)
  try:
    yield tmp_dir
    tf.io.gfile.rename(tmp_dir, dirname)
  finally:
    if tf.io.gfile.exists(tmp_dir):
      tf.io.gfile.rmtree(tmp_dir)
```
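Since `incomplete_dir` yields and then renames, it only makes sense as a generator decorated with `@contextlib.contextmanager`. A usage sketch under that assumption:

```python
import os

# Sketch: write into the temp dir; on clean exit it is renamed to the target,
# on error the temp dir is removed and the target never appears.
with incomplete_dir("/tmp/my_dataset/1.0.0") as tmp_dir:
  with tf.io.gfile.GFile(os.path.join(tmp_dir, "data.txt"), "w") as f:
    f.write("hello")
```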
tensorflow/datasets | tensorflow_datasets/core/file_format_adapter.py | _shuffle_tfrecord | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L244-L258

```python
def _shuffle_tfrecord(path, random_gen):
  """Shuffle a single record file in memory."""
  # Read all records
  record_iter = tf.compat.v1.io.tf_record_iterator(path)
  all_records = [
      r for r in utils.tqdm(
          record_iter, desc="Reading...", unit=" examples", leave=False)
  ]
  # Shuffling in memory
  ran...
```
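The record is cut off after the read phase. A hedged sketch of the likely remainder: shuffle in memory, then rewrite the same file (the tqdm progress wrappers are omitted, and `random_gen` is assumed to expose `shuffle` like `random.Random`):

```python
import tensorflow as tf


def _shuffle_tfrecord_sketch(path, random_gen):
  """Plausible completion: shuffle records in memory and rewrite the file."""
  all_records = list(tf.compat.v1.io.tf_record_iterator(path))
  random_gen.shuffle(all_records)
  with tf.io.TFRecordWriter(path) as writer:
    for record in all_records:
      writer.write(record)
```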
tensorflow/datasets | tensorflow_datasets/core/file_format_adapter.py | _write_tfrecords_from_generator | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L261-L281

```python
def _write_tfrecords_from_generator(generator, output_files, shuffle=True):
  """Writes generated str records to output_files in round-robin order."""
  if do_files_exist(output_files):
    raise ValueError(
        "Pre-processed files already exists: {}.".format(output_files))
  with _incomplete_files(output_files) ...
```
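The body is truncated right after `_incomplete_files` is entered. A sketch of round-robin sharding consistent with the docstring and with `_round_robin_write` below, reusing the helpers from the earlier entries (the exact writer setup in the source is not visible here):

```python
import random

import tensorflow as tf


def write_tfrecords_sketch(generator, output_files, shuffle=True):
  """Sketch: open one writer per shard and spread records round-robin."""
  with _incomplete_files(output_files) as tmp_files:
    writers = [tf.io.TFRecordWriter(fname) for fname in tmp_files]
    for i, serialized_example in enumerate(generator):
      writers[i % len(writers)].write(serialized_example)
    for writer in writers:
      writer.close()
    if shuffle:
      for tmp in tmp_files:
        _shuffle_tfrecord(tmp, random_gen=random.Random(42))
```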
tensorflow/datasets | tensorflow_datasets/core/file_format_adapter.py | _round_robin_write | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L284-L288

```python
def _round_robin_write(writers, generator):
  """Write records from generator round-robin across writers."""
  for i, example in enumerate(utils.tqdm(
      generator, unit=" examples", leave=False)):
    writers[i % len(writers)].write(example)
```
tensorflow/datasets | tensorflow_datasets/core/file_format_adapter.py | _item_to_tf_feature | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L307-L344

```python
def _item_to_tf_feature(item, key_name):
  """Single item to a tf.train.Feature."""
  v = item
  if isinstance(v, (list, tuple)) and not v:
    raise ValueError(
        "Feature {} received an empty list value, so is unable to infer the "
        "feature type to record. To support empty value, the corresponding "
        ...
```
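The dispatch logic after the empty-list check is truncated. The conventional mapping from Python values to `tf.train.Feature`, shown as a sketch rather than the exact source:

```python
import numpy as np
import tensorflow as tf


def item_to_tf_feature_sketch(item):
  """Sketch of the usual value -> tf.train.Feature dispatch."""
  v = np.ravel(np.array(item))  # flatten scalars/lists to a 1-D array
  if np.issubdtype(v.dtype, np.integer):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
  elif np.issubdtype(v.dtype, np.floating):
    return tf.train.Feature(float_list=tf.train.FloatList(value=v))
  else:  # bytes / str
    values = [tf.compat.as_bytes(x) for x in v]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
```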
tensorflow/datasets | tensorflow_datasets/core/file_format_adapter.py | _dict_to_tf_features | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L347-L351

```python
def _dict_to_tf_features(example_dict):
  """Builds tf.train.Features from (string -> int/float/str list) dictionary."""
  features = {k: _item_to_tf_feature(v, k) for k, v
              in six.iteritems(example_dict)}
  return tf.train.Features(feature=features)
```
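A short usage sketch for the two helpers above, assuming each dictionary value is a list of ints, floats, or bytes as the docstring states:

```python
features = _dict_to_tf_features({
    "label": [3],
    "weights": [0.1, 0.9],
    "name": [b"example-0"],
})
example = tf.train.Example(features=features)
serialized = example.SerializeToString()  # what gets written to a TFRecord
```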
tensorflow/datasets | tensorflow_datasets/core/utils/tqdm_utils.py | _async_tqdm | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L79-L102

````python
def _async_tqdm(*args, **kwargs):
  """Wrapper around Tqdm which can be updated in threads.

  Usage:

  ```
  with utils.async_tqdm(...) as pbar:
    # pbar can then be modified inside a thread
    # pbar.update_total(3)
    # pbar.update()
  ```

  Args:
    *args: args of tqdm
    **kwargs: kwargs of tqdm

  Yields:
    pbar: Async pbar which can be shar...
  """
  with tqdm_lib.tqdm(*args, **kwargs) as pbar:
    pbar = _TqdmPbarAsync(pbar)
    yield pbar
    pbar...
````
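A runnable sketch of the documented usage, with several threads sharing one bar (this assumes `_async_tqdm` is decorated as a context manager, as the `with` form in its docstring implies):

```python
import threading

# Sketch: worker threads report progress through one shared bar.
with _async_tqdm(total=0, desc="processing", unit=" items") as pbar:

  def worker(n_items):
    pbar.update_total(n_items)  # grow the denominator from the thread
    for _ in range(n_items):
      pbar.update()             # tick one item

  threads = [threading.Thread(target=worker, args=(10,)) for _ in range(4)]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
```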
tensorflow/datasets | tensorflow_datasets/core/utils/tqdm_utils.py | _TqdmPbarAsync.update_total | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L114-L118

```python
def update_total(self, n=1):
  """Increment total pbar value."""
  with self._lock:
    self._pbar.total += n
    self.refresh()
```
tensorflow/datasets | tensorflow_datasets/core/utils/tqdm_utils.py | _TqdmPbarAsync.update | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L120-L124

```python
def update(self, n=1):
  """Increment current value."""
  with self._lock:
    self._pbar.update(n)
    self.refresh()
```
tensorflow/datasets | tensorflow_datasets/image/abstract_reasoning.py | AbstractReasoning._build_pcollection | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/abstract_reasoning.py#L250-L305

```python
def _build_pcollection(self, pipeline, folder, split):
  """Generate examples as dicts."""
  beam = tfds.core.lazy_imports.apache_beam
  split_type = self.builder_config.split_type
  filename = os.path.join(folder, "{}.tar.gz".format(split_type))

  def _extract_data(inputs):
    """Extracts files from th...
```
tensorflow/datasets | tensorflow_datasets/core/download/extractor.py | _copy | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L103-L111

```python
def _copy(src_file, dest_path):
  """Copy data read from src file obj to new file in dest_path."""
  tf.io.gfile.makedirs(os.path.dirname(dest_path))
  with tf.io.gfile.GFile(dest_path, 'wb') as dest_file:
    while True:
      data = src_file.read(io.DEFAULT_BUFFER_SIZE)
      if not data:
        break
      dest_file.write(data)
```
tensorflow/datasets | tensorflow_datasets/core/download/extractor.py | iter_tar | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L133-L158

```python
def iter_tar(arch_f, gz=False, stream=False):
  """Iter over tar archive, yielding (path, object-like) tuples.

  Args:
    arch_f: File object of the archive to iterate.
    gz: If True, open a gzip'ed archive.
    stream: If True, open the archive in stream mode which allows for faster
      processing and less temporary disk consumption, but random access to the
      ...
  """
  read_type = 'r' + ('|' if stream else ':')
  if gz:
    read_type += 'gz'
  with _open_or_pass(...
```
tensorflow/datasets | tensorflow_datasets/core/download/extractor.py | _Extractor.tqdm | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L68-L73

```python
def tqdm(self):
  """Add a progression bar for the current extraction."""
  with utils.async_tqdm(
      total=0, desc='Extraction completed...', unit=' file') as pbar_path:
    self._pbar_path = pbar_path
    yield
```
tensorflow/datasets | tensorflow_datasets/core/download/extractor.py | _Extractor.extract | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L75-L82

```python
def extract(self, path, extract_method, to_path):
  """Returns `promise.Promise` => to_path."""
  self._pbar_path.update_total(1)
  if extract_method not in _EXTRACT_METHODS:
    raise ValueError('Unknown extraction method "%s".' % extract_method)
  future = self._executor.submit(self._sync_extract,
                                 ...
```
tensorflow/datasets | tensorflow_datasets/core/download/extractor.py | _Extractor._sync_extract | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L84-L100

```python
def _sync_extract(self, from_path, method, to_path):
  """Returns `to_path` once resource has been extracted there."""
  to_path_tmp = '%s%s_%s' % (to_path, constants.INCOMPLETE_SUFFIX,
                             uuid.uuid4().hex)
  try:
    for path, handle in iter_archive(from_path, method):
      _copy...
```
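The `try` block is cut off mid-`_copy` call. A sketch of a plausible completion: extract every archive member into a temporary path, clean up on failure, and rename on success (the `.incomplete` suffix stands in for `constants.INCOMPLETE_SUFFIX` plus the UUID):

```python
import os

import tensorflow as tf


def _sync_extract_sketch(from_path, method, to_path):
  """Sketch of the truncated body: extract to a temp dir, then rename."""
  to_path_tmp = to_path + ".incomplete"  # hypothetical suffix
  try:
    for path, handle in iter_archive(from_path, method):
      _copy(handle, os.path.join(to_path_tmp, path))
  except BaseException:
    # Leave no partial extraction behind.
    if tf.io.gfile.exists(to_path_tmp):
      tf.io.gfile.rmtree(to_path_tmp)
    raise
  tf.io.gfile.rename(to_path_tmp, to_path, overwrite=True)
  return to_path
```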
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | to_serialized_field | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L576-L612

```python
def to_serialized_field(tensor_info):
  """Convert a `TensorInfo` object into a feature proto object."""
  # Select the type
  dtype = tensor_info.dtype

  # TODO(b/119937875): TF Examples proto only support int64, float32 and string
  # This create limitation like float64 downsampled to float32, bool converted
  # to ...
```
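The truncated comment hints at the type-narrowing rules. A sketch of the usual mapping from a `(shape, dtype)` pair to a `tf.Example` parsing config; the exact branching in the source is not visible here:

```python
import tensorflow as tf


def to_serialized_field_sketch(shape, dtype):
  """Sketch: map a (shape, dtype) TensorInfo to a tf.Example parsing config."""
  # tf.Example only stores int64, float32 and bytes, so narrower types
  # are widened or downcast before serialization.
  if dtype.is_integer or dtype.is_bool:
    dtype = tf.int64
  elif dtype.is_floating:
    dtype = tf.float32
  if None in shape:  # unknown dims -> variable-length feature
    return tf.io.VarLenFeature(dtype=dtype)
  return tf.io.FixedLenFeature(shape=shape, dtype=dtype)
```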
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | to_feature | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L615-L624

```python
def to_feature(value):
  """Convert the given value to Feature if necessary."""
  if isinstance(value, FeatureConnector):
    return value
  elif utils.is_dtype(value):  # tf.int32, tf.string,...
    return Tensor(shape=(), dtype=tf.as_dtype(value))
  elif isinstance(value, dict):
    return FeaturesDict(value)
  else:
    ...
```
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | decode_single_feature_from_dict | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L627-L651

```python
def decode_single_feature_from_dict(
    feature_k,
    feature,
    tfexample_dict):
  """Decode the given feature from the tfexample_dict.

  Args:
    feature_k (str): Feature key in the tfexample_dict
    feature (FeatureConnector): Connector object to use to decode the field
    tfexample_dict (dict): Dict containing the data to decode.

  Returns:
    decoded_feature: The output of the feature.decode_e...
  """
  # Singleton case
  if not feature.serialized_keys:
    data_to_decode = tfexample_dict[feature_k]
  # Feature contains sub ...
```
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | _assert_keys_match | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L654-L657

```python
def _assert_keys_match(keys1, keys2):
  """Ensure the two list of keys matches."""
  if set(keys1) != set(keys2):
    raise ValueError('{} {}'.format(list(keys1), list(keys2)))
```
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | FeaturesDict.get_tensor_info | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L437-L442

```python
def get_tensor_info(self):
  """See base class for details."""
  return {
      feature_key: feature.get_tensor_info()
      for feature_key, feature in self._feature_dict.items()
  }
```
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | FeaturesDict.get_serialized_info | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L444-L466

```python
def get_serialized_info(self):
  """See base class for details."""
  # Flatten tf-example features dict
  # Use NonMutableDict to ensure there is no collision between features keys
  features_dict = utils.NonMutableDict()
  for feature_key, feature in self._feature_dict.items():
    serialized_info = featur...
```
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | FeaturesDict.encode_example | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L468-L490

```python
def encode_example(self, example_dict):
  """See base class for details."""
  # Flatten dict matching the tf-example features
  # Use NonMutableDict to ensure there is no collision between features keys
  tfexample_dict = utils.NonMutableDict()

  # Iterate over example fields
  for feature_key, (feature, e...
```
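The flattening loop is truncated. A sketch of how nested feature encodings are typically flattened into a single `tf.Example`-ready dict (the `/`-joined key format is an assumption):

```python
def encode_example_sketch(feature_dict, example_dict):
  """Sketch: flatten nested feature encodings into one tf.Example dict."""
  tfexample_dict = {}
  for feature_key, feature in feature_dict.items():
    encoded = feature.encode_example(example_dict[feature_key])
    if isinstance(encoded, dict):  # feature with sub-features
      for k, v in encoded.items():
        tfexample_dict["{}/{}".format(feature_key, k)] = v
    else:
      tfexample_dict[feature_key] = encoded
  return tfexample_dict
```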
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | FeaturesDict.decode_example | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L492-L503

```python
def decode_example(self, tfexample_dict):
  """See base class for details."""
  tensor_dict = {}
  # Iterate over the Tensor dict keys
  for feature_key, feature in six.iteritems(self._feature_dict):
    decoded_feature = decode_single_feature_from_dict(
        feature_k=feature_key,
        feature=feat...
```
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | FeaturesDict.save_metadata | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L508-L514

```python
def save_metadata(self, data_dir, feature_name=None):
  """See base class for details."""
  # Recursively save all child features
  for feature_key, feature in six.iteritems(self._feature_dict):
    if feature_name:
      feature_key = '-'.join((feature_name, feature_key))
    feature.save_metadata(data_dir...
```
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | Tensor.encode_example | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L548-L562

```python
def encode_example(self, example_data):
  """See base class for details."""
  np_dtype = np.dtype(self._dtype.as_numpy_dtype)
  # Convert to numpy if possible
  if not isinstance(example_data, np.ndarray):
    example_data = np.array(example_data, dtype=np_dtype)
  # Ensure the shape and dtype match
  if ...
```
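The shape/dtype validation after the comment is truncated. A sketch of checks consistent with the docstring, treating `None` dimensions as wildcards:

```python
import numpy as np


def encode_tensor_example_sketch(example_data, shape, np_dtype):
  """Sketch of the truncated checks: enforce dtype and shape before writing."""
  if not isinstance(example_data, np.ndarray):
    example_data = np.array(example_data, dtype=np_dtype)
  if example_data.dtype != np_dtype:
    raise ValueError("Dtype {} does not match {}".format(
        example_data.dtype, np_dtype))
  # Compare shapes, treating None as a wildcard dimension.
  if len(example_data.shape) != len(shape) or any(
      dim is not None and dim != actual
      for dim, actual in zip(shape, example_data.shape)):
    raise ValueError("Shape {} does not match {}".format(
        example_data.shape, shape))
  return example_data
```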
tensorflow/datasets | tensorflow_datasets/core/features/feature.py | Tensor.decode_example | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L564-L573

```python
def decode_example(self, tfexample_data):
  """See base class for details."""
  # TODO(epot): Support dynamic shape
  if self.shape.count(None) < 2:
    # Restore the shape if possible. TF Example flattened it.
    shape = [-1 if i is None else i for i in self.shape]
    tfexample_data = tf.reshape(tfexampl...
```
tensorflow/datasets | tensorflow_datasets/image/celeba.py | CelebA._process_celeba_config_file | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/celeba.py#L150-L175

```python
def _process_celeba_config_file(self, file_path):
  """Unpack the celeba config file.

  The file starts with the number of lines, and a header.
  Afterwards, there is a configuration for each file: one per line.

  Args:
    file_path: Path to the file with the configuration.

  Returns:
    keys: names of the attributes
    values: map from the file name to...
  """
  with tf.io.gfile.GFile(file_path) as f:
    data_raw = f.read()
  lines = data_raw.sp...
```
tensorflow/datasets | tensorflow_datasets/image/celeba.py | CelebA._generate_examples | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/celeba.py#L177-L207

```python
def _generate_examples(self, file_id, extracted_dirs):
  """Yields examples."""
  filedir = os.path.join(extracted_dirs["img_align_celeba"],
                         "img_align_celeba")
  img_list_path = extracted_dirs["list_eval_partition"]
  landmarks_path = extracted_dirs["landmarks_celeba"]
  attr_path ...
```
tensorflow/datasets | tensorflow_datasets/image/quickdraw.py | QuickdrawBitmap._generate_examples | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/quickdraw.py#L97-L117

```python
def _generate_examples(self, file_paths):
  """Generate QuickDraw bitmap examples.

  Given a list of file paths with data for each class label, generate examples
  in a random order.

  Args:
    file_paths: (dict of {str: str}) the paths to files containing the data,
      indexed by label.

  Yields:
    The QuickDraw examples, as defined...
  """
  for label, path in sorted(file_paths.items(), key=lambda x: x[0]):
    with tf...
```
tensorflow/datasets | tensorflow_datasets/core/tf_compat.py | ensure_tf_install | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L39-L67

```python
def ensure_tf_install():  # pylint: disable=g-statement-before-imports
  """Attempt to import tensorflow, and ensure its version is sufficient.

  Raises:
    ImportError: if either tensorflow is not importable or its version is
      inadequate.
  """
  try:
    import tensorflow as tf
  except ImportError:
    # Print more informative error message, then reraise.
    print("\n\nFailed to import TensorFlow. Pl...
```
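The version check itself is cut off. A sketch using the same `distutils` machinery that `_patch_tf` below relies on; the minimum version constant is an assumption:

```python
import distutils.version

MIN_TF_VERSION = "1.12.0"  # assumed minimum; the real constant lives in tfds


def ensure_tf_install_sketch():
  try:
    import tensorflow as tf
  except ImportError:
    print("\n\nFailed to import TensorFlow. Please install the package.")
    raise
  tf_version = distutils.version.LooseVersion(tf.__version__)
  if tf_version < distutils.version.LooseVersion(MIN_TF_VERSION):
    raise ImportError(
        "This package requires TensorFlow {} or higher; found {}.".format(
            MIN_TF_VERSION, tf.__version__))
```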
tensorflow/datasets | tensorflow_datasets/core/tf_compat.py | _patch_tf | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L70-L89

```python
def _patch_tf(tf):
  """Patch TF to maintain compatibility across versions."""
  global TF_PATCH
  if TF_PATCH:
    return

  v_1_12 = distutils.version.LooseVersion("1.12.0")
  v_1_13 = distutils.version.LooseVersion("1.13.0")
  v_2 = distutils.version.LooseVersion("2.0.0")
  tf_version = distutils.version.LooseVersio...
```
tensorflow/datasets | tensorflow_datasets/core/tf_compat.py | _patch_for_tf1_12 | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L100-L132

```python
def _patch_for_tf1_12(tf):
  """Monkey patch tf 1.12 so tfds can use it."""
  tf.io.gfile = tf.gfile
  tf.io.gfile.copy = tf.gfile.Copy
  tf.io.gfile.exists = tf.gfile.Exists
  tf.io.gfile.glob = tf.gfile.Glob
  tf.io.gfile.isdir = tf.gfile.IsDirectory
  tf.io.gfile.listdir = tf.gfile.ListDirectory
  tf.io.gfile.makedi...
```
tensorflow/datasets | tensorflow_datasets/core/tf_compat.py | _patch_for_tf1_13 | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L135-L152

```python
def _patch_for_tf1_13(tf):
  """Monkey patch tf 1.13 so tfds can use it."""
  if not hasattr(tf.io.gfile, "GFile"):
    tf.io.gfile.GFile = tf.gfile.GFile
  if not hasattr(tf, "nest"):
    tf.nest = tf.contrib.framework.nest
  if not hasattr(tf.compat, "v2"):
    tf.compat.v2 = types.ModuleType("tf.compat.v2")
    tf.c...
```
tensorflow/datasets | tensorflow_datasets/core/tf_compat.py | is_dataset | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L155-L166

```python
def is_dataset(ds):
  """Whether ds is a Dataset. Compatible across TF versions."""
  import tensorflow as tf
  from tensorflow_datasets.core.utils import py_utils
  dataset_types = [tf.data.Dataset]
  v1_ds = py_utils.rgetattr(tf, "compat.v1.data.Dataset", None)
  v2_ds = py_utils.rgetattr(tf, "compat.v2.data.Dataset"...
```
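The tail that assembles `dataset_types` and returns is truncated. A self-contained sketch that replaces `py_utils.rgetattr` with a plain attribute walk:

```python
def is_dataset_sketch(ds):
  """Sketch: collect the Dataset classes known to this TF and test ds."""
  import tensorflow as tf
  dataset_types = [tf.data.Dataset]
  for attr_path in ("compat.v1.data.Dataset", "compat.v2.data.Dataset"):
    obj = tf
    for name in attr_path.split("."):
      obj = getattr(obj, name, None)
      if obj is None:
        break
    if obj is not None:
      dataset_types.append(obj)
  return isinstance(ds, tuple(dataset_types))
```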
tensorflow/datasets | tensorflow_datasets/translate/ted_multi.py | TedMultiTranslate._generate_examples | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/ted_multi.py#L108-L123

```python
def _generate_examples(self, data_file):
  """This function returns the examples in the raw (text) form."""
  with tf.io.gfile.GFile(data_file) as f:
    reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
    for row in reader:
      # Everything in the row except for 'talk_name' will be a tra...
```
tensorflow/datasets | tensorflow_datasets/text/multi_nli.py | MultiNLI._generate_examples | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/multi_nli.py#L148-L171

```python
def _generate_examples(self, filepath):
  """Generate mnli examples.

  Args:
    filepath: a string

  Yields:
    dictionaries containing "premise", "hypothesis" and "label" strings
  """
  for idx, line in enumerate(tf.io.gfile.GFile(filepath, "rb")):
    if idx == 0:
      continue  # skip header
    ...
```
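The per-line parsing after the header skip is truncated. A sketch that splits on tabs and yields the documented keys; the column indices are assumptions, not taken from this record:

```python
import tensorflow as tf


def generate_mnli_examples_sketch(filepath):
  """Sketch: parse the tab-separated MNLI file, skipping the header row."""
  for idx, line in enumerate(tf.io.gfile.GFile(filepath, "rb")):
    if idx == 0:
      continue  # skip header
    split_line = tf.compat.as_text(line.strip()).split("\t")
    yield {
        "premise": split_line[5],     # hypothetical column positions
        "hypothesis": split_line[6],
        "label": split_line[0],
    }
```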
tensorflow/datasets | tensorflow_datasets/image/image_folder.py | ImageLabelFolder._split_generators | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/image_folder.py#L103-L154

```python
def _split_generators(self, dl_manager):
  """Returns SplitGenerators from the folder names."""
  # At data creation time, parse the folder to deduce number of splits,
  # labels, image size,

  # The splits correspond to the high level folders
  split_names = list_folders(dl_manager.manual_dir)

  # Extrac...
```
tensorflow/datasets | tensorflow_datasets/image/image_folder.py | ImageLabelFolder._generate_examples | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/image_folder.py#L156-L164

```python
def _generate_examples(self, label_images):
  """Generate example for each image in the dict."""
  for label, image_paths in label_images.items():
    for image_path in image_paths:
      yield {
          "image": image_path,
          "label": label,
      }
```
tensorflow/datasets | tensorflow_datasets/scripts/create_new_dataset.py | create_dataset_file | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/create_new_dataset.py#L155-L164

```python
def create_dataset_file(root_dir, data):
  """Create a new dataset from a template."""
  file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}.py')
  context = (
      _HEADER + _DATASET_DEFAULT_IMPORTS + _CITATION
      + _DESCRIPTION + _DATASET_DEFAULTS
  )

  with gfile.GFile(file_path.format(**data),...
```
tensorflow/datasets | tensorflow_datasets/scripts/create_new_dataset.py | add_the_init | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/create_new_dataset.py#L167-L175

```python
def add_the_init(root_dir, data):
  """Append the new dataset file to the __init__.py."""
  init_file = os.path.join(root_dir, '{dataset_type}', '__init__.py')
  context = (
      'from tensorflow_datasets.{dataset_type}.{dataset_name} import '
      '{dataset_cls}  # {TODO} Sort alphabetically\n'
  )

  with gfile.GFil...
```
tensorflow/datasets | tensorflow_datasets/image/svhn.py | SvhnCropped._generate_examples | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/svhn.py#L92-L113

```python
def _generate_examples(self, filepath):
  """Generate examples as dicts.

  Args:
    filepath: `str` path of the file to process.

  Yields:
    Generator yielding the next samples
  """
  with tf.io.gfile.GFile(filepath, "rb") as f:
    data = tfds.core.lazy_imports.scipy.io.loadmat(f)
  # Maybe sh...
```
tensorflow/datasets | tensorflow_datasets/image/chexpert.py | Chexpert._split_generators | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/chexpert.py#L93-L121

```python
def _split_generators(self, dl_manager):
  """Returns SplitGenerators."""
  path = dl_manager.manual_dir
  train_path = os.path.join(path, _TRAIN_DIR)
  val_path = os.path.join(path, _VALIDATION_DIR)
  if not tf.io.gfile.exists(train_path) or not tf.io.gfile.exists(val_path):
    msg = ("You must download ...
```
tensorflow/datasets | tensorflow_datasets/image/chexpert.py | Chexpert._generate_examples | python | train | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/chexpert.py#L123-L141

```python
def _generate_examples(self, imgs_path, csv_path):
  """Yields examples."""
  with tf.io.gfile.GFile(csv_path) as csv_f:
    reader = csv.DictReader(csv_f)
    # Get keys for each label from csv
    label_keys = reader.fieldnames[5:]
    data = []
    for row in reader:
      # Get image based on indica...
```
tensorflow/datasets | tensorflow_datasets/image/imagenet2012_corrupted.py | _make_builder_configs | def _make_builder_configs():
"""Construct a list of BuilderConfigs.
Construct a list of 60 Imagenet2012CorruptedConfig objects, corresponding to
the 12 corruption types, with each type having 5 severities.
Returns:
A list of 60 Imagenet2012CorruptedConfig objects.
"""
config_list = []
for each_corru... | python | def _make_builder_configs():
"""Construct a list of BuilderConfigs.
Construct a list of 60 Imagenet2012CorruptedConfig objects, corresponding to
the 12 corruption types, with each type having 5 severities.
Returns:
A list of 60 Imagenet2012CorruptedConfig objects.
"""
config_list = []
for each_corru... | [
"def",
"_make_builder_configs",
"(",
")",
":",
"config_list",
"=",
"[",
"]",
"for",
"each_corruption",
"in",
"TYPE_LIST",
":",
"for",
"each_severity",
"in",
"range",
"(",
"1",
",",
"6",
")",
":",
"name_str",
"=",
"each_corruption",
"+",
"'_'",
"+",
"str",
... | Construct a list of BuilderConfigs.
Construct a list of 60 Imagenet2012CorruptedConfig objects, corresponding to
the 12 corruption types, with each type having 5 severities.
Returns:
A list of 60 Imagenet2012CorruptedConfig objects. | [
"Construct",
"a",
"list",
"of",
"BuilderConfigs",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet2012_corrupted.py#L83-L107 | train |
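The token stream above spells the config names out as `corruption + '_' + str(severity)` over `range(1, 6)`. A minimal sketch of that cross product, with a two-entry stand-in for the real 12-entry TYPE_LIST (an assumption made here only to keep the example short):

```python
# Sketch of the corruption x severity cross product described in the docstring.
TYPE_LIST = ["gaussian_noise", "shot_noise"]  # the real list has 12 corruption types
config_names = ["%s_%d" % (c, s) for c in TYPE_LIST for s in range(1, 6)]
print(len(config_names))  # 10 here; 12 * 5 = 60 with the full list
print(config_names[:3])   # ['gaussian_noise_1', 'gaussian_noise_2', 'gaussian_noise_3']
```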
tensorflow/datasets | tensorflow_datasets/image/imagenet2012_corrupted.py | Imagenet2012Corrupted._split_generators | def _split_generators(self, dl_manager):
"""Return the validation split of ImageNet2012.
Args:
dl_manager: download manager object.
Returns:
validation split.
"""
splits = super(Imagenet2012Corrupted, self)._split_generators(dl_manager)
validation = splits[1]
return [validation... | python | def _split_generators(self, dl_manager):
"""Return the validation split of ImageNet2012.
Args:
dl_manager: download manager object.
Returns:
validation split.
"""
splits = super(Imagenet2012Corrupted, self)._split_generators(dl_manager)
validation = splits[1]
return [validation... | [
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"splits",
"=",
"super",
"(",
"Imagenet2012Corrupted",
",",
"self",
")",
".",
"_split_generators",
"(",
"dl_manager",
")",
"validation",
"=",
"splits",
"[",
"1",
"]",
"return",
"[",
"valida... | Return the validation split of ImageNet2012.
Args:
dl_manager: download manager object.
Returns:
validation split. | [
"Return",
"the",
"validation",
"split",
"of",
"ImageNet2012",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet2012_corrupted.py#L134-L145 | train |
tensorflow/datasets | tensorflow_datasets/image/imagenet2012_corrupted.py | Imagenet2012Corrupted._generate_examples_validation | def _generate_examples_validation(self, archive, labels):
"""Generate corrupted imagenet validation data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
archive: an iterator for the raw dataset.
labels: a dictionary that maps the file names to imagenet labels.
... | python | def _generate_examples_validation(self, archive, labels):
"""Generate corrupted imagenet validation data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
archive: an iterator for the raw dataset.
labels: a dictionary that maps the file names to imagenet labels.
... | [
"def",
"_generate_examples_validation",
"(",
"self",
",",
"archive",
",",
"labels",
")",
":",
"# Get the current random seeds.",
"numpy_st0",
"=",
"np",
".",
"random",
".",
"get_state",
"(",
")",
"# Set new random seeds.",
"np",
".",
"random",
".",
"seed",
"(",
... | Generate corrupted imagenet validation data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
archive: an iterator for the raw dataset.
labels: a dictionary that maps the file names to imagenet labels.
Yields:
dictionary with the file name, an image file obje... | [
"Generate",
"corrupted",
"imagenet",
"validation",
"data",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet2012_corrupted.py#L147-L175 | train |
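The token stream shows the generator capturing the NumPy RNG state with `np.random.get_state()` before seeding, and presumably restoring it afterwards so corruption sampling is reproducible without disturbing the caller's randomness. A minimal sketch of that save/seed/restore pattern; the seed value below is an arbitrary placeholder, not the one the dataset uses:

```python
import numpy as np

numpy_st0 = np.random.get_state()        # remember the caller's RNG state
np.random.seed(135)                      # assumption: the real seed differs
corruption_noise = np.random.rand(2, 2)  # deterministic randomness for corruptions
np.random.set_state(numpy_st0)           # hand the original state back
```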
tensorflow/datasets | tensorflow_datasets/image/imagenet2012_corrupted.py | Imagenet2012Corrupted._get_corrupted_example | def _get_corrupted_example(self, x):
"""Return corrupted images.
Args:
x: numpy array, uncorrupted image.
Returns:
numpy array, corrupted images.
"""
corruption_type = self.builder_config.corruption_type
severity = self.builder_config.severity
return {
'gaussian_noise'... | python | def _get_corrupted_example(self, x):
"""Return corrupted images.
Args:
x: numpy array, uncorrupted image.
Returns:
numpy array, corrupted images.
"""
corruption_type = self.builder_config.corruption_type
severity = self.builder_config.severity
return {
'gaussian_noise'... | [
"def",
"_get_corrupted_example",
"(",
"self",
",",
"x",
")",
":",
"corruption_type",
"=",
"self",
".",
"builder_config",
".",
"corruption_type",
"severity",
"=",
"self",
".",
"builder_config",
".",
"severity",
"return",
"{",
"'gaussian_noise'",
":",
"corruptions",... | Return corrupted images.
Args:
x: numpy array, uncorrupted image.
Returns:
numpy array, corrupted images. | [
"Return",
"corrupted",
"images",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet2012_corrupted.py#L177-L202 | train |
tensorflow/datasets | tensorflow_datasets/core/utils/tf_utils.py | assert_shape_match | def assert_shape_match(shape1, shape2):
"""Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None)
"""
shape1 = tf.TensorShape(shape1)
shape2 = tf.TensorShape(s... | python | def assert_shape_match(shape1, shape2):
"""Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None)
"""
shape1 = tf.TensorShape(shape1)
shape2 = tf.TensorShape(s... | [
"def",
"assert_shape_match",
"(",
"shape1",
",",
"shape2",
")",
":",
"shape1",
"=",
"tf",
".",
"TensorShape",
"(",
"shape1",
")",
"shape2",
"=",
"tf",
".",
"TensorShape",
"(",
"shape2",
")",
"if",
"shape1",
".",
"ndims",
"is",
"None",
"or",
"shape2",
"... | Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None) | [
"Ensure",
"the",
"shape1",
"match",
"the",
"pattern",
"given",
"by",
"shape2",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L132-L148 | train |
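The check relies on `tf.TensorShape` treating `None` dimensions as wildcards, exactly as in the docstring's example. A hedged sketch of the same comparison as a boolean predicate, assuming TensorFlow is importable:

```python
import tensorflow as tf

def shapes_match(shape1, shape2):
    """True when the static shape1 fits the (possibly partial) shape2."""
    s1, s2 = tf.TensorShape(shape1), tf.TensorShape(shape2)
    return s1.is_compatible_with(s2)  # None acts as a wildcard dimension

assert shapes_match((64, 64, 3), (None, None, 3))
assert not shapes_match((64, 64, 4), (None, None, 3))  # channel mismatch
```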
tensorflow/datasets | tensorflow_datasets/core/utils/tf_utils.py | raw_nogpu_session | def raw_nogpu_session(graph=None):
"""tf.Session, hiding GPUs."""
config = tf.compat.v1.ConfigProto(device_count={'GPU': 0})
return tf.compat.v1.Session(config=config, graph=graph) | python | def raw_nogpu_session(graph=None):
"""tf.Session, hiding GPUs."""
config = tf.compat.v1.ConfigProto(device_count={'GPU': 0})
return tf.compat.v1.Session(config=config, graph=graph) | [
"def",
"raw_nogpu_session",
"(",
"graph",
"=",
"None",
")",
":",
"config",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"ConfigProto",
"(",
"device_count",
"=",
"{",
"'GPU'",
":",
"0",
"}",
")",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"Session",
... | tf.Session, hiding GPUs. | [
"tf",
".",
"Session",
"hiding",
"GPUs",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L161-L164 | train |
tensorflow/datasets | tensorflow_datasets/core/utils/tf_utils.py | maybe_with_graph | def maybe_with_graph(graph=None, create_if_none=True):
"""Eager-compatible Graph().as_default() yielding the graph."""
if tf.executing_eagerly():
yield None
else:
if graph is None and create_if_none:
graph = tf.Graph()
if graph is None:
yield None
else:
with graph.as_default():
... | python | def maybe_with_graph(graph=None, create_if_none=True):
"""Eager-compatible Graph().as_default() yielding the graph."""
if tf.executing_eagerly():
yield None
else:
if graph is None and create_if_none:
graph = tf.Graph()
if graph is None:
yield None
else:
with graph.as_default():
... | [
"def",
"maybe_with_graph",
"(",
"graph",
"=",
"None",
",",
"create_if_none",
"=",
"True",
")",
":",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"yield",
"None",
"else",
":",
"if",
"graph",
"is",
"None",
"and",
"create_if_none",
":",
"graph",
"="... | Eager-compatible Graph().as_default() yielding the graph. | [
"Eager",
"-",
"compatible",
"Graph",
"()",
".",
"as_default",
"()",
"yielding",
"the",
"graph",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L168-L180 | train |
tensorflow/datasets | tensorflow_datasets/core/utils/tf_utils.py | TFGraphRunner.run | def run(self, fct, input_):
"""Execute the given TensorFlow function."""
# TF 2.0
if tf.executing_eagerly():
return fct(input_).numpy()
# TF 1.0
else:
# Should compile the function if this is the first time encountered
if not isinstance(input_, np.ndarray):
input_ = np.arra... | python | def run(self, fct, input_):
"""Execute the given TensorFlow function."""
# TF 2.0
if tf.executing_eagerly():
return fct(input_).numpy()
# TF 1.0
else:
# Should compile the function if this is the first time encountered
if not isinstance(input_, np.ndarray):
input_ = np.arra... | [
"def",
"run",
"(",
"self",
",",
"fct",
",",
"input_",
")",
":",
"# TF 2.0",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"return",
"fct",
"(",
"input_",
")",
".",
"numpy",
"(",
")",
"# TF 1.0",
"else",
":",
"# Should compile the function if this is... | Execute the given TensorFlow function. | [
"Execute",
"the",
"given",
"TensorFlow",
"function",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L70-L92 | train |
tensorflow/datasets | tensorflow_datasets/core/utils/tf_utils.py | TFGraphRunner._build_graph_run | def _build_graph_run(self, run_args):
"""Create a new graph for the given args."""
# Could try to use tfe.py_func(fct) but this would require knowing
# information about the signature of the function.
# Create a new graph:
with tf.Graph().as_default() as g:
# Create placeholder
input_ =... | python | def _build_graph_run(self, run_args):
"""Create a new graph for the given args."""
# Could try to use tfe.py_func(fct) but this would require knowing
# information about the signature of the function.
# Create a new graph:
with tf.Graph().as_default() as g:
# Create placeholder
input_ =... | [
"def",
"_build_graph_run",
"(",
"self",
",",
"run_args",
")",
":",
"# Could try to use tfe.py_func(fct) but this would require knowing",
"# information about the signature of the function.",
"# Create a new graph:",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
... | Create a new graph for the given args. | [
"Create",
"a",
"new",
"graph",
"for",
"the",
"given",
"args",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L94-L111 | train |
tensorflow/datasets | tensorflow_datasets/core/utils/tf_utils.py | TFGraphRunner._build_signature | def _build_signature(self, run_args):
"""Create a unique signature for each fct/inputs."""
return (id(run_args.fct), run_args.input.dtype, run_args.input.shape) | python | def _build_signature(self, run_args):
"""Create a unique signature for each fct/inputs."""
return (id(run_args.fct), run_args.input.dtype, run_args.input.shape) | [
"def",
"_build_signature",
"(",
"self",
",",
"run_args",
")",
":",
"return",
"(",
"id",
"(",
"run_args",
".",
"fct",
")",
",",
"run_args",
".",
"input",
".",
"dtype",
",",
"run_args",
".",
"input",
".",
"shape",
")"
] | Create a unique signature for each fct/inputs. | [
"Create",
"a",
"unique",
"signature",
"for",
"each",
"fct",
"/",
"inputs",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L113-L115 | train |
tensorflow/datasets | tensorflow_datasets/core/features/video_feature.py | Video.encode_example | def encode_example(self, video_or_path_or_fobj):
"""Converts the given image into a dict convertible to tf example."""
if isinstance(video_or_path_or_fobj, six.string_types):
if not os.path.isfile(video_or_path_or_fobj):
_, video_temp_path = tempfile.mkstemp()
try:
tf.gfile.Copy(... | python | def encode_example(self, video_or_path_or_fobj):
"""Converts the given image into a dict convertible to tf example."""
if isinstance(video_or_path_or_fobj, six.string_types):
if not os.path.isfile(video_or_path_or_fobj):
_, video_temp_path = tempfile.mkstemp()
try:
tf.gfile.Copy(... | [
"def",
"encode_example",
"(",
"self",
",",
"video_or_path_or_fobj",
")",
":",
"if",
"isinstance",
"(",
"video_or_path_or_fobj",
",",
"six",
".",
"string_types",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"video_or_path_or_fobj",
")",
":",
... | Converts the given image into a dict convertible to tf example. | [
"Converts",
"the",
"given",
"image",
"into",
"a",
"dict",
"convertible",
"to",
"tf",
"example",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/video_feature.py#L148-L164 | train |
tensorflow/datasets | tensorflow_datasets/image/rock_paper_scissors.py | RockPaperScissors._generate_examples | def _generate_examples(self, archive):
"""Generate rock, paper or scissors images and labels given the directory path.
Args:
archive: object that iterates over the zip.
Yields:
The image path and its corresponding label.
"""
for fname, fobj in archive:
res = _NAME_RE.match(fname... | python | def _generate_examples(self, archive):
"""Generate rock, paper or scissors images and labels given the directory path.
Args:
archive: object that iterates over the zip.
Yields:
The image path and its corresponding label.
"""
for fname, fobj in archive:
res = _NAME_RE.match(fname... | [
"def",
"_generate_examples",
"(",
"self",
",",
"archive",
")",
":",
"for",
"fname",
",",
"fobj",
"in",
"archive",
":",
"res",
"=",
"_NAME_RE",
".",
"match",
"(",
"fname",
")",
"if",
"not",
"res",
":",
"# if anything other than .png; skip",
"continue",
"label... | Generate rock, paper or scissors images and labels given the directory path.
Args:
archive: object that iterates over the zip.
Yields:
The image path and its corresponding label. | [
"Generate",
"rock",
"paper",
"or",
"scissors",
"images",
"and",
"labels",
"given",
"the",
"directory",
"path",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/rock_paper_scissors.py#L82-L100 | train |
tensorflow/datasets | tensorflow_datasets/structured/titanic.py | Titanic._generate_examples | def _generate_examples(self, file_path):
"""Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target
"""
with tf.io.gfile.GFile(file_path) as f:
raw_data = csv.DictReader(f)
for row in raw... | python | def _generate_examples(self, file_path):
"""Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target
"""
with tf.io.gfile.GFile(file_path) as f:
raw_data = csv.DictReader(f)
for row in raw... | [
"def",
"_generate_examples",
"(",
"self",
",",
"file_path",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"file_path",
")",
"as",
"f",
":",
"raw_data",
"=",
"csv",
".",
"DictReader",
"(",
"f",
")",
"for",
"row",
"in",
"raw_data"... | Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target | [
"Generate",
"features",
"and",
"target",
"given",
"the",
"directory",
"path",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/structured/titanic.py#L130-L150 | train |
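A self-contained sketch of the `csv.DictReader` pattern used above, with made-up columns: each row becomes a dict keyed by the CSV header, from which the target is popped off before yielding:

```python
import csv
import io

f = io.StringIO("age,fare,survived\n22,7.25,0\n38,71.28,1\n")
for row in csv.DictReader(f):
    target = int(row.pop("survived"))  # separate the label from the features
    print(row, "->", target)
# {'age': '22', 'fare': '7.25'} -> 0
# {'age': '38', 'fare': '71.28'} -> 1
```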
tensorflow/datasets | tensorflow_datasets/core/features/text/text_encoder.py | pad_decr | def pad_decr(ids):
"""Strip ID 0 and decrement ids by 1."""
if len(ids) < 1:
return list(ids)
if not any(ids):
return [] # all padding.
idx = -1
while not ids[idx]:
idx -= 1
if idx == -1:
ids = ids
else:
ids = ids[:idx + 1]
return [i - 1 for i in ids] | python | def pad_decr(ids):
"""Strip ID 0 and decrement ids by 1."""
if len(ids) < 1:
return list(ids)
if not any(ids):
return [] # all padding.
idx = -1
while not ids[idx]:
idx -= 1
if idx == -1:
ids = ids
else:
ids = ids[:idx + 1]
return [i - 1 for i in ids] | [
"def",
"pad_decr",
"(",
"ids",
")",
":",
"if",
"len",
"(",
"ids",
")",
"<",
"1",
":",
"return",
"list",
"(",
"ids",
")",
"if",
"not",
"any",
"(",
"ids",
")",
":",
"return",
"[",
"]",
"# all padding.",
"idx",
"=",
"-",
"1",
"while",
"not",
"ids"... | Strip ID 0 and decrement ids by 1. | [
"Strip",
"ID",
"0",
"and",
"decrement",
"ids",
"by",
"1",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L426-L439 | train |
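The padding convention here reserves id 0 for padding and stores real ids shifted up by one (the inverse, `pad_incr`, appears later in `SubwordTextEncoder.encode`). A minimal re-implementation sketch plus a worked call:

```python
def pad_decr_sketch(ids):
    """Strip trailing 0 padding, then shift ids back to 0-based."""
    ids = list(ids)
    while ids and ids[-1] == 0:  # padding only ever trails
        ids.pop()
    return [i - 1 for i in ids]

print(pad_decr_sketch([3, 5, 1, 0, 0]))  # [2, 4, 0]
print(pad_decr_sketch([0, 0]))           # [] -- all padding
```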
tensorflow/datasets | tensorflow_datasets/core/features/text/text_encoder.py | _prepare_reserved_tokens | def _prepare_reserved_tokens(reserved_tokens):
"""Prepare reserved tokens and a regex for splitting them out of strings."""
reserved_tokens = [tf.compat.as_text(tok) for tok in reserved_tokens or []]
dups = _find_duplicates(reserved_tokens)
if dups:
raise ValueError("Duplicates found in tokens: %s" % dups)
... | python | def _prepare_reserved_tokens(reserved_tokens):
"""Prepare reserved tokens and a regex for splitting them out of strings."""
reserved_tokens = [tf.compat.as_text(tok) for tok in reserved_tokens or []]
dups = _find_duplicates(reserved_tokens)
if dups:
raise ValueError("Duplicates found in tokens: %s" % dups)
... | [
"def",
"_prepare_reserved_tokens",
"(",
"reserved_tokens",
")",
":",
"reserved_tokens",
"=",
"[",
"tf",
".",
"compat",
".",
"as_text",
"(",
"tok",
")",
"for",
"tok",
"in",
"reserved_tokens",
"or",
"[",
"]",
"]",
"dups",
"=",
"_find_duplicates",
"(",
"reserve... | Prepare reserved tokens and a regex for splitting them out of strings. | [
"Prepare",
"reserved",
"tokens",
"and",
"a",
"regex",
"for",
"splitting",
"them",
"out",
"of",
"strings",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L447-L454 | train |
tensorflow/datasets | tensorflow_datasets/core/features/text/text_encoder.py | _make_reserved_tokens_re | def _make_reserved_tokens_re(reserved_tokens):
"""Constructs compiled regex to parse out reserved tokens."""
if not reserved_tokens:
return None
escaped_tokens = [_re_escape(rt) for rt in reserved_tokens]
pattern = "(%s)" % "|".join(escaped_tokens)
reserved_tokens_re = _re_compile(pattern)
return reserv... | python | def _make_reserved_tokens_re(reserved_tokens):
"""Constructs compiled regex to parse out reserved tokens."""
if not reserved_tokens:
return None
escaped_tokens = [_re_escape(rt) for rt in reserved_tokens]
pattern = "(%s)" % "|".join(escaped_tokens)
reserved_tokens_re = _re_compile(pattern)
return reserv... | [
"def",
"_make_reserved_tokens_re",
"(",
"reserved_tokens",
")",
":",
"if",
"not",
"reserved_tokens",
":",
"return",
"None",
"escaped_tokens",
"=",
"[",
"_re_escape",
"(",
"rt",
")",
"for",
"rt",
"in",
"reserved_tokens",
"]",
"pattern",
"=",
"\"(%s)\"",
"%",
"\... | Constructs compiled regex to parse out reserved tokens. | [
"Constructs",
"compiled",
"regex",
"to",
"parse",
"out",
"reserved",
"tokens",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L463-L470 | train |
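Wrapping the escaped alternation in a capturing group is what lets `re.split` keep the reserved tokens in its output instead of discarding them as separators. A small sketch:

```python
import re

reserved = ["<EOS>", "<PAD>"]
pattern = re.compile("(%s)" % "|".join(re.escape(t) for t in reserved))
print(pattern.split("hello<EOS>world<PAD>"))
# ['hello', '<EOS>', 'world', '<PAD>', '']
```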
tensorflow/datasets | tensorflow_datasets/core/features/text/text_encoder.py | write_lines_to_file | def write_lines_to_file(cls_name, filename, lines, metadata_dict):
"""Writes lines to file prepended by header and metadata."""
metadata_dict = metadata_dict or {}
header_line = "%s%s" % (_HEADER_PREFIX, cls_name)
metadata_line = "%s%s" % (_METADATA_PREFIX,
json.dumps(metadata_dict, ... | python | def write_lines_to_file(cls_name, filename, lines, metadata_dict):
"""Writes lines to file prepended by header and metadata."""
metadata_dict = metadata_dict or {}
header_line = "%s%s" % (_HEADER_PREFIX, cls_name)
metadata_line = "%s%s" % (_METADATA_PREFIX,
json.dumps(metadata_dict, ... | [
"def",
"write_lines_to_file",
"(",
"cls_name",
",",
"filename",
",",
"lines",
",",
"metadata_dict",
")",
":",
"metadata_dict",
"=",
"metadata_dict",
"or",
"{",
"}",
"header_line",
"=",
"\"%s%s\"",
"%",
"(",
"_HEADER_PREFIX",
",",
"cls_name",
")",
"metadata_line"... | Writes lines to file prepended by header and metadata. | [
"Writes",
"lines",
"to",
"file",
"prepended",
"by",
"header",
"and",
"metadata",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L492-L504 | train |
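A hedged sketch of the resulting file layout: a class-name header line, a JSON metadata line, then the payload lines. The literal prefixes below are placeholders; the real `_HEADER_PREFIX` and `_METADATA_PREFIX` constants are internal to the module and may differ.

```python
import json

def write_lines_sketch(cls_name, filename, lines, metadata_dict=None):
    header_line = "### " + cls_name                      # placeholder prefix
    metadata_line = "### Metadata: " + json.dumps(
        metadata_dict or {}, sort_keys=True)             # placeholder prefix
    with open(filename, "w") as f:
        for line in [header_line, metadata_line] + list(lines):
            f.write(line + "\n")

write_lines_sketch("TokenTextEncoder", "/tmp/vocab.txt", ["hello", "world"])
```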
tensorflow/datasets | tensorflow_datasets/core/features/text/text_encoder.py | read_lines_from_file | def read_lines_from_file(cls_name, filename):
"""Read lines from file, parsing out header and metadata."""
with tf.io.gfile.GFile(filename, "rb") as f:
lines = [tf.compat.as_text(line)[:-1] for line in f]
header_line = "%s%s" % (_HEADER_PREFIX, cls_name)
if lines[0] != header_line:
raise ValueError("Fil... | python | def read_lines_from_file(cls_name, filename):
"""Read lines from file, parsing out header and metadata."""
with tf.io.gfile.GFile(filename, "rb") as f:
lines = [tf.compat.as_text(line)[:-1] for line in f]
header_line = "%s%s" % (_HEADER_PREFIX, cls_name)
if lines[0] != header_line:
raise ValueError("Fil... | [
"def",
"read_lines_from_file",
"(",
"cls_name",
",",
"filename",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"f",
":",
"lines",
"=",
"[",
"tf",
".",
"compat",
".",
"as_text",
"(",
"line",
... | Read lines from file, parsing out header and metadata. | [
"Read",
"lines",
"from",
"file",
"parsing",
"out",
"header",
"and",
"metadata",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L507-L517 | train |
tensorflow/datasets | tensorflow_datasets/core/features/text/text_encoder.py | Tokenizer.tokenize | def tokenize(self, s):
"""Splits a string into tokens."""
s = tf.compat.as_text(s)
if self.reserved_tokens:
# First split out the reserved tokens
substrs = self._reserved_tokens_re.split(s)
else:
substrs = [s]
toks = []
for substr in substrs:
if substr in self.reserved_... | python | def tokenize(self, s):
"""Splits a string into tokens."""
s = tf.compat.as_text(s)
if self.reserved_tokens:
# First split out the reserved tokens
substrs = self._reserved_tokens_re.split(s)
else:
substrs = [s]
toks = []
for substr in substrs:
if substr in self.reserved_... | [
"def",
"tokenize",
"(",
"self",
",",
"s",
")",
":",
"s",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"s",
")",
"if",
"self",
".",
"reserved_tokens",
":",
"# First split out the reserved tokens",
"substrs",
"=",
"self",
".",
"_reserved_tokens_re",
".",
"... | Splits a string into tokens. | [
"Splits",
"a",
"string",
"into",
"tokens",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L378-L397 | train |
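A rough sketch of the two-stage split: reserved tokens are carved out first and pass through whole, then each remaining substring is tokenized. The non-word split below approximates the `alphanum_only` mode and is an assumption; the real tokenizer can also keep non-alphanumeric runs as tokens.

```python
import re

_WORD_SPLIT_RE = re.compile(r"\W+", re.UNICODE)  # assumption: alphanum-only mode

def tokenize_sketch(s, reserved=("<EOS>",)):
    reserved_re = re.compile("(%s)" % "|".join(re.escape(t) for t in reserved))
    toks = []
    for substr in reserved_re.split(s):
        if substr in reserved:
            toks.append(substr)  # reserved tokens are never split
        elif substr:
            toks.extend(t for t in _WORD_SPLIT_RE.split(substr) if t)
    return toks

print(tokenize_sketch("hi there<EOS>bye"))  # ['hi', 'there', '<EOS>', 'bye']
```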
tensorflow/datasets | tensorflow_datasets/core/splits.py | slice_to_percent_mask | def slice_to_percent_mask(slice_value):
"""Convert a python slice [15:50] into a list[bool] mask of 100 elements."""
if slice_value is None:
slice_value = slice(None)
# Select only the elements of the slice
selected = set(list(range(100))[slice_value])
# Create the binary mask
return [i in selected for ... | python | def slice_to_percent_mask(slice_value):
"""Convert a python slice [15:50] into a list[bool] mask of 100 elements."""
if slice_value is None:
slice_value = slice(None)
# Select only the elements of the slice
selected = set(list(range(100))[slice_value])
# Create the binary mask
return [i in selected for ... | [
"def",
"slice_to_percent_mask",
"(",
"slice_value",
")",
":",
"if",
"slice_value",
"is",
"None",
":",
"slice_value",
"=",
"slice",
"(",
"None",
")",
"# Select only the elements of the slice",
"selected",
"=",
"set",
"(",
"list",
"(",
"range",
"(",
"100",
")",
... | Convert a python slice [15:50] into a list[bool] mask of 100 elements. | [
"Convert",
"a",
"python",
"slice",
"[",
"15",
":",
"50",
"]",
"into",
"a",
"list",
"[",
"bool",
"]",
"mask",
"of",
"100",
"elements",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L479-L486 | train |
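Worked example: the slice is applied to the literal list 0..99, so a `[15:50]` split selects percent buckets 15 through 49:

```python
selected = set(list(range(100))[slice(15, 50)])
mask = [i in selected for i in range(100)]
print(sum(mask))    # 35 buckets selected (15..49)
print(mask[14:17])  # [False, True, True] -- bucket 15 is the first hit
```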
tensorflow/datasets | tensorflow_datasets/core/splits.py | get_shard_id2num_examples | def get_shard_id2num_examples(num_shards, total_num_examples):
"""Return the mapping shard_id=>num_examples, assuming round-robin."""
# TODO(b/130353071): This has the strong assumption that the shards have
# been written in a round-robin fashion. This assumption does not hold, for
# instance, with Beam generat... | python | def get_shard_id2num_examples(num_shards, total_num_examples):
"""Return the mapping shard_id=>num_examples, assuming round-robin."""
# TODO(b/130353071): This has the strong assumption that the shards have
# been written in a round-robin fashion. This assumption does not hold, for
# instance, with Beam generat... | [
"def",
"get_shard_id2num_examples",
"(",
"num_shards",
",",
"total_num_examples",
")",
":",
"# TODO(b/130353071): This has the strong assumption that the shards have",
"# been written in a round-robin fashion. This assumption does not hold, for",
"# instance, with Beam generation. The mapping sh... | Return the mapping shard_id=>num_examples, assuming round-robin. | [
"Return",
"the",
"mapping",
"shard_id",
"=",
">",
"num_examples",
"assuming",
"round",
"-",
"robin",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L489-L502 | train |
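Under the round-robin assumption stated in the docstring, the first `total % num_shards` shards each hold one extra example. A minimal sketch of that sizing rule:

```python
def shard_sizes_sketch(num_shards, total_num_examples):
    base, remainder = divmod(total_num_examples, num_shards)
    return [base + (1 if i < remainder else 0) for i in range(num_shards)]

print(shard_sizes_sketch(3, 10))  # [4, 3, 3]
```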
tensorflow/datasets | tensorflow_datasets/core/splits.py | compute_mask_offsets | def compute_mask_offsets(shard_id2num_examples):
"""Return the list of offsets associated with each shards.
Args:
shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples
Returns:
mask_offsets: `list[int]`, offset to skip for each of the shards
"""
total_num_examples = sum(shard_id2num_exam... | python | def compute_mask_offsets(shard_id2num_examples):
"""Return the list of offsets associated with each shards.
Args:
shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples
Returns:
mask_offsets: `list[int]`, offset to skip for each of the shards
"""
total_num_examples = sum(shard_id2num_exam... | [
"def",
"compute_mask_offsets",
"(",
"shard_id2num_examples",
")",
":",
"total_num_examples",
"=",
"sum",
"(",
"shard_id2num_examples",
")",
"mask_offsets",
"=",
"[",
"]",
"total_num_examples",
"=",
"0",
"for",
"num_examples_in_shard",
"in",
"shard_id2num_examples",
":",... | Return the list of offsets associated with each shards.
Args:
shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples
Returns:
mask_offsets: `list[int]`, offset to skip for each of the shards | [
"Return",
"the",
"list",
"of",
"offsets",
"associated",
"with",
"each",
"shards",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L505-L524 | train |
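Each shard's offset is the number of examples written before it, reduced modulo 100 so it can index into a percent mask. A sketch consistent with the accumulator visible in the token stream:

```python
def mask_offsets_sketch(shard_id2num_examples):
    offsets, seen_so_far = [], 0
    for num_examples in shard_id2num_examples:
        offsets.append(seen_so_far % 100)  # where this shard starts, mod 100
        seen_so_far += num_examples
    return offsets

print(mask_offsets_sketch([150, 60, 90]))  # [0, 50, 10]
```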
tensorflow/datasets | tensorflow_datasets/core/splits.py | check_splits_equals | def check_splits_equals(splits1, splits2):
"""Check that the two split dicts have the same names and num_shards."""
if set(splits1) ^ set(splits2): # Name intersection should be null
return False
for _, (split1, split2) in utils.zip_dict(splits1, splits2):
if split1.num_shards != split2.num_shards:
... | python | def check_splits_equals(splits1, splits2):
"""Check that the two split dicts have the same names and num_shards."""
if set(splits1) ^ set(splits2): # Name intersection should be null
return False
for _, (split1, split2) in utils.zip_dict(splits1, splits2):
if split1.num_shards != split2.num_shards:
... | [
"def",
"check_splits_equals",
"(",
"splits1",
",",
"splits2",
")",
":",
"if",
"set",
"(",
"splits1",
")",
"^",
"set",
"(",
"splits2",
")",
":",
"# Name intersection should be null",
"return",
"False",
"for",
"_",
",",
"(",
"split1",
",",
"split2",
")",
"in... | Check that the two split dicts have the same names and num_shards. | [
"Check",
"that",
"the",
"two",
"split",
"dicts",
"have",
"the",
"same",
"names",
"and",
"num_shards",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L573-L580 | train |
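The `set(splits1) ^ set(splits2)` test is a symmetric difference: any split name present in one dict but not the other makes it truthy, so the comparison fails fast before shard counts are even examined. For example:

```python
splits1 = {"train": object(), "test": object()}
splits2 = {"train": object(), "validation": object()}
print(set(splits1) ^ set(splits2))  # {'test', 'validation'} (order varies) -> not equal
```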
tensorflow/datasets | tensorflow_datasets/core/splits.py | SplitDict.add | def add(self, split_info):
"""Add the split info."""
if split_info.name in self:
raise ValueError("Split {} already present".format(split_info.name))
# TODO(epot): Make sure this works with Named splits correctly.
super(SplitDict, self).__setitem__(split_info.name, split_info) | python | def add(self, split_info):
"""Add the split info."""
if split_info.name in self:
raise ValueError("Split {} already present".format(split_info.name))
# TODO(epot): Make sure this works with Named splits correctly.
super(SplitDict, self).__setitem__(split_info.name, split_info) | [
"def",
"add",
"(",
"self",
",",
"split_info",
")",
":",
"if",
"split_info",
".",
"name",
"in",
"self",
":",
"raise",
"ValueError",
"(",
"\"Split {} already present\"",
".",
"format",
"(",
"split_info",
".",
"name",
")",
")",
"# TODO(epot): Make sure this works w... | Add the split info. | [
"Add",
"the",
"split",
"info",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L542-L547 | train |
tensorflow/datasets | tensorflow_datasets/core/splits.py | SplitDict.from_proto | def from_proto(cls, repeated_split_infos):
"""Returns a new SplitDict initialized from the `repeated_split_infos`."""
split_dict = cls()
for split_info_proto in repeated_split_infos:
split_info = SplitInfo()
split_info.CopyFrom(split_info_proto)
split_dict.add(split_info)
return split_... | python | def from_proto(cls, repeated_split_infos):
"""Returns a new SplitDict initialized from the `repeated_split_infos`."""
split_dict = cls()
for split_info_proto in repeated_split_infos:
split_info = SplitInfo()
split_info.CopyFrom(split_info_proto)
split_dict.add(split_info)
return split_... | [
"def",
"from_proto",
"(",
"cls",
",",
"repeated_split_infos",
")",
":",
"split_dict",
"=",
"cls",
"(",
")",
"for",
"split_info_proto",
"in",
"repeated_split_infos",
":",
"split_info",
"=",
"SplitInfo",
"(",
")",
"split_info",
".",
"CopyFrom",
"(",
"split_info_pr... | Returns a new SplitDict initialized from the `repeated_split_infos`. | [
"Returns",
"a",
"new",
"SplitDict",
"initialized",
"from",
"the",
"repeated_split_infos",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L550-L557 | train |
tensorflow/datasets | tensorflow_datasets/core/splits.py | SplitDict.to_proto | def to_proto(self):
"""Returns a list of SplitInfo protos that we have."""
# Return the proto.SplitInfo, sorted by name
return sorted((s.get_proto() for s in self.values()), key=lambda s: s.name) | python | def to_proto(self):
"""Returns a list of SplitInfo protos that we have."""
# Return the proto.SplitInfo, sorted by name
return sorted((s.get_proto() for s in self.values()), key=lambda s: s.name) | [
"def",
"to_proto",
"(",
"self",
")",
":",
"# Return the proto.SplitInfo, sorted by name",
"return",
"sorted",
"(",
"(",
"s",
".",
"get_proto",
"(",
")",
"for",
"s",
"in",
"self",
".",
"values",
"(",
")",
")",
",",
"key",
"=",
"lambda",
"s",
":",
"s",
"... | Returns a list of SplitInfo protos that we have. | [
"Returns",
"a",
"list",
"of",
"SplitInfo",
"protos",
"that",
"we",
"have",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/splits.py#L559-L562 | train |
tensorflow/datasets | tensorflow_datasets/text/squad.py | Squad._generate_examples | def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
squad = json.load(f)
for article in squad["data"]:
if "title" in article:
titl... | python | def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
squad = json.load(f)
for article in squad["data"]:
if "title" in article:
titl... | [
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"logging",
".",
"info",
"(",
"\"generating examples from = %s\"",
",",
"filepath",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
")",
"as",
"f",
":",
"squad",... | This function returns the examples in the raw (text) form. | [
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/squad.py#L164-L198 | train |
tensorflow/datasets | tensorflow_datasets/translate/para_crawl.py | ParaCrawl._generate_examples | def _generate_examples(self, data_file):
"""This function returns the examples in the raw (text) form."""
target_language = self.builder_config.target_language
with tf.io.gfile.GFile(data_file) as f:
for i, line in enumerate(f):
line_parts = line.strip().split("\t")
if len(line_parts)... | python | def _generate_examples(self, data_file):
"""This function returns the examples in the raw (text) form."""
target_language = self.builder_config.target_language
with tf.io.gfile.GFile(data_file) as f:
for i, line in enumerate(f):
line_parts = line.strip().split("\t")
if len(line_parts)... | [
"def",
"_generate_examples",
"(",
"self",
",",
"data_file",
")",
":",
"target_language",
"=",
"self",
".",
"builder_config",
".",
"target_language",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"data_file",
")",
"as",
"f",
":",
"for",
"i",
"... | This function returns the examples in the raw (text) form. | [
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/para_crawl.py#L148-L160 | train |
tensorflow/datasets | tensorflow_datasets/core/download/util.py | build_synchronize_decorator | def build_synchronize_decorator():
"""Returns a decorator which prevents concurrent calls to functions.
Usage:
synchronized = build_synchronize_decorator()
@synchronized
def read_value():
...
@synchronized
def write_value(x):
...
Returns:
make_threadsafe (fct): The decorato... | python | def build_synchronize_decorator():
"""Returns a decorator which prevents concurrent calls to functions.
Usage:
synchronized = build_synchronize_decorator()
@synchronized
def read_value():
...
@synchronized
def write_value(x):
...
Returns:
make_threadsafe (fct): The decorato... | [
"def",
"build_synchronize_decorator",
"(",
")",
":",
"lock",
"=",
"threading",
".",
"Lock",
"(",
")",
"def",
"lock_decorator",
"(",
"fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"lock_decorated",
"(",
"*",
"args",
",",
"*",
"*"... | Returns a decorator which prevents concurrent calls to functions.
Usage:
synchronized = build_synchronize_decorator()
@synchronized
def read_value():
...
@synchronized
def write_value(x):
...
Returns:
make_threadsafe (fct): The decorator which lock all functions to which it
... | [
"Returns",
"a",
"decorator",
"which",
"prevents",
"concurrent",
"calls",
"to",
"functions",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/util.py#L72-L101 | train |
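The elided body is spelled out in the token stream: one shared `threading.Lock` closed over by the returned decorator. Reassembled, it reads roughly as the sketch below.

```python
import functools
import threading

def build_synchronize_decorator_sketch():
    """Return a decorator that serializes calls to every function it wraps."""
    lock = threading.Lock()
    def lock_decorator(fn):
        @functools.wraps(fn)
        def lock_decorated(*args, **kwargs):
            with lock:  # the one lock is shared by all decorated functions
                return fn(*args, **kwargs)
        return lock_decorated
    return lock_decorator
```

Because every function decorated by the same instance contends for the same lock, the `read_value` and `write_value` from the docstring's usage example can never run concurrently.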
tensorflow/datasets | tensorflow_datasets/core/download/util.py | get_file_name | def get_file_name(url):
"""Returns file name of file at given url."""
return os.path.basename(urllib.parse.urlparse(url).path) or 'unknown_name' | python | def get_file_name(url):
"""Returns file name of file at given url."""
return os.path.basename(urllib.parse.urlparse(url).path) or 'unknown_name' | [
"def",
"get_file_name",
"(",
"url",
")",
":",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
".",
"path",
")",
"or",
"'unknown_name'"
] | Returns file name of file at given url. | [
"Returns",
"file",
"name",
"of",
"file",
"at",
"given",
"url",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/util.py#L104-L106 | train |
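Worked example of the urlparse/basename combination; query strings and fragments never leak into the name, and an empty path falls back to the sentinel:

```python
import os
from urllib.parse import urlparse

url = "https://example.com/files/train.csv?version=2"
print(os.path.basename(urlparse(url).path) or "unknown_name")  # train.csv
print(os.path.basename(urlparse("https://example.com/").path)
      or "unknown_name")                                       # unknown_name
```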
tensorflow/datasets | tensorflow_datasets/audio/librispeech.py | _make_builder_configs | def _make_builder_configs():
"""Make built-in Librispeech BuilderConfigs.
Uses 4 text encodings (plain text, bytes, subwords with 8k vocab, subwords
with 32k vocab) crossed with the data subsets (clean100, clean360, all).
Returns:
`list<tfds.audio.LibrispeechConfig>`
"""
text_encoder_configs = [
... | python | def _make_builder_configs():
"""Make built-in Librispeech BuilderConfigs.
Uses 4 text encodings (plain text, bytes, subwords with 8k vocab, subwords
with 32k vocab) crossed with the data subsets (clean100, clean360, all).
Returns:
`list<tfds.audio.LibrispeechConfig>`
"""
text_encoder_configs = [
... | [
"def",
"_make_builder_configs",
"(",
")",
":",
"text_encoder_configs",
"=",
"[",
"None",
",",
"tfds",
".",
"features",
".",
"text",
".",
"TextEncoderConfig",
"(",
"name",
"=",
"\"bytes\"",
",",
"encoder",
"=",
"tfds",
".",
"features",
".",
"text",
".",
"By... | Make built-in Librispeech BuilderConfigs.
Uses 4 text encodings (plain text, bytes, subwords with 8k vocab, subwords
with 32k vocab) crossed with the data subsets (clean100, clean360, all).
Returns:
`list<tfds.audio.LibrispeechConfig>` | [
"Make",
"built",
"-",
"in",
"Librispeech",
"BuilderConfigs",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/audio/librispeech.py#L130-L159 | train |
tensorflow/datasets | tensorflow_datasets/audio/librispeech.py | _walk_librispeech_dir | def _walk_librispeech_dir(directory):
"""Walk a Librispeech directory and yield examples."""
directory = os.path.join(directory, "LibriSpeech")
for path, _, files in tf.io.gfile.walk(directory):
if not files:
continue
transcript_file = [f for f in files if f.endswith(".txt")]
if not transcript_... | python | def _walk_librispeech_dir(directory):
"""Walk a Librispeech directory and yield examples."""
directory = os.path.join(directory, "LibriSpeech")
for path, _, files in tf.io.gfile.walk(directory):
if not files:
continue
transcript_file = [f for f in files if f.endswith(".txt")]
if not transcript_... | [
"def",
"_walk_librispeech_dir",
"(",
"directory",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"LibriSpeech\"",
")",
"for",
"path",
",",
"_",
",",
"files",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"walk",
"(",
... | Walk a Librispeech directory and yield examples. | [
"Walk",
"a",
"Librispeech",
"directory",
"and",
"yield",
"examples",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/audio/librispeech.py#L237-L265 | train |
tensorflow/datasets | tensorflow_datasets/audio/librispeech.py | LibrispeechConfig.download_urls | def download_urls(self):
"""Returns download urls for this config."""
urls = {
tfds.Split.TRAIN: ["train_clean100"],
tfds.Split.VALIDATION: ["dev_clean"],
tfds.Split.TEST: ["test_clean"],
}
if self.data in ["all", "clean360"]:
urls[tfds.Split.TRAIN].append("train_clean360")... | python | def download_urls(self):
"""Returns download urls for this config."""
urls = {
tfds.Split.TRAIN: ["train_clean100"],
tfds.Split.VALIDATION: ["dev_clean"],
tfds.Split.TEST: ["test_clean"],
}
if self.data in ["all", "clean360"]:
urls[tfds.Split.TRAIN].append("train_clean360")... | [
"def",
"download_urls",
"(",
"self",
")",
":",
"urls",
"=",
"{",
"tfds",
".",
"Split",
".",
"TRAIN",
":",
"[",
"\"train_clean100\"",
"]",
",",
"tfds",
".",
"Split",
".",
"VALIDATION",
":",
"[",
"\"dev_clean\"",
"]",
",",
"tfds",
".",
"Split",
".",
"T... | Returns download urls for this config. | [
"Returns",
"download",
"urls",
"for",
"this",
"config",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/audio/librispeech.py#L109-L127 | train |
tensorflow/datasets | tensorflow_datasets/core/features/class_label_feature.py | ClassLabel.str2int | def str2int(self, str_value):
"""Conversion class name string => integer."""
str_value = tf.compat.as_text(str_value)
if self._str2int:
return self._str2int[str_value]
# No names provided, try to integerize
failed_parse = False
try:
int_value = int(str_value)
except ValueError:
... | python | def str2int(self, str_value):
"""Conversion class name string => integer."""
str_value = tf.compat.as_text(str_value)
if self._str2int:
return self._str2int[str_value]
# No names provided, try to integerize
failed_parse = False
try:
int_value = int(str_value)
except ValueError:
... | [
"def",
"str2int",
"(",
"self",
",",
"str_value",
")",
":",
"str_value",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"str_value",
")",
"if",
"self",
".",
"_str2int",
":",
"return",
"self",
".",
"_str2int",
"[",
"str_value",
"]",
"# No names provided, try... | Conversion class name string => integer. | [
"Conversion",
"class",
"name",
"string",
"=",
">",
"integer",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/class_label_feature.py#L99-L113 | train |
tensorflow/datasets | tensorflow_datasets/core/features/class_label_feature.py | ClassLabel.int2str | def int2str(self, int_value):
"""Conversion integer => class name string."""
if self._int2str:
# Maybe should support batched np array/eager tensors, to allow things
# like
# out_ids = model(inputs)
# labels = cifar10.info.features['label'].int2str(out_ids)
return self._int2str[int... | python | def int2str(self, int_value):
"""Conversion integer => class name string."""
if self._int2str:
# Maybe should support batched np array/eager tensors, to allow things
# like
# out_ids = model(inputs)
# labels = cifar10.info.features['label'].int2str(out_ids)
return self._int2str[int... | [
"def",
"int2str",
"(",
"self",
",",
"int_value",
")",
":",
"if",
"self",
".",
"_int2str",
":",
"# Maybe should support batched np array/eager tensors, to allow things",
"# like",
"# out_ids = model(inputs)",
"# labels = cifar10.info.features['label'].int2str(out_ids)",
"return",
... | Conversion integer => class name string. | [
"Conversion",
"integer",
"=",
">",
"class",
"name",
"string",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/class_label_feature.py#L115-L127 | train |
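Usage sketch for the two conversions, assuming a `ClassLabel` built from explicit names; integer ids follow the order of the `names` list:

```python
import tensorflow_datasets as tfds

label = tfds.features.ClassLabel(names=["cat", "dog"])
print(label.str2int("dog"))  # 1
print(label.int2str(0))      # 'cat'
```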
tensorflow/datasets | tensorflow_datasets/core/features/class_label_feature.py | ClassLabel.save_metadata | def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Save names if defined
if self._str2int is not None:
names_filepath = _get_names_filepath(data_dir, feature_name)
_write_names_to_file(names_filepath, self.names) | python | def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Save names if defined
if self._str2int is not None:
names_filepath = _get_names_filepath(data_dir, feature_name)
_write_names_to_file(names_filepath, self.names) | [
"def",
"save_metadata",
"(",
"self",
",",
"data_dir",
",",
"feature_name",
"=",
"None",
")",
":",
"# Save names if defined",
"if",
"self",
".",
"_str2int",
"is",
"not",
"None",
":",
"names_filepath",
"=",
"_get_names_filepath",
"(",
"data_dir",
",",
"feature_nam... | See base class for details. | [
"See",
"base",
"class",
"for",
"details",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/class_label_feature.py#L152-L157 | train |
tensorflow/datasets | tensorflow_datasets/core/features/class_label_feature.py | ClassLabel.load_metadata | def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
names_filepath = _get_names_filepath(data_dir, feature_name)
if tf.io.gfile.exists(names_filepath):
self.names = _load_names_from_file(names_filepath) | python | def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
names_filepath = _get_names_filepath(data_dir, feature_name)
if tf.io.gfile.exists(names_filepath):
self.names = _load_names_from_file(names_filepath) | [
"def",
"load_metadata",
"(",
"self",
",",
"data_dir",
",",
"feature_name",
"=",
"None",
")",
":",
"# Restore names if defined",
"names_filepath",
"=",
"_get_names_filepath",
"(",
"data_dir",
",",
"feature_name",
")",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"... | See base class for details. | [
"See",
"base",
"class",
"for",
"details",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/class_label_feature.py#L159-L164 | train |
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | _token_counts_from_generator | def _token_counts_from_generator(generator, max_chars, reserved_tokens):
"""Builds token counts from generator."""
reserved_tokens = list(reserved_tokens) + [_UNDERSCORE_REPLACEMENT]
tokenizer = text_encoder.Tokenizer(
alphanum_only=False, reserved_tokens=reserved_tokens)
num_chars = 0
token_counts = co... | python | def _token_counts_from_generator(generator, max_chars, reserved_tokens):
"""Builds token counts from generator."""
reserved_tokens = list(reserved_tokens) + [_UNDERSCORE_REPLACEMENT]
tokenizer = text_encoder.Tokenizer(
alphanum_only=False, reserved_tokens=reserved_tokens)
num_chars = 0
token_counts = co... | [
"def",
"_token_counts_from_generator",
"(",
"generator",
",",
"max_chars",
",",
"reserved_tokens",
")",
":",
"reserved_tokens",
"=",
"list",
"(",
"reserved_tokens",
")",
"+",
"[",
"_UNDERSCORE_REPLACEMENT",
"]",
"tokenizer",
"=",
"text_encoder",
".",
"Tokenizer",
"(... | Builds token counts from generator. | [
"Builds",
"token",
"counts",
"from",
"generator",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L388-L407 | train |
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | _validate_build_arguments | def _validate_build_arguments(max_subword_length, reserved_tokens,
target_vocab_size):
"""Validate arguments for SubwordTextEncoder.build_from_corpus."""
if max_subword_length <= 0:
raise ValueError(
"max_subword_length must be > 0. Note that memory and compute for "
... | python | def _validate_build_arguments(max_subword_length, reserved_tokens,
target_vocab_size):
"""Validate arguments for SubwordTextEncoder.build_from_corpus."""
if max_subword_length <= 0:
raise ValueError(
"max_subword_length must be > 0. Note that memory and compute for "
... | [
"def",
"_validate_build_arguments",
"(",
"max_subword_length",
",",
"reserved_tokens",
",",
"target_vocab_size",
")",
":",
"if",
"max_subword_length",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"max_subword_length must be > 0. Note that memory and compute for \"",
"\"buildin... | Validate arguments for SubwordTextEncoder.build_from_corpus. | [
"Validate",
"arguments",
"for",
"SubwordTextEncoder",
".",
"build_from_corpus",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L410-L428 | train |
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | _prepare_tokens_for_encode | def _prepare_tokens_for_encode(tokens):
"""Prepare tokens for encoding.
Tokens followed by a single space have "_" appended and the single space token
is dropped.
If a token is _UNDERSCORE_REPLACEMENT, it is broken up into 2 tokens.
Args:
tokens: `list<str>`, tokens to prepare.
Returns:
`list<st... | python | def _prepare_tokens_for_encode(tokens):
"""Prepare tokens for encoding.
Tokens followed by a single space have "_" appended and the single space token
is dropped.
If a token is _UNDERSCORE_REPLACEMENT, it is broken up into 2 tokens.
Args:
tokens: `list<str>`, tokens to prepare.
Returns:
`list<st... | [
"def",
"_prepare_tokens_for_encode",
"(",
"tokens",
")",
":",
"prepared_tokens",
"=",
"[",
"]",
"def",
"_prepare_token",
"(",
"t",
",",
"next_t",
")",
":",
"skip_next",
"=",
"False",
"t",
"=",
"_escape",
"(",
"t",
")",
"# If next token is a single space, add _ s... | Prepare tokens for encoding.
Tokens followed by a single space have "_" appended and the single space token
is dropped.
If a token is _UNDERSCORE_REPLACEMENT, it is broken up into 2 tokens.
Args:
tokens: `list<str>`, tokens to prepare.
Returns:
`list<str>` prepared tokens. | [
"Prepare",
"tokens",
"for",
"encoding",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L451-L496 | train |
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder.encode | def encode(self, s):
"""Encodes text into a list of integers."""
s = tf.compat.as_text(s)
tokens = self._tokenizer.tokenize(s)
tokens = _prepare_tokens_for_encode(tokens)
ids = []
for token in tokens:
ids.extend(self._token_to_ids(token))
return text_encoder.pad_incr(ids) | python | def encode(self, s):
"""Encodes text into a list of integers."""
s = tf.compat.as_text(s)
tokens = self._tokenizer.tokenize(s)
tokens = _prepare_tokens_for_encode(tokens)
ids = []
for token in tokens:
ids.extend(self._token_to_ids(token))
return text_encoder.pad_incr(ids) | [
"def",
"encode",
"(",
"self",
",",
"s",
")",
":",
"s",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"s",
")",
"tokens",
"=",
"self",
".",
"_tokenizer",
".",
"tokenize",
"(",
"s",
")",
"tokens",
"=",
"_prepare_tokens_for_encode",
"(",
"tokens",
")",... | Encodes text into a list of integers. | [
"Encodes",
"text",
"into",
"a",
"list",
"of",
"integers",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L80-L88 | train |
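A hedged round-trip sketch: build a tiny vocabulary from a toy corpus, then check that `decode(encode(s))` recovers the input. This assumes the installed TFDS version exposes the `build_from_corpus` classmethod used elsewhere in this module:

```python
import tensorflow_datasets as tfds

encoder = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    ["hello world", "goodbye world"], target_vocab_size=2**8)
ids = encoder.encode("hello world")
assert encoder.decode(ids) == "hello world"
```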
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder.decode | def decode(self, ids):
"""Decodes a list of integers into text."""
ids = text_encoder.pad_decr(ids)
subword_ids = ids
del ids
subwords = []
# Some ids correspond to bytes. Because unicode characters are composed of
# possibly multiple bytes, we attempt to decode contiguous lists of bytes
... | python | def decode(self, ids):
"""Decodes a list of integers into text."""
ids = text_encoder.pad_decr(ids)
subword_ids = ids
del ids
subwords = []
# Some ids correspond to bytes. Because unicode characters are composed of
# possibly multiple bytes, we attempt to decode contiguous lists of bytes
... | [
"def",
"decode",
"(",
"self",
",",
"ids",
")",
":",
"ids",
"=",
"text_encoder",
".",
"pad_decr",
"(",
"ids",
")",
"subword_ids",
"=",
"ids",
"del",
"ids",
"subwords",
"=",
"[",
"]",
"# Some ids correspond to bytes. Because unicode characters are composed of",
"# p... | Decodes a list of integers into text. | [
"Decodes",
"a",
"list",
"of",
"integers",
"into",
"text",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L90-L126 | train |
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder._token_to_ids | def _token_to_ids(self, token):
"""Convert a single token to a list of integer ids."""
# Check cache
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._token_to_ids_cache[cache_location]
if cache_key == token:
return cache_value
subwords = self._token_to_subwor... | python | def _token_to_ids(self, token):
"""Convert a single token to a list of integer ids."""
# Check cache
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._token_to_ids_cache[cache_location]
if cache_key == token:
return cache_value
subwords = self._token_to_subwor... | [
"def",
"_token_to_ids",
"(",
"self",
",",
"token",
")",
":",
"# Check cache",
"cache_location",
"=",
"hash",
"(",
"token",
")",
"%",
"self",
".",
"_cache_size",
"cache_key",
",",
"cache_value",
"=",
"self",
".",
"_token_to_ids_cache",
"[",
"cache_location",
"]... | Convert a single token to a list of integer ids. | [
"Convert",
"a",
"single",
"token",
"to",
"a",
"list",
"of",
"integer",
"ids",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L140-L164 | train |
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder._byte_encode | def _byte_encode(self, token):
"""Encode a single token byte-wise into integer ids."""
# Vocab ids for all bytes follow ids for the subwords
offset = len(self._subwords)
if token == "_":
return [len(self._subwords) + ord(" ")]
return [i + offset for i in list(bytearray(tf.compat.as_bytes(token... | python | def _byte_encode(self, token):
"""Encode a single token byte-wise into integer ids."""
# Vocab ids for all bytes follow ids for the subwords
offset = len(self._subwords)
if token == "_":
return [len(self._subwords) + ord(" ")]
return [i + offset for i in list(bytearray(tf.compat.as_bytes(token... | [
"def",
"_byte_encode",
"(",
"self",
",",
"token",
")",
":",
"# Vocab ids for all bytes follow ids for the subwords",
"offset",
"=",
"len",
"(",
"self",
".",
"_subwords",
")",
"if",
"token",
"==",
"\"_\"",
":",
"return",
"[",
"len",
"(",
"self",
".",
"_subwords... | Encode a single token byte-wise into integer ids. | [
"Encode",
"a",
"single",
"token",
"byte",
"-",
"wise",
"into",
"integer",
"ids",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L166-L172 | train |
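The byte fallback places the 256 byte ids directly after the subword ids, so the offset is simply the vocabulary length. A sketch for a token no subword covers (the TF-free `encode("utf-8")` stands in for `tf.compat.as_bytes`):

```python
subwords = ["hello_", "wor", "ld_"]  # toy vocabulary, so offset == 3
offset = len(subwords)
token = "\u00a1"                     # '¡', not covered by any subword
ids = [offset + b for b in token.encode("utf-8")]
print(ids)  # [197, 164] -- UTF-8 bytes 194 and 161, shifted by the offset
```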
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder._id_to_subword | def _id_to_subword(self, subword_id):
"""Converts a subword integer ID to a subword string."""
if subword_id < 0 or subword_id >= (self.vocab_size - 1):
raise ValueError("Received id %d which is invalid. Ids must be within "
"[0, %d)." % (subword_id + 1, self.vocab_size))
if 0 ... | python | def _id_to_subword(self, subword_id):
"""Converts a subword integer ID to a subword string."""
if subword_id < 0 or subword_id >= (self.vocab_size - 1):
raise ValueError("Received id %d which is invalid. Ids must be within "
"[0, %d)." % (subword_id + 1, self.vocab_size))
if 0 ... | [
"def",
"_id_to_subword",
"(",
"self",
",",
"subword_id",
")",
":",
"if",
"subword_id",
"<",
"0",
"or",
"subword_id",
">=",
"(",
"self",
".",
"vocab_size",
"-",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"Received id %d which is invalid. Ids must be within \"",
... | Converts a subword integer ID to a subword string. | [
"Converts",
"a",
"subword",
"integer",
"ID",
"to",
"a",
"subword",
"string",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L174-L188 | train |
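`_id_to_subword` rejects ids outside `[0, vocab_size - 1)` and reports `subword_id + 1` in the message because the public id space is 1-indexed: 0 is reserved for padding, consistent with the `_init_from_list` row further down, which notes that everything is 0-indexed internally. A sketch of that dispatch, as an illustration of the id layout rather than the elided method body:

```python
def id_to_piece(public_id, subwords):
  internal_id = public_id - 1               # 0 is the pad id
  if internal_id < 0:
    raise ValueError("0 is reserved for padding")
  if internal_id < len(subwords):           # subword range
    return subwords[internal_id]
  byte_value = internal_id - len(subwords)  # byte-fallback range
  if byte_value >= 256:
    raise ValueError("Received id %d which is invalid." % public_id)
  return bytes(bytearray([byte_value]))

print(id_to_piece(1, ["he", "llo"]))              # he
print(id_to_piece(3 + ord("!"), ["he", "llo"]))   # b'!'
```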
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder._token_to_subwords | def _token_to_subwords(self, token):
"""Greedily split token into subwords."""
subwords = []
start = 0
while start < len(token):
subword = None
for end in range(
min(len(token), start + self._max_subword_len), start, -1):
candidate = token[start:end]
if (candidate ... | python | def _token_to_subwords(self, token):
"""Greedily split token into subwords."""
subwords = []
start = 0
while start < len(token):
subword = None
for end in range(
min(len(token), start + self._max_subword_len), start, -1):
candidate = token[start:end]
if (candidate ... | [
"def",
"_token_to_subwords",
"(",
"self",
",",
"token",
")",
":",
"subwords",
"=",
"[",
"]",
"start",
"=",
"0",
"while",
"start",
"<",
"len",
"(",
"token",
")",
":",
"subword",
"=",
"None",
"for",
"end",
"in",
"range",
"(",
"min",
"(",
"len",
"(",
... | Greedily split token into subwords. | [
"Greedily",
"split",
"token",
"into",
"subwords",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L190-L211 | train |
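`_token_to_subwords` is a greedy longest-match ("maximal munch") segmenter: at each position it tries the longest candidate substring first, capped at `self._max_subword_len`, and backs off one character at a time. A runnable sketch of the algorithm; in the real encoder an unmatched span falls through to byte encoding, while this sketch raises for brevity.

```python
def token_to_subwords(token, vocab, max_subword_len):
  subwords = []
  start = 0
  while start < len(token):
    match = None
    # Longest candidate first, then back off one character at a time.
    for end in range(min(len(token), start + max_subword_len), start, -1):
      candidate = token[start:end]
      if candidate in vocab:
        match = candidate
        break
    if match is None:
      raise ValueError("no subword covers %r" % token[start:])
    subwords.append(match)
    start += len(match)
  return subwords

vocab = {"un", "believ", "able", "a", "b", "l", "e"}
print(token_to_subwords("unbelievable", vocab, max_subword_len=8))
# ['un', 'believ', 'able']
```

Greedy segmentation is not globally optimal, but with a vocabulary that includes all single characters (or a byte fallback) it always terminates with full coverage.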
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder._init_from_list | def _init_from_list(self, subwords):
"""Initializes the encoder from a list of subwords."""
subwords = [tf.compat.as_text(s) for s in subwords if s]
self._subwords = subwords
# Note that internally everything is 0-indexed. Padding is dealt with at the
# end of encode and the beginning of decode.
... | python | def _init_from_list(self, subwords):
"""Initializes the encoder from a list of subwords."""
subwords = [tf.compat.as_text(s) for s in subwords if s]
self._subwords = subwords
# Note that internally everything is 0-indexed. Padding is dealt with at the
# end of encode and the beginning of decode.
... | [
"def",
"_init_from_list",
"(",
"self",
",",
"subwords",
")",
":",
"subwords",
"=",
"[",
"tf",
".",
"compat",
".",
"as_text",
"(",
"s",
")",
"for",
"s",
"in",
"subwords",
"if",
"s",
"]",
"self",
".",
"_subwords",
"=",
"subwords",
"# Note that internally e... | Initializes the encoder from a list of subwords. | [
"Initializes",
"the",
"encoder",
"from",
"a",
"list",
"of",
"subwords",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L213-L237 | train |
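`_init_from_list` is truncated above; the visible part filters out empty strings and stores the list 0-indexed, leaving padding to the encode/decode boundaries. The remaining bookkeeping sketched below, a subword-to-id map plus the longest-subword length the greedy matcher needs, is an assumption, since that part of the code is elided.

```python
def init_from_list(subwords):
  subwords = [s for s in subwords if s]                   # drop empty strings
  subword_to_id = {s: i for i, s in enumerate(subwords)}  # 0-indexed internally
  max_subword_len = max((len(s) for s in subwords), default=0)
  return subwords, subword_to_id, max_subword_len

_, table, max_len = init_from_list(["he", "llo", "", "worl", "d_"])
print(table)    # {'he': 0, 'llo': 1, 'worl': 2, 'd_': 3}
print(max_len)  # 4
```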
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder.save_to_file | def save_to_file(self, filename_prefix):
"""Save the vocabulary to a file."""
# Wrap in single quotes to make it easier to see the full subword when
# it has spaces and make it easier to search with ctrl+f.
filename = self._filename(filename_prefix)
lines = ["'%s'" % s for s in self._subwords]
s... | python | def save_to_file(self, filename_prefix):
"""Save the vocabulary to a file."""
# Wrap in single quotes to make it easier to see the full subword when
# it has spaces and make it easier to search with ctrl+f.
filename = self._filename(filename_prefix)
lines = ["'%s'" % s for s in self._subwords]
s... | [
"def",
"save_to_file",
"(",
"self",
",",
"filename_prefix",
")",
":",
"# Wrap in single quotes to make it easier to see the full subword when",
"# it has spaces and make it easier to search with ctrl+f.",
"filename",
"=",
"self",
".",
"_filename",
"(",
"filename_prefix",
")",
"li... | Save the vocabulary to a file. | [
"Save",
"the",
"vocabulary",
"to",
"a",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L243-L249 | train |
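`save_to_file` writes one subword per line wrapped in single quotes so subwords with embedded or trailing spaces stay visible and searchable. A sketch with a plain `open` in place of `tf.io.gfile`; the `.subwords` suffix is an assumption, since the real suffix comes from the elided `_filename` helper.

```python
def save_vocab(subwords, filename_prefix):
  filename = filename_prefix + ".subwords"   # assumed suffix
  with open(filename, "w", encoding="utf-8") as f:
    f.write("\n".join("'%s'" % s for s in subwords))
  return filename

save_vocab(["he", "llo ", "d_"], "/tmp/demo_vocab")
```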
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder.load_from_file | def load_from_file(cls, filename_prefix):
"""Extracts list of subwords from file."""
filename = cls._filename(filename_prefix)
lines, _ = cls._read_lines_from_file(filename)
# Strip wrapping single quotes
vocab_list = [line[1:-1] for line in lines]
return cls(vocab_list=vocab_list) | python | def load_from_file(cls, filename_prefix):
"""Extracts list of subwords from file."""
filename = cls._filename(filename_prefix)
lines, _ = cls._read_lines_from_file(filename)
# Strip wrapping single quotes
vocab_list = [line[1:-1] for line in lines]
return cls(vocab_list=vocab_list) | [
"def",
"load_from_file",
"(",
"cls",
",",
"filename_prefix",
")",
":",
"filename",
"=",
"cls",
".",
"_filename",
"(",
"filename_prefix",
")",
"lines",
",",
"_",
"=",
"cls",
".",
"_read_lines_from_file",
"(",
"filename",
")",
"# Strip wrapping single quotes",
"vo... | Extracts list of subwords from file. | [
"Extracts",
"list",
"of",
"subwords",
"from",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L252-L258 | train |
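`load_from_file` inverts that format by stripping the wrapping single quotes from each line. A sketch that round-trips against the save sketch above:

```python
def load_vocab(filename):
  with open(filename, encoding="utf-8") as f:
    lines = f.read().split("\n")
  return [line[1:-1] for line in lines]   # strip the wrapping single quotes

# Round trip against the save format sketched earlier.
with open("/tmp/demo_vocab.subwords", "w", encoding="utf-8") as f:
  f.write("\n".join("'%s'" % s for s in ["he", "llo ", "d_"]))
assert load_vocab("/tmp/demo_vocab.subwords") == ["he", "llo ", "d_"]
```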
tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder.build_from_corpus | def build_from_corpus(cls,
corpus_generator,
target_vocab_size,
max_subword_length=20,
max_corpus_chars=None,
reserved_tokens=None):
"""Builds a `SubwordTextEncoder` based on the `corpus_generator... | python | def build_from_corpus(cls,
corpus_generator,
target_vocab_size,
max_subword_length=20,
max_corpus_chars=None,
reserved_tokens=None):
"""Builds a `SubwordTextEncoder` based on the `corpus_generator... | [
"def",
"build_from_corpus",
"(",
"cls",
",",
"corpus_generator",
",",
"target_vocab_size",
",",
"max_subword_length",
"=",
"20",
",",
"max_corpus_chars",
"=",
"None",
",",
"reserved_tokens",
"=",
"None",
")",
":",
"reserved_tokens",
"=",
"reserved_tokens",
"or",
"... | Builds a `SubwordTextEncoder` based on the `corpus_generator`.
Args:
corpus_generator: generator yielding `str`, from which subwords will be
constructed.
target_vocab_size: `int`, approximate size of the vocabulary to create.
max_subword_length: `int`, maximum length of a subword. Note th... | [
"Builds",
"a",
"SubwordTextEncoder",
"based",
"on",
"the",
"corpus_generator",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L261-L336 | train |
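`build_from_corpus` is the public entry point for constructing a vocabulary from raw text. A hedged usage sketch based only on the signature shown in the row; the `tfds.features.text` import path matches tensorflow_datasets at the referenced commit and moved in later releases.

```python
import tensorflow_datasets as tfds

corpus = ["the quick brown fox", "the lazy dog", "quick quick quick"]

encoder = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    corpus_generator=(line for line in corpus),
    target_vocab_size=100,        # approximate target, per the docstring
    max_subword_length=20,
    reserved_tokens=["<EOS>"],    # kept intact in the vocabulary
)

ids = encoder.encode("the quick fox")
print(encoder.decode(ids))        # the quick fox
```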
tensorflow/datasets | tensorflow_datasets/structured/higgs.py | Higgs._generate_examples | def _generate_examples(self, file_path):
"""Generate features given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features, per row.
"""
fieldnames = [
'class_label', 'lepton_pT', 'lepton_eta', 'lepton_phi',
'missing_energy_magnitud... | python | def _generate_examples(self, file_path):
"""Generate features given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features, per row.
"""
fieldnames = [
'class_label', 'lepton_pT', 'lepton_eta', 'lepton_phi',
'missing_energy_magnitud... | [
"def",
"_generate_examples",
"(",
"self",
",",
"file_path",
")",
":",
"fieldnames",
"=",
"[",
"'class_label'",
",",
"'lepton_pT'",
",",
"'lepton_eta'",
",",
"'lepton_phi'",
",",
"'missing_energy_magnitude'",
",",
"'missing_energy_phi'",
",",
"'jet_1_pt'",
",",
"'jet... | Generate features given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features, per row. | [
"Generate",
"features",
"given",
"the",
"directory",
"path",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/structured/higgs.py#L122-L144 | train |
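The HIGGS generator wraps `csv.DictReader` with an explicit `fieldnames` list because the csv file has no header row. A sketch of the pattern; the column list is abbreviated here, whereas the real file has `class_label` plus 28 float features.

```python
import csv

import tensorflow as tf

def generate_examples(file_path, fieldnames):
  with tf.io.gfile.GFile(file_path) as f:
    reader = csv.DictReader(f, fieldnames=fieldnames)
    for row in reader:
      yield {k: float(v) for k, v in row.items()}   # every column is a float

# Usage (abbreviated columns):
# for example in generate_examples(
#     "HIGGS.csv", ["class_label", "lepton_pT", "lepton_eta"]):
#   ...
```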
tensorflow/datasets | tensorflow_datasets/image/cats_vs_dogs.py | CatsVsDogs._generate_examples | def _generate_examples(self, archive):
"""Generate Cats vs Dogs images and labels given a directory path."""
num_skipped = 0
for fname, fobj in archive:
res = _NAME_RE.match(fname)
if not res: # README file, ...
continue
label = res.group(1).lower()
if tf.compat.as_bytes("JF... | python | def _generate_examples(self, archive):
"""Generate Cats vs Dogs images and labels given a directory path."""
num_skipped = 0
for fname, fobj in archive:
res = _NAME_RE.match(fname)
if not res: # README file, ...
continue
label = res.group(1).lower()
if tf.compat.as_bytes("JF... | [
"def",
"_generate_examples",
"(",
"self",
",",
"archive",
")",
":",
"num_skipped",
"=",
"0",
"for",
"fname",
",",
"fobj",
"in",
"archive",
":",
"res",
"=",
"_NAME_RE",
".",
"match",
"(",
"fname",
")",
"if",
"not",
"res",
":",
"# README file, ...",
"conti... | Generate Cats vs Dogs images and labels given a directory path. | [
"Generate",
"Cats",
"vs",
"Dogs",
"images",
"and",
"labels",
"given",
"a",
"directory",
"path",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cats_vs_dogs.py#L87-L107 | train |
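Two filters are visible in this row: a filename regex that extracts the label, and a cheap header check that skips corrupt JPEGs whose bytes lack the JFIF marker. The regex below is an assumption, since the real `_NAME_RE` is defined outside the excerpt, and the original checks the marker via a small peek into the file object rather than a slice.

```python
import re

_NAME_RE = re.compile(r"^PetImages[\\/](Cat|Dog)[\\/]\d+\.jpg$")  # assumed

def label_and_validity(fname, data):
  res = _NAME_RE.match(fname)
  if not res:                            # README and other non-image entries
    return None, False
  label = res.group(1).lower()
  return label, b"JFIF" in data[:20]     # corrupt JPEGs lack the JFIF marker

print(label_and_validity("PetImages/Dog/42.jpg",
                         b"\xff\xd8\xff\xe0\x00\x10JFIF"))
# ('dog', True)
```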
tensorflow/datasets | tensorflow_datasets/image/smallnorb.py | _load_chunk | def _load_chunk(dat_path, cat_path, info_path):
"""Loads a data chunk as specified by the paths.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Returns:
Tuple with the dat, cat, info_arrays.
"""
dat_array = r... | python | def _load_chunk(dat_path, cat_path, info_path):
"""Loads a data chunk as specified by the paths.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Returns:
Tuple with the dat, cat, info_arrays.
"""
dat_array = r... | [
"def",
"_load_chunk",
"(",
"dat_path",
",",
"cat_path",
",",
"info_path",
")",
":",
"dat_array",
"=",
"read_binary_matrix",
"(",
"dat_path",
")",
"# Even if the image is gray scale, we need to add an extra channel dimension",
"# to be compatible with tfds.features.Image.",
"dat_a... | Loads a data chunk as specified by the paths.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Returns:
Tuple with the dat, cat, info_arrays. | [
"Loads",
"a",
"data",
"chunk",
"as",
"specified",
"by",
"the",
"paths",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L141-L164 | train |
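The comment preserved in this row explains the one shape tweak `_load_chunk` makes: the dat array holds grayscale stereo pairs, so a trailing channel axis is added to satisfy `tfds.features.Image`'s `(h, w, c)` expectation. A short sketch with stand-in data shaped like smallNORB's training chunk (24300 examples, 2 cameras, 96x96):

```python
import numpy as np

dat_array = np.zeros((24300, 2, 96, 96), dtype=np.uint8)  # stand-in data
dat_array = np.expand_dims(dat_array, -1)                 # add channel axis
print(dat_array.shape)  # (24300, 2, 96, 96, 1)
```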
tensorflow/datasets | tensorflow_datasets/image/smallnorb.py | read_binary_matrix | def read_binary_matrix(filename):
"""Reads and returns binary formatted matrix stored in filename.
The file format is described on the data set page:
https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/
Args:
filename: String with path to the file.
Returns:
Numpy array contained in the file.
"""
wi... | python | def read_binary_matrix(filename):
"""Reads and returns binary formatted matrix stored in filename.
The file format is described on the data set page:
https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/
Args:
filename: String with path to the file.
Returns:
Numpy array contained in the file.
"""
wi... | [
"def",
"read_binary_matrix",
"(",
"filename",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"f",
":",
"s",
"=",
"f",
".",
"read",
"(",
")",
"# Data is stored in little-endian byte order.",
"int32_... | Reads and returns binary formatted matrix stored in filename.
The file format is described on the data set page:
https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/
Args:
filename: String with path to the file.
Returns:
Numpy array contained in the file. | [
"Reads",
"and",
"returns",
"binary",
"formatted",
"matrix",
"stored",
"in",
"filename",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L167-L209 | train |
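A hedged sketch of a parser for the NORB binary matrix format the docstring points to (https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/): a little-endian int32 header of magic number, ndim, then the dimensions (at least 3 are stored even when ndim < 3), followed by the raw array data. The magic constants come from the format spec, not from the elided code.

```python
import numpy as np

_MAGIC_TO_DTYPE = {
    0x1E3D4C55: "u1",    # byte matrix (uint8)
    0x1E3D4C54: "<i4",   # integer matrix (little-endian int32)
}

def parse_binary_matrix(s):
  magic, ndim = (int(x) for x in np.frombuffer(s, dtype="<i4", count=2))
  stored = max(ndim, 3)                 # header stores at least 3 dims
  dims = np.frombuffer(s, dtype="<i4", count=stored, offset=8)
  data = np.frombuffer(s, dtype=_MAGIC_TO_DTYPE[magic], offset=8 + 4 * stored)
  return data.reshape(dims[:ndim])

# Tiny self-test: a 2x3 uint8 matrix round-trips through the parser.
buf = (np.array([0x1E3D4C55, 2, 2, 3, 1], dtype="<i4").tobytes()
       + np.arange(6, dtype=np.uint8).tobytes())
print(parse_binary_matrix(buf))   # [[0 1 2] [3 4 5]]
```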
tensorflow/datasets | tensorflow_datasets/image/smallnorb.py | Smallnorb._split_generators | def _split_generators(self, dl_manager):
"""Returns splits."""
filenames = {
"training_dat": _TRAINING_URL_TEMPLATE.format(type="dat"),
"training_cat": _TRAINING_URL_TEMPLATE.format(type="cat"),
"training_info": _TRAINING_URL_TEMPLATE.format(type="info"),
"testing_dat": _TESTING_... | python | def _split_generators(self, dl_manager):
"""Returns splits."""
filenames = {
"training_dat": _TRAINING_URL_TEMPLATE.format(type="dat"),
"training_cat": _TRAINING_URL_TEMPLATE.format(type="cat"),
"training_info": _TRAINING_URL_TEMPLATE.format(type="info"),
"testing_dat": _TESTING_... | [
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"filenames",
"=",
"{",
"\"training_dat\"",
":",
"_TRAINING_URL_TEMPLATE",
".",
"format",
"(",
"type",
"=",
"\"dat\"",
")",
",",
"\"training_cat\"",
":",
"_TRAINING_URL_TEMPLATE",
".",
"format",
... | Returns splits. | [
"Returns",
"splits",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L86-L114 | train |
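The split setup follows the standard tfds pattern: `dl_manager.download` fetches a dict of URLs, and each `SplitGenerator` forwards the downloaded paths to `_generate_examples` through `gen_kwargs`. A sketch for the train split only; the URL template is an assumption, since the real `_TRAINING_URL_TEMPLATE` sits outside the excerpt, and the era's `SplitGenerator` also took a `num_shards` argument omitted here.

```python
import tensorflow_datasets as tfds

_URL = ("https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/"
        "smallnorb-5x46789x9x18x6x2x96x96-training-{type}.mat.gz")  # assumed

def _split_generators(self, dl_manager):
  files = dl_manager.download({
      "training_dat": _URL.format(type="dat"),
      "training_cat": _URL.format(type="cat"),
      "training_info": _URL.format(type="info"),
  })
  return [
      tfds.core.SplitGenerator(
          name=tfds.Split.TRAIN,
          gen_kwargs={
              "dat_path": files["training_dat"],
              "cat_path": files["training_cat"],
              "info_path": files["training_info"],
          }),
  ]
```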
tensorflow/datasets | tensorflow_datasets/image/smallnorb.py | Smallnorb._generate_examples | def _generate_examples(self, dat_path, cat_path, info_path):
"""Generate examples for the Smallnorb dataset.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Yields:
Dictionaries with images and the d... | python | def _generate_examples(self, dat_path, cat_path, info_path):
"""Generate examples for the Smallnorb dataset.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Yields:
Dictionaries with images and the d... | [
"def",
"_generate_examples",
"(",
"self",
",",
"dat_path",
",",
"cat_path",
",",
"info_path",
")",
":",
"dat_arr",
",",
"cat_arr",
",",
"info_arr",
"=",
"_load_chunk",
"(",
"dat_path",
",",
"cat_path",
",",
"info_path",
")",
"for",
"image",
",",
"category",
... | Generate examples for the Smallnorb dataset.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Yields:
Dictionaries with images and the different labels. | [
"Generate",
"examples",
"for",
"the",
"Smallnorb",
"dataset",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L116-L138 | train |
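The final generator walks the three parsed arrays in lockstep and turns each triple into one example dict. A sketch of that yield loop; the feature names are assumptions inferred from the docstring ("images and the different labels"), since the yield body is elided.

```python
import numpy as np

def generate_examples(dat_arr, cat_arr, info_arr):
  # One example per (stereo pair, category, info vector) triple.
  for image, category, info_vec in zip(dat_arr, cat_arr, info_arr):
    yield {
        "image": image[0],            # first camera of the stereo pair
        "image2": image[1],           # second camera of the stereo pair
        "label_category": int(category),
        "instance": int(info_vec[0]),
    }

examples = generate_examples(
    np.zeros((2, 2, 96, 96, 1), np.uint8),        # two stereo pairs
    np.array([0, 1]),
    np.array([[4, 0, 0, 0], [7, 2, 1, 0]]))
print(next(examples)["label_category"])           # 0
```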