repo
stringlengths 7
55
| path
stringlengths 4
223
| func_name
stringlengths 1
134
| original_string
stringlengths 75
104k
| language
stringclasses 1
value | code
stringlengths 75
104k
| code_tokens
listlengths 19
28.4k
| docstring
stringlengths 1
46.9k
| docstring_tokens
listlengths 1
1.97k
| sha
stringlengths 40
40
| url
stringlengths 87
315
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
tensorflow/datasets
|
tensorflow_datasets/core/dataset_utils.py
|
build_dataset
|
def build_dataset(instruction_dicts,
dataset_from_file_fn,
shuffle_files=False,
parallel_reads=64):
"""Constructs a `tf.data.Dataset` from TFRecord files.
Args:
instruction_dicts: `list` of {'filepath':, 'mask':, 'offset_mask':}
containing the information about which files and which examples to use.
The boolean mask will be repeated and zipped with the examples from
filepath.
dataset_from_file_fn: function returning a `tf.data.Dataset` given a
filename.
shuffle_files: `bool`, Whether to shuffle the input filenames.
parallel_reads: `int`, how many files to read in parallel.
Returns:
`tf.data.Dataset`
"""
# First case: All examples are taken (No value skipped)
if _no_examples_skipped(instruction_dicts):
# Only use the filenames as instruction
instruction_ds = tf.data.Dataset.from_tensor_slices([
d["filepath"] for d in instruction_dicts
])
build_ds_from_instruction = dataset_from_file_fn
# Second case: Use the instructions to read the examples
else:
instruction_ds = _build_instruction_ds(instruction_dicts)
build_ds_from_instruction = functools.partial(
_build_ds_from_instruction,
ds_from_file_fn=dataset_from_file_fn,
)
# If shuffle is True, we shuffle the instructions/shards
if shuffle_files:
instruction_ds = instruction_ds.shuffle(len(instruction_dicts))
# Use interleave to parallel read files and decode records
ds = instruction_ds.interleave(
build_ds_from_instruction,
cycle_length=parallel_reads,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds
|
python
|
def build_dataset(instruction_dicts,
dataset_from_file_fn,
shuffle_files=False,
parallel_reads=64):
"""Constructs a `tf.data.Dataset` from TFRecord files.
Args:
instruction_dicts: `list` of {'filepath':, 'mask':, 'offset_mask':}
containing the information about which files and which examples to use.
The boolean mask will be repeated and zipped with the examples from
filepath.
dataset_from_file_fn: function returning a `tf.data.Dataset` given a
filename.
shuffle_files: `bool`, Whether to shuffle the input filenames.
parallel_reads: `int`, how many files to read in parallel.
Returns:
`tf.data.Dataset`
"""
# First case: All examples are taken (No value skipped)
if _no_examples_skipped(instruction_dicts):
# Only use the filenames as instruction
instruction_ds = tf.data.Dataset.from_tensor_slices([
d["filepath"] for d in instruction_dicts
])
build_ds_from_instruction = dataset_from_file_fn
# Second case: Use the instructions to read the examples
else:
instruction_ds = _build_instruction_ds(instruction_dicts)
build_ds_from_instruction = functools.partial(
_build_ds_from_instruction,
ds_from_file_fn=dataset_from_file_fn,
)
# If shuffle is True, we shuffle the instructions/shards
if shuffle_files:
instruction_ds = instruction_ds.shuffle(len(instruction_dicts))
# Use interleave to parallel read files and decode records
ds = instruction_ds.interleave(
build_ds_from_instruction,
cycle_length=parallel_reads,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds
|
[
"def",
"build_dataset",
"(",
"instruction_dicts",
",",
"dataset_from_file_fn",
",",
"shuffle_files",
"=",
"False",
",",
"parallel_reads",
"=",
"64",
")",
":",
"# First case: All examples are taken (No value skipped)",
"if",
"_no_examples_skipped",
"(",
"instruction_dicts",
")",
":",
"# Only use the filenames as instruction",
"instruction_ds",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"[",
"d",
"[",
"\"filepath\"",
"]",
"for",
"d",
"in",
"instruction_dicts",
"]",
")",
"build_ds_from_instruction",
"=",
"dataset_from_file_fn",
"# Second case: Use the instructions to read the examples",
"else",
":",
"instruction_ds",
"=",
"_build_instruction_ds",
"(",
"instruction_dicts",
")",
"build_ds_from_instruction",
"=",
"functools",
".",
"partial",
"(",
"_build_ds_from_instruction",
",",
"ds_from_file_fn",
"=",
"dataset_from_file_fn",
",",
")",
"# If shuffle is True, we shuffle the instructions/shards",
"if",
"shuffle_files",
":",
"instruction_ds",
"=",
"instruction_ds",
".",
"shuffle",
"(",
"len",
"(",
"instruction_dicts",
")",
")",
"# Use interleave to parallel read files and decode records",
"ds",
"=",
"instruction_ds",
".",
"interleave",
"(",
"build_ds_from_instruction",
",",
"cycle_length",
"=",
"parallel_reads",
",",
"num_parallel_calls",
"=",
"tf",
".",
"data",
".",
"experimental",
".",
"AUTOTUNE",
")",
"return",
"ds"
] |
Constructs a `tf.data.Dataset` from TFRecord files.
Args:
instruction_dicts: `list` of {'filepath':, 'mask':, 'offset_mask':}
containing the information about which files and which examples to use.
The boolean mask will be repeated and zipped with the examples from
filepath.
dataset_from_file_fn: function returning a `tf.data.Dataset` given a
filename.
shuffle_files: `bool`, Whether to shuffle the input filenames.
parallel_reads: `int`, how many files to read in parallel.
Returns:
`tf.data.Dataset`
|
[
"Constructs",
"a",
"tf",
".",
"data",
".",
"Dataset",
"from",
"TFRecord",
"files",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L32-L76
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/dataset_utils.py
|
_build_instruction_ds
|
def _build_instruction_ds(instructions):
"""Create a dataset containing individual instruction for each shard.
Each instruction is a dict:
```
{
"filepath": tf.Tensor(shape=(), dtype=tf.string),
"mask_offset": tf.Tensor(shape=(), dtype=tf.int64),
"mask": tf.Tensor(shape=(100,), dtype=tf.bool),
}
```
Args:
instructions: `list[dict]`, the list of instruction dict
Returns:
instruction_ds: The dataset containing the instruction. The dataset size is
the number of shard.
"""
# Transpose the list[dict] into dict[list]
tensor_inputs = {
# offset_mask need to be converted to int64 explicitly
k: np.array(vals, dtype=np.int64) if k == "mask_offset" else list(vals)
for k, vals in utils.zip_dict(*instructions)
}
return tf.data.Dataset.from_tensor_slices(tensor_inputs)
|
python
|
def _build_instruction_ds(instructions):
"""Create a dataset containing individual instruction for each shard.
Each instruction is a dict:
```
{
"filepath": tf.Tensor(shape=(), dtype=tf.string),
"mask_offset": tf.Tensor(shape=(), dtype=tf.int64),
"mask": tf.Tensor(shape=(100,), dtype=tf.bool),
}
```
Args:
instructions: `list[dict]`, the list of instruction dict
Returns:
instruction_ds: The dataset containing the instruction. The dataset size is
the number of shard.
"""
# Transpose the list[dict] into dict[list]
tensor_inputs = {
# offset_mask need to be converted to int64 explicitly
k: np.array(vals, dtype=np.int64) if k == "mask_offset" else list(vals)
for k, vals in utils.zip_dict(*instructions)
}
return tf.data.Dataset.from_tensor_slices(tensor_inputs)
|
[
"def",
"_build_instruction_ds",
"(",
"instructions",
")",
":",
"# Transpose the list[dict] into dict[list]",
"tensor_inputs",
"=",
"{",
"# offset_mask need to be converted to int64 explicitly",
"k",
":",
"np",
".",
"array",
"(",
"vals",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"if",
"k",
"==",
"\"mask_offset\"",
"else",
"list",
"(",
"vals",
")",
"for",
"k",
",",
"vals",
"in",
"utils",
".",
"zip_dict",
"(",
"*",
"instructions",
")",
"}",
"return",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"tensor_inputs",
")"
] |
Create a dataset containing individual instruction for each shard.
Each instruction is a dict:
```
{
"filepath": tf.Tensor(shape=(), dtype=tf.string),
"mask_offset": tf.Tensor(shape=(), dtype=tf.int64),
"mask": tf.Tensor(shape=(100,), dtype=tf.bool),
}
```
Args:
instructions: `list[dict]`, the list of instruction dict
Returns:
instruction_ds: The dataset containing the instruction. The dataset size is
the number of shard.
|
[
"Create",
"a",
"dataset",
"containing",
"individual",
"instruction",
"for",
"each",
"shard",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L84-L109
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/dataset_utils.py
|
_build_mask_ds
|
def _build_mask_ds(mask, mask_offset):
"""Build the mask dataset to indicate which element to skip.
Args:
mask: `tf.Tensor`, binary mask to apply to all following elements. This
mask should have a length 100.
mask_offset: `tf.Tensor`, Integer specifying from how much the mask
should be shifted for the first element.
Returns:
mask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip
and True for examples to keep.
"""
mask_ds = tf.data.Dataset.from_tensor_slices(mask)
mask_ds = mask_ds.repeat()
mask_ds = mask_ds.skip(mask_offset)
return mask_ds
|
python
|
def _build_mask_ds(mask, mask_offset):
"""Build the mask dataset to indicate which element to skip.
Args:
mask: `tf.Tensor`, binary mask to apply to all following elements. This
mask should have a length 100.
mask_offset: `tf.Tensor`, Integer specifying from how much the mask
should be shifted for the first element.
Returns:
mask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip
and True for examples to keep.
"""
mask_ds = tf.data.Dataset.from_tensor_slices(mask)
mask_ds = mask_ds.repeat()
mask_ds = mask_ds.skip(mask_offset)
return mask_ds
|
[
"def",
"_build_mask_ds",
"(",
"mask",
",",
"mask_offset",
")",
":",
"mask_ds",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"mask",
")",
"mask_ds",
"=",
"mask_ds",
".",
"repeat",
"(",
")",
"mask_ds",
"=",
"mask_ds",
".",
"skip",
"(",
"mask_offset",
")",
"return",
"mask_ds"
] |
Build the mask dataset to indicate which element to skip.
Args:
mask: `tf.Tensor`, binary mask to apply to all following elements. This
mask should have a length 100.
mask_offset: `tf.Tensor`, Integer specifying from how much the mask
should be shifted for the first element.
Returns:
mask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip
and True for examples to keep.
|
[
"Build",
"the",
"mask",
"dataset",
"to",
"indicate",
"which",
"element",
"to",
"skip",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L112-L128
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/dataset_utils.py
|
_build_ds_from_instruction
|
def _build_ds_from_instruction(instruction, ds_from_file_fn):
"""Map an instruction to a real datasets for one particular shard.
Args:
instruction: A `dict` of `tf.Tensor` containing the instruction to load
the particular shard (filename, mask,...)
ds_from_file_fn: `fct`, function which returns the dataset associated to
the filename
Returns:
dataset: `tf.data.Dataset`, The shard loaded from the instruction
"""
# Create the example and mask ds for this particular shard
examples_ds = ds_from_file_fn(instruction["filepath"])
mask_ds = _build_mask_ds(
mask_offset=instruction["mask_offset"],
mask=instruction["mask"],
)
# Zip the mask and real examples
ds = tf.data.Dataset.zip((examples_ds, mask_ds))
# Filter according to the mask (only keep True)
ds = ds.filter(lambda example, mask: mask)
# Only keep the examples
ds = ds.map(lambda example, mask: example)
return ds
|
python
|
def _build_ds_from_instruction(instruction, ds_from_file_fn):
"""Map an instruction to a real datasets for one particular shard.
Args:
instruction: A `dict` of `tf.Tensor` containing the instruction to load
the particular shard (filename, mask,...)
ds_from_file_fn: `fct`, function which returns the dataset associated to
the filename
Returns:
dataset: `tf.data.Dataset`, The shard loaded from the instruction
"""
# Create the example and mask ds for this particular shard
examples_ds = ds_from_file_fn(instruction["filepath"])
mask_ds = _build_mask_ds(
mask_offset=instruction["mask_offset"],
mask=instruction["mask"],
)
# Zip the mask and real examples
ds = tf.data.Dataset.zip((examples_ds, mask_ds))
# Filter according to the mask (only keep True)
ds = ds.filter(lambda example, mask: mask)
# Only keep the examples
ds = ds.map(lambda example, mask: example)
return ds
|
[
"def",
"_build_ds_from_instruction",
"(",
"instruction",
",",
"ds_from_file_fn",
")",
":",
"# Create the example and mask ds for this particular shard",
"examples_ds",
"=",
"ds_from_file_fn",
"(",
"instruction",
"[",
"\"filepath\"",
"]",
")",
"mask_ds",
"=",
"_build_mask_ds",
"(",
"mask_offset",
"=",
"instruction",
"[",
"\"mask_offset\"",
"]",
",",
"mask",
"=",
"instruction",
"[",
"\"mask\"",
"]",
",",
")",
"# Zip the mask and real examples",
"ds",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"zip",
"(",
"(",
"examples_ds",
",",
"mask_ds",
")",
")",
"# Filter according to the mask (only keep True)",
"ds",
"=",
"ds",
".",
"filter",
"(",
"lambda",
"example",
",",
"mask",
":",
"mask",
")",
"# Only keep the examples",
"ds",
"=",
"ds",
".",
"map",
"(",
"lambda",
"example",
",",
"mask",
":",
"example",
")",
"return",
"ds"
] |
Map an instruction to a real datasets for one particular shard.
Args:
instruction: A `dict` of `tf.Tensor` containing the instruction to load
the particular shard (filename, mask,...)
ds_from_file_fn: `fct`, function which returns the dataset associated to
the filename
Returns:
dataset: `tf.data.Dataset`, The shard loaded from the instruction
|
[
"Map",
"an",
"instruction",
"to",
"a",
"real",
"datasets",
"for",
"one",
"particular",
"shard",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L131-L156
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/dataset_utils.py
|
as_numpy
|
def as_numpy(dataset, graph=None):
"""Converts a `tf.data.Dataset` to an iterable of NumPy arrays.
`as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
graph: `tf.Graph`, optional, explicitly set the graph to use.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
"""
nested_ds = dataset
del dataset
# Flatten
flat_ds = tf.nest.flatten(nested_ds)
flat_np = []
# Type check for Tensors and Datasets
for ds_el in flat_ds:
types = [type(el) for el in flat_ds]
types = tf.nest.pack_sequence_as(nested_ds, types)
if not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el)):
raise ValueError("Arguments to as_numpy must be tf.Tensors or "
"tf.data.Datasets. Got: %s" % types)
if tf.executing_eagerly():
# Eager mode
for ds_el in flat_ds:
if isinstance(ds_el, tf.Tensor):
np_el = ds_el.numpy()
elif tf_compat.is_dataset(ds_el):
np_el = _eager_dataset_iterator(ds_el)
else:
assert False
flat_np.append(np_el)
else:
# Graph mode
# First create iterators for datasets
with utils.maybe_with_graph(graph, create_if_none=False):
ds_iters = [
tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next()
for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
]
ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
# Then create numpy arrays for tensors
with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor
# Calling sess.run once so that randomness is shared.
np_arrays = sess.run([tensor for tensor in flat_ds
if not tf_compat.is_dataset(tensor)])
# Merge the dataset iterators and np arrays
iter_ds = iter(ds_iters)
iter_array = iter(np_arrays)
flat_np = [
next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
for ds_el in flat_ds
]
# Nest
return tf.nest.pack_sequence_as(nested_ds, flat_np)
|
python
|
def as_numpy(dataset, graph=None):
"""Converts a `tf.data.Dataset` to an iterable of NumPy arrays.
`as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
graph: `tf.Graph`, optional, explicitly set the graph to use.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
"""
nested_ds = dataset
del dataset
# Flatten
flat_ds = tf.nest.flatten(nested_ds)
flat_np = []
# Type check for Tensors and Datasets
for ds_el in flat_ds:
types = [type(el) for el in flat_ds]
types = tf.nest.pack_sequence_as(nested_ds, types)
if not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el)):
raise ValueError("Arguments to as_numpy must be tf.Tensors or "
"tf.data.Datasets. Got: %s" % types)
if tf.executing_eagerly():
# Eager mode
for ds_el in flat_ds:
if isinstance(ds_el, tf.Tensor):
np_el = ds_el.numpy()
elif tf_compat.is_dataset(ds_el):
np_el = _eager_dataset_iterator(ds_el)
else:
assert False
flat_np.append(np_el)
else:
# Graph mode
# First create iterators for datasets
with utils.maybe_with_graph(graph, create_if_none=False):
ds_iters = [
tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next()
for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
]
ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
# Then create numpy arrays for tensors
with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor
# Calling sess.run once so that randomness is shared.
np_arrays = sess.run([tensor for tensor in flat_ds
if not tf_compat.is_dataset(tensor)])
# Merge the dataset iterators and np arrays
iter_ds = iter(ds_iters)
iter_array = iter(np_arrays)
flat_np = [
next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
for ds_el in flat_ds
]
# Nest
return tf.nest.pack_sequence_as(nested_ds, flat_np)
|
[
"def",
"as_numpy",
"(",
"dataset",
",",
"graph",
"=",
"None",
")",
":",
"nested_ds",
"=",
"dataset",
"del",
"dataset",
"# Flatten",
"flat_ds",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"nested_ds",
")",
"flat_np",
"=",
"[",
"]",
"# Type check for Tensors and Datasets",
"for",
"ds_el",
"in",
"flat_ds",
":",
"types",
"=",
"[",
"type",
"(",
"el",
")",
"for",
"el",
"in",
"flat_ds",
"]",
"types",
"=",
"tf",
".",
"nest",
".",
"pack_sequence_as",
"(",
"nested_ds",
",",
"types",
")",
"if",
"not",
"(",
"isinstance",
"(",
"ds_el",
",",
"tf",
".",
"Tensor",
")",
"or",
"tf_compat",
".",
"is_dataset",
"(",
"ds_el",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Arguments to as_numpy must be tf.Tensors or \"",
"\"tf.data.Datasets. Got: %s\"",
"%",
"types",
")",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"# Eager mode",
"for",
"ds_el",
"in",
"flat_ds",
":",
"if",
"isinstance",
"(",
"ds_el",
",",
"tf",
".",
"Tensor",
")",
":",
"np_el",
"=",
"ds_el",
".",
"numpy",
"(",
")",
"elif",
"tf_compat",
".",
"is_dataset",
"(",
"ds_el",
")",
":",
"np_el",
"=",
"_eager_dataset_iterator",
"(",
"ds_el",
")",
"else",
":",
"assert",
"False",
"flat_np",
".",
"append",
"(",
"np_el",
")",
"else",
":",
"# Graph mode",
"# First create iterators for datasets",
"with",
"utils",
".",
"maybe_with_graph",
"(",
"graph",
",",
"create_if_none",
"=",
"False",
")",
":",
"ds_iters",
"=",
"[",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"(",
"ds_el",
")",
".",
"get_next",
"(",
")",
"for",
"ds_el",
"in",
"flat_ds",
"if",
"tf_compat",
".",
"is_dataset",
"(",
"ds_el",
")",
"]",
"ds_iters",
"=",
"[",
"_graph_dataset_iterator",
"(",
"ds_iter",
",",
"graph",
")",
"for",
"ds_iter",
"in",
"ds_iters",
"]",
"# Then create numpy arrays for tensors",
"with",
"utils",
".",
"nogpu_session",
"(",
"graph",
")",
"as",
"sess",
":",
"# Shared session for tf.Tensor",
"# Calling sess.run once so that randomness is shared.",
"np_arrays",
"=",
"sess",
".",
"run",
"(",
"[",
"tensor",
"for",
"tensor",
"in",
"flat_ds",
"if",
"not",
"tf_compat",
".",
"is_dataset",
"(",
"tensor",
")",
"]",
")",
"# Merge the dataset iterators and np arrays",
"iter_ds",
"=",
"iter",
"(",
"ds_iters",
")",
"iter_array",
"=",
"iter",
"(",
"np_arrays",
")",
"flat_np",
"=",
"[",
"next",
"(",
"iter_ds",
")",
"if",
"tf_compat",
".",
"is_dataset",
"(",
"ds_el",
")",
"else",
"next",
"(",
"iter_array",
")",
"for",
"ds_el",
"in",
"flat_ds",
"]",
"# Nest",
"return",
"tf",
".",
"nest",
".",
"pack_sequence_as",
"(",
"nested_ds",
",",
"flat_np",
")"
] |
Converts a `tf.data.Dataset` to an iterable of NumPy arrays.
`as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
graph: `tf.Graph`, optional, explicitly set the graph to use.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
|
[
"Converts",
"a",
"tf",
".",
"data",
".",
"Dataset",
"to",
"an",
"iterable",
"of",
"NumPy",
"arrays",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L176-L242
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/shapes3d.py
|
_load_data
|
def _load_data(filepath):
"""Loads the images and latent values into Numpy arrays."""
with h5py.File(filepath, "r") as h5dataset:
image_array = np.array(h5dataset["images"])
# The 'label' data set in the hdf5 file actually contains the float values
# and not the class labels.
values_array = np.array(h5dataset["labels"])
return image_array, values_array
|
python
|
def _load_data(filepath):
"""Loads the images and latent values into Numpy arrays."""
with h5py.File(filepath, "r") as h5dataset:
image_array = np.array(h5dataset["images"])
# The 'label' data set in the hdf5 file actually contains the float values
# and not the class labels.
values_array = np.array(h5dataset["labels"])
return image_array, values_array
|
[
"def",
"_load_data",
"(",
"filepath",
")",
":",
"with",
"h5py",
".",
"File",
"(",
"filepath",
",",
"\"r\"",
")",
"as",
"h5dataset",
":",
"image_array",
"=",
"np",
".",
"array",
"(",
"h5dataset",
"[",
"\"images\"",
"]",
")",
"# The 'label' data set in the hdf5 file actually contains the float values",
"# and not the class labels.",
"values_array",
"=",
"np",
".",
"array",
"(",
"h5dataset",
"[",
"\"labels\"",
"]",
")",
"return",
"image_array",
",",
"values_array"
] |
Loads the images and latent values into Numpy arrays.
|
[
"Loads",
"the",
"images",
"and",
"latent",
"values",
"into",
"Numpy",
"arrays",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/shapes3d.py#L151-L158
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/shapes3d.py
|
_discretize
|
def _discretize(a):
"""Discretizes array values to class labels."""
arr = np.asarray(a)
index = np.argsort(arr)
inverse_index = np.zeros(arr.size, dtype=np.intp)
inverse_index[index] = np.arange(arr.size, dtype=np.intp)
arr = arr[index]
obs = np.r_[True, arr[1:] != arr[:-1]]
return obs.cumsum()[inverse_index] - 1
|
python
|
def _discretize(a):
"""Discretizes array values to class labels."""
arr = np.asarray(a)
index = np.argsort(arr)
inverse_index = np.zeros(arr.size, dtype=np.intp)
inverse_index[index] = np.arange(arr.size, dtype=np.intp)
arr = arr[index]
obs = np.r_[True, arr[1:] != arr[:-1]]
return obs.cumsum()[inverse_index] - 1
|
[
"def",
"_discretize",
"(",
"a",
")",
":",
"arr",
"=",
"np",
".",
"asarray",
"(",
"a",
")",
"index",
"=",
"np",
".",
"argsort",
"(",
"arr",
")",
"inverse_index",
"=",
"np",
".",
"zeros",
"(",
"arr",
".",
"size",
",",
"dtype",
"=",
"np",
".",
"intp",
")",
"inverse_index",
"[",
"index",
"]",
"=",
"np",
".",
"arange",
"(",
"arr",
".",
"size",
",",
"dtype",
"=",
"np",
".",
"intp",
")",
"arr",
"=",
"arr",
"[",
"index",
"]",
"obs",
"=",
"np",
".",
"r_",
"[",
"True",
",",
"arr",
"[",
"1",
":",
"]",
"!=",
"arr",
"[",
":",
"-",
"1",
"]",
"]",
"return",
"obs",
".",
"cumsum",
"(",
")",
"[",
"inverse_index",
"]",
"-",
"1"
] |
Discretizes array values to class labels.
|
[
"Discretizes",
"array",
"values",
"to",
"class",
"labels",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/shapes3d.py#L163-L171
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/shapes3d.py
|
Shapes3d._generate_examples
|
def _generate_examples(self, filepath):
"""Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file will be slow with a single file. Instead, we first load everything
# into memory before yielding the samples.
image_array, values_array = _load_data(filepath)
# We need to calculate the class labels from the float values in the file.
labels_array = np.zeros_like(values_array, dtype=np.int64)
for i in range(values_array.shape[1]):
labels_array[:, i] = _discretize(values_array[:, i]) # pylint: disable=unsupported-assignment-operation
for image, labels, values in moves.zip(image_array, labels_array,
values_array):
yield {
"image": image,
"label_floor_hue": labels[0],
"label_wall_hue": labels[1],
"label_object_hue": labels[2],
"label_scale": labels[3],
"label_shape": labels[4],
"label_orientation": labels[5],
"value_floor_hue": values[0],
"value_wall_hue": values[1],
"value_object_hue": values[2],
"value_scale": values[3],
"value_shape": values[4],
"value_orientation": values[5],
}
|
python
|
def _generate_examples(self, filepath):
"""Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file will be slow with a single file. Instead, we first load everything
# into memory before yielding the samples.
image_array, values_array = _load_data(filepath)
# We need to calculate the class labels from the float values in the file.
labels_array = np.zeros_like(values_array, dtype=np.int64)
for i in range(values_array.shape[1]):
labels_array[:, i] = _discretize(values_array[:, i]) # pylint: disable=unsupported-assignment-operation
for image, labels, values in moves.zip(image_array, labels_array,
values_array):
yield {
"image": image,
"label_floor_hue": labels[0],
"label_wall_hue": labels[1],
"label_object_hue": labels[2],
"label_scale": labels[3],
"label_shape": labels[4],
"label_orientation": labels[5],
"value_floor_hue": values[0],
"value_wall_hue": values[1],
"value_object_hue": values[2],
"value_scale": values[3],
"value_shape": values[4],
"value_orientation": values[5],
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"# Simultaneously iterating through the different data sets in the hdf5",
"# file will be slow with a single file. Instead, we first load everything",
"# into memory before yielding the samples.",
"image_array",
",",
"values_array",
"=",
"_load_data",
"(",
"filepath",
")",
"# We need to calculate the class labels from the float values in the file.",
"labels_array",
"=",
"np",
".",
"zeros_like",
"(",
"values_array",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"for",
"i",
"in",
"range",
"(",
"values_array",
".",
"shape",
"[",
"1",
"]",
")",
":",
"labels_array",
"[",
":",
",",
"i",
"]",
"=",
"_discretize",
"(",
"values_array",
"[",
":",
",",
"i",
"]",
")",
"# pylint: disable=unsupported-assignment-operation",
"for",
"image",
",",
"labels",
",",
"values",
"in",
"moves",
".",
"zip",
"(",
"image_array",
",",
"labels_array",
",",
"values_array",
")",
":",
"yield",
"{",
"\"image\"",
":",
"image",
",",
"\"label_floor_hue\"",
":",
"labels",
"[",
"0",
"]",
",",
"\"label_wall_hue\"",
":",
"labels",
"[",
"1",
"]",
",",
"\"label_object_hue\"",
":",
"labels",
"[",
"2",
"]",
",",
"\"label_scale\"",
":",
"labels",
"[",
"3",
"]",
",",
"\"label_shape\"",
":",
"labels",
"[",
"4",
"]",
",",
"\"label_orientation\"",
":",
"labels",
"[",
"5",
"]",
",",
"\"value_floor_hue\"",
":",
"values",
"[",
"0",
"]",
",",
"\"value_wall_hue\"",
":",
"values",
"[",
"1",
"]",
",",
"\"value_object_hue\"",
":",
"values",
"[",
"2",
"]",
",",
"\"value_scale\"",
":",
"values",
"[",
"3",
"]",
",",
"\"value_shape\"",
":",
"values",
"[",
"4",
"]",
",",
"\"value_orientation\"",
":",
"values",
"[",
"5",
"]",
",",
"}"
] |
Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels.
|
[
"Generate",
"examples",
"for",
"the",
"Shapes3d",
"dataset",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/shapes3d.py#L113-L148
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/wikipedia.py
|
_parse_and_clean_wikicode
|
def _parse_and_clean_wikicode(raw_content):
"""Strips formatting and unwanted sections from raw page content."""
wikicode = tfds.core.lazy_imports.mwparserfromhell.parse(raw_content)
# Filters for references, tables, and file/image links.
re_rm_wikilink = re.compile(
"^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE)
def rm_wikilink(obj):
return bool(re_rm_wikilink.match(six.text_type(obj.title)))
def rm_tag(obj):
return six.text_type(obj.tag) in {"ref", "table"}
def rm_template(obj):
return obj.name.lower() in {
"reflist", "notelist", "notelist-ua", "notelist-lr", "notelist-ur",
"notelist-lg"}
def try_remove_obj(obj, section):
try:
section.remove(obj)
except ValueError:
# For unknown reasons, objects are sometimes not found.
pass
section_text = []
# Filter individual sections to clean.
for section in wikicode.get_sections(
flat=True, include_lead=True, include_headings=True):
for obj in section.ifilter_wikilinks(matches=rm_wikilink, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_templates(matches=rm_template, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_tags(matches=rm_tag, recursive=True):
try_remove_obj(obj, section)
section_text.append(section.strip_code().strip())
return "\n\n".join(section_text)
|
python
|
def _parse_and_clean_wikicode(raw_content):
"""Strips formatting and unwanted sections from raw page content."""
wikicode = tfds.core.lazy_imports.mwparserfromhell.parse(raw_content)
# Filters for references, tables, and file/image links.
re_rm_wikilink = re.compile(
"^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE)
def rm_wikilink(obj):
return bool(re_rm_wikilink.match(six.text_type(obj.title)))
def rm_tag(obj):
return six.text_type(obj.tag) in {"ref", "table"}
def rm_template(obj):
return obj.name.lower() in {
"reflist", "notelist", "notelist-ua", "notelist-lr", "notelist-ur",
"notelist-lg"}
def try_remove_obj(obj, section):
try:
section.remove(obj)
except ValueError:
# For unknown reasons, objects are sometimes not found.
pass
section_text = []
# Filter individual sections to clean.
for section in wikicode.get_sections(
flat=True, include_lead=True, include_headings=True):
for obj in section.ifilter_wikilinks(matches=rm_wikilink, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_templates(matches=rm_template, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_tags(matches=rm_tag, recursive=True):
try_remove_obj(obj, section)
section_text.append(section.strip_code().strip())
return "\n\n".join(section_text)
|
[
"def",
"_parse_and_clean_wikicode",
"(",
"raw_content",
")",
":",
"wikicode",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"mwparserfromhell",
".",
"parse",
"(",
"raw_content",
")",
"# Filters for references, tables, and file/image links.",
"re_rm_wikilink",
"=",
"re",
".",
"compile",
"(",
"\"^(?:File|Image|Media):\"",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
"|",
"re",
".",
"UNICODE",
")",
"def",
"rm_wikilink",
"(",
"obj",
")",
":",
"return",
"bool",
"(",
"re_rm_wikilink",
".",
"match",
"(",
"six",
".",
"text_type",
"(",
"obj",
".",
"title",
")",
")",
")",
"def",
"rm_tag",
"(",
"obj",
")",
":",
"return",
"six",
".",
"text_type",
"(",
"obj",
".",
"tag",
")",
"in",
"{",
"\"ref\"",
",",
"\"table\"",
"}",
"def",
"rm_template",
"(",
"obj",
")",
":",
"return",
"obj",
".",
"name",
".",
"lower",
"(",
")",
"in",
"{",
"\"reflist\"",
",",
"\"notelist\"",
",",
"\"notelist-ua\"",
",",
"\"notelist-lr\"",
",",
"\"notelist-ur\"",
",",
"\"notelist-lg\"",
"}",
"def",
"try_remove_obj",
"(",
"obj",
",",
"section",
")",
":",
"try",
":",
"section",
".",
"remove",
"(",
"obj",
")",
"except",
"ValueError",
":",
"# For unknown reasons, objects are sometimes not found.",
"pass",
"section_text",
"=",
"[",
"]",
"# Filter individual sections to clean.",
"for",
"section",
"in",
"wikicode",
".",
"get_sections",
"(",
"flat",
"=",
"True",
",",
"include_lead",
"=",
"True",
",",
"include_headings",
"=",
"True",
")",
":",
"for",
"obj",
"in",
"section",
".",
"ifilter_wikilinks",
"(",
"matches",
"=",
"rm_wikilink",
",",
"recursive",
"=",
"True",
")",
":",
"try_remove_obj",
"(",
"obj",
",",
"section",
")",
"for",
"obj",
"in",
"section",
".",
"ifilter_templates",
"(",
"matches",
"=",
"rm_template",
",",
"recursive",
"=",
"True",
")",
":",
"try_remove_obj",
"(",
"obj",
",",
"section",
")",
"for",
"obj",
"in",
"section",
".",
"ifilter_tags",
"(",
"matches",
"=",
"rm_tag",
",",
"recursive",
"=",
"True",
")",
":",
"try_remove_obj",
"(",
"obj",
",",
"section",
")",
"section_text",
".",
"append",
"(",
"section",
".",
"strip_code",
"(",
")",
".",
"strip",
"(",
")",
")",
"return",
"\"\\n\\n\"",
".",
"join",
"(",
"section_text",
")"
] |
Strips formatting and unwanted sections from raw page content.
|
[
"Strips",
"formatting",
"and",
"unwanted",
"sections",
"from",
"raw",
"page",
"content",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/wikipedia.py#L234-L269
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/wikipedia.py
|
Wikipedia._build_pcollection
|
def _build_pcollection(self, pipeline, filepaths, language):
"""Build PCollection of examples in the raw (text) form."""
beam = tfds.core.lazy_imports.apache_beam
def _extract_content(filepath):
"""Extracts article content from a single WikiMedia XML file."""
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
for _, elem in etree.iterparse(f, events=("end",)):
if not elem.tag.endswith("page"):
continue
namespace = elem.tag[:-4]
title = elem.find("./{0}title".format(namespace)).text
ns = elem.find("./{0}ns".format(namespace)).text
# Filter pages that are not in the "main" namespace.
if ns != "0":
continue
raw_content = elem.find(
"./{0}revision/{0}text".format(namespace)).text
elem.clear()
# Filter redirects.
if raw_content is None or raw_content.lower().startswith("#redirect"):
beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
continue
beam.metrics.Metrics.counter(language, "extracted-examples").inc()
yield (title, raw_content)
def _clean_content(inputs):
"""Cleans raw wikicode to extract text."""
title, raw_content = inputs
try:
text = _parse_and_clean_wikicode(raw_content)
except (
tfds.core.lazy_imports.mwparserfromhell.parser.ParserError) as e:
beam.metrics.Metrics.counter(language, "parser-error").inc()
logging.error("mwparserfromhell ParseError: %s", e)
return
beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
yield {
"title": title,
"text": text
}
return (
pipeline
| beam.Create(filepaths)
| beam.FlatMap(_extract_content)
| beam.FlatMap(_clean_content)
)
|
python
|
def _build_pcollection(self, pipeline, filepaths, language):
"""Build PCollection of examples in the raw (text) form."""
beam = tfds.core.lazy_imports.apache_beam
def _extract_content(filepath):
"""Extracts article content from a single WikiMedia XML file."""
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
for _, elem in etree.iterparse(f, events=("end",)):
if not elem.tag.endswith("page"):
continue
namespace = elem.tag[:-4]
title = elem.find("./{0}title".format(namespace)).text
ns = elem.find("./{0}ns".format(namespace)).text
# Filter pages that are not in the "main" namespace.
if ns != "0":
continue
raw_content = elem.find(
"./{0}revision/{0}text".format(namespace)).text
elem.clear()
# Filter redirects.
if raw_content is None or raw_content.lower().startswith("#redirect"):
beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
continue
beam.metrics.Metrics.counter(language, "extracted-examples").inc()
yield (title, raw_content)
def _clean_content(inputs):
"""Cleans raw wikicode to extract text."""
title, raw_content = inputs
try:
text = _parse_and_clean_wikicode(raw_content)
except (
tfds.core.lazy_imports.mwparserfromhell.parser.ParserError) as e:
beam.metrics.Metrics.counter(language, "parser-error").inc()
logging.error("mwparserfromhell ParseError: %s", e)
return
beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
yield {
"title": title,
"text": text
}
return (
pipeline
| beam.Create(filepaths)
| beam.FlatMap(_extract_content)
| beam.FlatMap(_clean_content)
)
|
[
"def",
"_build_pcollection",
"(",
"self",
",",
"pipeline",
",",
"filepaths",
",",
"language",
")",
":",
"beam",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"apache_beam",
"def",
"_extract_content",
"(",
"filepath",
")",
":",
"\"\"\"Extracts article content from a single WikiMedia XML file.\"\"\"",
"logging",
".",
"info",
"(",
"\"generating examples from = %s\"",
",",
"filepath",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
")",
"as",
"f",
":",
"for",
"_",
",",
"elem",
"in",
"etree",
".",
"iterparse",
"(",
"f",
",",
"events",
"=",
"(",
"\"end\"",
",",
")",
")",
":",
"if",
"not",
"elem",
".",
"tag",
".",
"endswith",
"(",
"\"page\"",
")",
":",
"continue",
"namespace",
"=",
"elem",
".",
"tag",
"[",
":",
"-",
"4",
"]",
"title",
"=",
"elem",
".",
"find",
"(",
"\"./{0}title\"",
".",
"format",
"(",
"namespace",
")",
")",
".",
"text",
"ns",
"=",
"elem",
".",
"find",
"(",
"\"./{0}ns\"",
".",
"format",
"(",
"namespace",
")",
")",
".",
"text",
"# Filter pages that are not in the \"main\" namespace.",
"if",
"ns",
"!=",
"\"0\"",
":",
"continue",
"raw_content",
"=",
"elem",
".",
"find",
"(",
"\"./{0}revision/{0}text\"",
".",
"format",
"(",
"namespace",
")",
")",
".",
"text",
"elem",
".",
"clear",
"(",
")",
"# Filter redirects.",
"if",
"raw_content",
"is",
"None",
"or",
"raw_content",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"\"#redirect\"",
")",
":",
"beam",
".",
"metrics",
".",
"Metrics",
".",
"counter",
"(",
"language",
",",
"\"filtered-redirects\"",
")",
".",
"inc",
"(",
")",
"continue",
"beam",
".",
"metrics",
".",
"Metrics",
".",
"counter",
"(",
"language",
",",
"\"extracted-examples\"",
")",
".",
"inc",
"(",
")",
"yield",
"(",
"title",
",",
"raw_content",
")",
"def",
"_clean_content",
"(",
"inputs",
")",
":",
"\"\"\"Cleans raw wikicode to extract text.\"\"\"",
"title",
",",
"raw_content",
"=",
"inputs",
"try",
":",
"text",
"=",
"_parse_and_clean_wikicode",
"(",
"raw_content",
")",
"except",
"(",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"mwparserfromhell",
".",
"parser",
".",
"ParserError",
")",
"as",
"e",
":",
"beam",
".",
"metrics",
".",
"Metrics",
".",
"counter",
"(",
"language",
",",
"\"parser-error\"",
")",
".",
"inc",
"(",
")",
"logging",
".",
"error",
"(",
"\"mwparserfromhell ParseError: %s\"",
",",
"e",
")",
"return",
"beam",
".",
"metrics",
".",
"Metrics",
".",
"counter",
"(",
"language",
",",
"\"cleaned-examples\"",
")",
".",
"inc",
"(",
")",
"yield",
"{",
"\"title\"",
":",
"title",
",",
"\"text\"",
":",
"text",
"}",
"return",
"(",
"pipeline",
"|",
"beam",
".",
"Create",
"(",
"filepaths",
")",
"|",
"beam",
".",
"FlatMap",
"(",
"_extract_content",
")",
"|",
"beam",
".",
"FlatMap",
"(",
"_clean_content",
")",
")"
] |
Build PCollection of examples in the raw (text) form.
|
[
"Build",
"PCollection",
"of",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/wikipedia.py#L176-L231
|
train
|
tensorflow/datasets
|
tensorflow_datasets/scripts/download_and_prepare.py
|
download_and_prepare
|
def download_and_prepare(builder):
"""Generate data for a given dataset."""
print("download_and_prepare for dataset {}...".format(builder.info.full_name))
dl_config = download_config()
if isinstance(builder, tfds.core.BeamBasedBuilder):
beam = tfds.core.lazy_imports.apache_beam
# TODO(b/129149715): Restore compute stats. Currently skipped because not
# beam supported.
dl_config.compute_stats = tfds.download.ComputeStatsMode.SKIP
dl_config.beam_options = beam.options.pipeline_options.PipelineOptions()
builder.download_and_prepare(
download_dir=FLAGS.download_dir,
download_config=dl_config,
)
termcolor.cprint(str(builder.info.as_proto), attrs=["bold"])
if FLAGS.debug:
dataset = builder.as_dataset(split=tfds.Split.TRAIN)
pdb.set_trace()
del dataset
|
python
|
def download_and_prepare(builder):
"""Generate data for a given dataset."""
print("download_and_prepare for dataset {}...".format(builder.info.full_name))
dl_config = download_config()
if isinstance(builder, tfds.core.BeamBasedBuilder):
beam = tfds.core.lazy_imports.apache_beam
# TODO(b/129149715): Restore compute stats. Currently skipped because not
# beam supported.
dl_config.compute_stats = tfds.download.ComputeStatsMode.SKIP
dl_config.beam_options = beam.options.pipeline_options.PipelineOptions()
builder.download_and_prepare(
download_dir=FLAGS.download_dir,
download_config=dl_config,
)
termcolor.cprint(str(builder.info.as_proto), attrs=["bold"])
if FLAGS.debug:
dataset = builder.as_dataset(split=tfds.Split.TRAIN)
pdb.set_trace()
del dataset
|
[
"def",
"download_and_prepare",
"(",
"builder",
")",
":",
"print",
"(",
"\"download_and_prepare for dataset {}...\"",
".",
"format",
"(",
"builder",
".",
"info",
".",
"full_name",
")",
")",
"dl_config",
"=",
"download_config",
"(",
")",
"if",
"isinstance",
"(",
"builder",
",",
"tfds",
".",
"core",
".",
"BeamBasedBuilder",
")",
":",
"beam",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"apache_beam",
"# TODO(b/129149715): Restore compute stats. Currently skipped because not",
"# beam supported.",
"dl_config",
".",
"compute_stats",
"=",
"tfds",
".",
"download",
".",
"ComputeStatsMode",
".",
"SKIP",
"dl_config",
".",
"beam_options",
"=",
"beam",
".",
"options",
".",
"pipeline_options",
".",
"PipelineOptions",
"(",
")",
"builder",
".",
"download_and_prepare",
"(",
"download_dir",
"=",
"FLAGS",
".",
"download_dir",
",",
"download_config",
"=",
"dl_config",
",",
")",
"termcolor",
".",
"cprint",
"(",
"str",
"(",
"builder",
".",
"info",
".",
"as_proto",
")",
",",
"attrs",
"=",
"[",
"\"bold\"",
"]",
")",
"if",
"FLAGS",
".",
"debug",
":",
"dataset",
"=",
"builder",
".",
"as_dataset",
"(",
"split",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
")",
"pdb",
".",
"set_trace",
"(",
")",
"del",
"dataset"
] |
Generate data for a given dataset.
|
[
"Generate",
"data",
"for",
"a",
"given",
"dataset",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/download_and_prepare.py#L113-L135
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/bounding_boxes.py
|
BBoxFeature.encode_example
|
def encode_example(self, bbox):
"""See base class for details."""
# Validate the coordinates
for coordinate in bbox:
if not isinstance(coordinate, float):
raise ValueError(
'BBox coordinates should be float. Got {}.'.format(bbox))
if not 0.0 <= coordinate <= 1.0:
raise ValueError(
'BBox coordinates should be between 0 and 1. Got {}.'.format(bbox))
if bbox.xmax < bbox.xmin or bbox.ymax < bbox.ymin:
raise ValueError(
'BBox coordinates should have min <= max. Got {}.'.format(bbox))
return super(BBoxFeature, self).encode_example(
[bbox.ymin, bbox.xmin, bbox.ymax, bbox.xmax]
)
|
python
|
def encode_example(self, bbox):
"""See base class for details."""
# Validate the coordinates
for coordinate in bbox:
if not isinstance(coordinate, float):
raise ValueError(
'BBox coordinates should be float. Got {}.'.format(bbox))
if not 0.0 <= coordinate <= 1.0:
raise ValueError(
'BBox coordinates should be between 0 and 1. Got {}.'.format(bbox))
if bbox.xmax < bbox.xmin or bbox.ymax < bbox.ymin:
raise ValueError(
'BBox coordinates should have min <= max. Got {}.'.format(bbox))
return super(BBoxFeature, self).encode_example(
[bbox.ymin, bbox.xmin, bbox.ymax, bbox.xmax]
)
|
[
"def",
"encode_example",
"(",
"self",
",",
"bbox",
")",
":",
"# Validate the coordinates",
"for",
"coordinate",
"in",
"bbox",
":",
"if",
"not",
"isinstance",
"(",
"coordinate",
",",
"float",
")",
":",
"raise",
"ValueError",
"(",
"'BBox coordinates should be float. Got {}.'",
".",
"format",
"(",
"bbox",
")",
")",
"if",
"not",
"0.0",
"<=",
"coordinate",
"<=",
"1.0",
":",
"raise",
"ValueError",
"(",
"'BBox coordinates should be between 0 and 1. Got {}.'",
".",
"format",
"(",
"bbox",
")",
")",
"if",
"bbox",
".",
"xmax",
"<",
"bbox",
".",
"xmin",
"or",
"bbox",
".",
"ymax",
"<",
"bbox",
".",
"ymin",
":",
"raise",
"ValueError",
"(",
"'BBox coordinates should have min <= max. Got {}.'",
".",
"format",
"(",
"bbox",
")",
")",
"return",
"super",
"(",
"BBoxFeature",
",",
"self",
")",
".",
"encode_example",
"(",
"[",
"bbox",
".",
"ymin",
",",
"bbox",
".",
"xmin",
",",
"bbox",
".",
"ymax",
",",
"bbox",
".",
"xmax",
"]",
")"
] |
See base class for details.
|
[
"See",
"base",
"class",
"for",
"details",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/bounding_boxes.py#L60-L76
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/cifar.py
|
_load_data
|
def _load_data(path, labels_number=1):
"""Yields (labels, np_image) tuples."""
with tf.io.gfile.GFile(path, "rb") as f:
data = f.read()
offset = 0
max_offset = len(data) - 1
while offset < max_offset:
labels = np.frombuffer(data, dtype=np.uint8, count=labels_number,
offset=offset).reshape((labels_number,))
# 1 byte per label, 1024 * 3 = 3072 bytes for the image.
offset += labels_number
img = (np.frombuffer(data, dtype=np.uint8, count=3072, offset=offset)
.reshape((3, _CIFAR_IMAGE_SIZE, _CIFAR_IMAGE_SIZE))
.transpose((1, 2, 0))
)
offset += 3072
yield labels, img
|
python
|
def _load_data(path, labels_number=1):
"""Yields (labels, np_image) tuples."""
with tf.io.gfile.GFile(path, "rb") as f:
data = f.read()
offset = 0
max_offset = len(data) - 1
while offset < max_offset:
labels = np.frombuffer(data, dtype=np.uint8, count=labels_number,
offset=offset).reshape((labels_number,))
# 1 byte per label, 1024 * 3 = 3072 bytes for the image.
offset += labels_number
img = (np.frombuffer(data, dtype=np.uint8, count=3072, offset=offset)
.reshape((3, _CIFAR_IMAGE_SIZE, _CIFAR_IMAGE_SIZE))
.transpose((1, 2, 0))
)
offset += 3072
yield labels, img
|
[
"def",
"_load_data",
"(",
"path",
",",
"labels_number",
"=",
"1",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"offset",
"=",
"0",
"max_offset",
"=",
"len",
"(",
"data",
")",
"-",
"1",
"while",
"offset",
"<",
"max_offset",
":",
"labels",
"=",
"np",
".",
"frombuffer",
"(",
"data",
",",
"dtype",
"=",
"np",
".",
"uint8",
",",
"count",
"=",
"labels_number",
",",
"offset",
"=",
"offset",
")",
".",
"reshape",
"(",
"(",
"labels_number",
",",
")",
")",
"# 1 byte per label, 1024 * 3 = 3072 bytes for the image.",
"offset",
"+=",
"labels_number",
"img",
"=",
"(",
"np",
".",
"frombuffer",
"(",
"data",
",",
"dtype",
"=",
"np",
".",
"uint8",
",",
"count",
"=",
"3072",
",",
"offset",
"=",
"offset",
")",
".",
"reshape",
"(",
"(",
"3",
",",
"_CIFAR_IMAGE_SIZE",
",",
"_CIFAR_IMAGE_SIZE",
")",
")",
".",
"transpose",
"(",
"(",
"1",
",",
"2",
",",
"0",
")",
")",
")",
"offset",
"+=",
"3072",
"yield",
"labels",
",",
"img"
] |
Yields (labels, np_image) tuples.
|
[
"Yields",
"(",
"labels",
"np_image",
")",
"tuples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar.py#L191-L207
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/cifar.py
|
Cifar10._split_generators
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
cifar_path = dl_manager.download_and_extract(self._cifar_info.url)
cifar_info = self._cifar_info
cifar_path = os.path.join(cifar_path, cifar_info.prefix)
# Load the label names
for label_key, label_file in zip(cifar_info.label_keys,
cifar_info.label_files):
labels_path = os.path.join(cifar_path, label_file)
with tf.io.gfile.GFile(labels_path) as label_f:
label_names = [name for name in label_f.read().split("\n") if name]
self.info.features[label_key].names = label_names
# Define the splits
def gen_filenames(filenames):
for f in filenames:
yield os.path.join(cifar_path, f)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=10,
gen_kwargs={"filepaths": gen_filenames(cifar_info.train_files)}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs={"filepaths": gen_filenames(cifar_info.test_files)}),
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
cifar_path = dl_manager.download_and_extract(self._cifar_info.url)
cifar_info = self._cifar_info
cifar_path = os.path.join(cifar_path, cifar_info.prefix)
# Load the label names
for label_key, label_file in zip(cifar_info.label_keys,
cifar_info.label_files):
labels_path = os.path.join(cifar_path, label_file)
with tf.io.gfile.GFile(labels_path) as label_f:
label_names = [name for name in label_f.read().split("\n") if name]
self.info.features[label_key].names = label_names
# Define the splits
def gen_filenames(filenames):
for f in filenames:
yield os.path.join(cifar_path, f)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=10,
gen_kwargs={"filepaths": gen_filenames(cifar_info.train_files)}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs={"filepaths": gen_filenames(cifar_info.test_files)}),
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"cifar_path",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"self",
".",
"_cifar_info",
".",
"url",
")",
"cifar_info",
"=",
"self",
".",
"_cifar_info",
"cifar_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cifar_path",
",",
"cifar_info",
".",
"prefix",
")",
"# Load the label names",
"for",
"label_key",
",",
"label_file",
"in",
"zip",
"(",
"cifar_info",
".",
"label_keys",
",",
"cifar_info",
".",
"label_files",
")",
":",
"labels_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cifar_path",
",",
"label_file",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"labels_path",
")",
"as",
"label_f",
":",
"label_names",
"=",
"[",
"name",
"for",
"name",
"in",
"label_f",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"if",
"name",
"]",
"self",
".",
"info",
".",
"features",
"[",
"label_key",
"]",
".",
"names",
"=",
"label_names",
"# Define the splits",
"def",
"gen_filenames",
"(",
"filenames",
")",
":",
"for",
"f",
"in",
"filenames",
":",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"cifar_path",
",",
"f",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
",",
"num_shards",
"=",
"10",
",",
"gen_kwargs",
"=",
"{",
"\"filepaths\"",
":",
"gen_filenames",
"(",
"cifar_info",
".",
"train_files",
")",
"}",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TEST",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"{",
"\"filepaths\"",
":",
"gen_filenames",
"(",
"cifar_info",
".",
"test_files",
")",
"}",
")",
",",
"]"
] |
Returns SplitGenerators.
|
[
"Returns",
"SplitGenerators",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar.py#L79-L108
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/cifar.py
|
Cifar10._generate_examples
|
def _generate_examples(self, filepaths):
"""Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features.
"""
label_keys = self._cifar_info.label_keys
for path in filepaths:
for labels, np_image in _load_data(path, len(label_keys)):
row = dict(zip(label_keys, labels))
row["image"] = np_image
yield row
|
python
|
def _generate_examples(self, filepaths):
"""Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features.
"""
label_keys = self._cifar_info.label_keys
for path in filepaths:
for labels, np_image in _load_data(path, len(label_keys)):
row = dict(zip(label_keys, labels))
row["image"] = np_image
yield row
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"filepaths",
")",
":",
"label_keys",
"=",
"self",
".",
"_cifar_info",
".",
"label_keys",
"for",
"path",
"in",
"filepaths",
":",
"for",
"labels",
",",
"np_image",
"in",
"_load_data",
"(",
"path",
",",
"len",
"(",
"label_keys",
")",
")",
":",
"row",
"=",
"dict",
"(",
"zip",
"(",
"label_keys",
",",
"labels",
")",
")",
"row",
"[",
"\"image\"",
"]",
"=",
"np_image",
"yield",
"row"
] |
Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features.
|
[
"Generate",
"CIFAR",
"examples",
"as",
"dicts",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar.py#L110-L127
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/api_utils.py
|
disallow_positional_args
|
def disallow_positional_args(wrapped=None, allowed=None):
"""Requires function to be called using keyword arguments."""
# See
# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments
# for decorator pattern.
if wrapped is None:
return functools.partial(disallow_positional_args, allowed=allowed)
@wrapt.decorator
def disallow_positional_args_dec(fn, instance, args, kwargs):
ismethod = instance is not None
_check_no_positional(fn, args, ismethod, allowed=allowed)
_check_required(fn, kwargs)
return fn(*args, **kwargs)
return disallow_positional_args_dec(wrapped)
|
python
|
def disallow_positional_args(wrapped=None, allowed=None):
"""Requires function to be called using keyword arguments."""
# See
# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments
# for decorator pattern.
if wrapped is None:
return functools.partial(disallow_positional_args, allowed=allowed)
@wrapt.decorator
def disallow_positional_args_dec(fn, instance, args, kwargs):
ismethod = instance is not None
_check_no_positional(fn, args, ismethod, allowed=allowed)
_check_required(fn, kwargs)
return fn(*args, **kwargs)
return disallow_positional_args_dec(wrapped)
|
[
"def",
"disallow_positional_args",
"(",
"wrapped",
"=",
"None",
",",
"allowed",
"=",
"None",
")",
":",
"# See",
"# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments",
"# for decorator pattern.",
"if",
"wrapped",
"is",
"None",
":",
"return",
"functools",
".",
"partial",
"(",
"disallow_positional_args",
",",
"allowed",
"=",
"allowed",
")",
"@",
"wrapt",
".",
"decorator",
"def",
"disallow_positional_args_dec",
"(",
"fn",
",",
"instance",
",",
"args",
",",
"kwargs",
")",
":",
"ismethod",
"=",
"instance",
"is",
"not",
"None",
"_check_no_positional",
"(",
"fn",
",",
"args",
",",
"ismethod",
",",
"allowed",
"=",
"allowed",
")",
"_check_required",
"(",
"fn",
",",
"kwargs",
")",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"disallow_positional_args_dec",
"(",
"wrapped",
")"
] |
Requires function to be called using keyword arguments.
|
[
"Requires",
"function",
"to",
"be",
"called",
"using",
"keyword",
"arguments",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/api_utils.py#L39-L54
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/api_utils.py
|
_required_args
|
def _required_args(fn):
"""Returns arguments of fn with default=REQUIRED_ARG."""
spec = getargspec(fn)
if not spec.defaults:
return []
arg_names = spec.args[-len(spec.defaults):]
return [name for name, val in zip(arg_names, spec.defaults)
if val is REQUIRED_ARG]
|
python
|
def _required_args(fn):
"""Returns arguments of fn with default=REQUIRED_ARG."""
spec = getargspec(fn)
if not spec.defaults:
return []
arg_names = spec.args[-len(spec.defaults):]
return [name for name, val in zip(arg_names, spec.defaults)
if val is REQUIRED_ARG]
|
[
"def",
"_required_args",
"(",
"fn",
")",
":",
"spec",
"=",
"getargspec",
"(",
"fn",
")",
"if",
"not",
"spec",
".",
"defaults",
":",
"return",
"[",
"]",
"arg_names",
"=",
"spec",
".",
"args",
"[",
"-",
"len",
"(",
"spec",
".",
"defaults",
")",
":",
"]",
"return",
"[",
"name",
"for",
"name",
",",
"val",
"in",
"zip",
"(",
"arg_names",
",",
"spec",
".",
"defaults",
")",
"if",
"val",
"is",
"REQUIRED_ARG",
"]"
] |
Returns arguments of fn with default=REQUIRED_ARG.
|
[
"Returns",
"arguments",
"of",
"fn",
"with",
"default",
"=",
"REQUIRED_ARG",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/api_utils.py#L67-L75
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/gcs_utils.py
|
download_gcs_file
|
def download_gcs_file(path, out_fname=None, prefix_filter=None):
"""Download a file from GCS, optionally to a file."""
url = posixpath.join(GCS_BUCKET, path)
if prefix_filter:
url += "?prefix=%s" % prefix_filter
stream = bool(out_fname)
resp = requests.get(url, stream=stream)
if not resp.ok:
raise ValueError("GCS bucket inaccessible")
if out_fname:
with tf.io.gfile.GFile(out_fname, "wb") as f:
for chunk in resp.iter_content(1024):
f.write(chunk)
else:
return resp.content
|
python
|
def download_gcs_file(path, out_fname=None, prefix_filter=None):
"""Download a file from GCS, optionally to a file."""
url = posixpath.join(GCS_BUCKET, path)
if prefix_filter:
url += "?prefix=%s" % prefix_filter
stream = bool(out_fname)
resp = requests.get(url, stream=stream)
if not resp.ok:
raise ValueError("GCS bucket inaccessible")
if out_fname:
with tf.io.gfile.GFile(out_fname, "wb") as f:
for chunk in resp.iter_content(1024):
f.write(chunk)
else:
return resp.content
|
[
"def",
"download_gcs_file",
"(",
"path",
",",
"out_fname",
"=",
"None",
",",
"prefix_filter",
"=",
"None",
")",
":",
"url",
"=",
"posixpath",
".",
"join",
"(",
"GCS_BUCKET",
",",
"path",
")",
"if",
"prefix_filter",
":",
"url",
"+=",
"\"?prefix=%s\"",
"%",
"prefix_filter",
"stream",
"=",
"bool",
"(",
"out_fname",
")",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"stream",
")",
"if",
"not",
"resp",
".",
"ok",
":",
"raise",
"ValueError",
"(",
"\"GCS bucket inaccessible\"",
")",
"if",
"out_fname",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"out_fname",
",",
"\"wb\"",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"resp",
".",
"iter_content",
"(",
"1024",
")",
":",
"f",
".",
"write",
"(",
"chunk",
")",
"else",
":",
"return",
"resp",
".",
"content"
] |
Download a file from GCS, optionally to a file.
|
[
"Download",
"a",
"file",
"from",
"GCS",
"optionally",
"to",
"a",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/gcs_utils.py#L34-L48
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/gcs_utils.py
|
gcs_files
|
def gcs_files(prefix_filter=None):
"""List all files in GCS bucket."""
top_level_xml_str = download_gcs_file("", prefix_filter=prefix_filter)
xml_root = ElementTree.fromstring(top_level_xml_str)
filenames = [el[0].text for el in xml_root if el.tag.endswith("Contents")]
return filenames
|
python
|
def gcs_files(prefix_filter=None):
"""List all files in GCS bucket."""
top_level_xml_str = download_gcs_file("", prefix_filter=prefix_filter)
xml_root = ElementTree.fromstring(top_level_xml_str)
filenames = [el[0].text for el in xml_root if el.tag.endswith("Contents")]
return filenames
|
[
"def",
"gcs_files",
"(",
"prefix_filter",
"=",
"None",
")",
":",
"top_level_xml_str",
"=",
"download_gcs_file",
"(",
"\"\"",
",",
"prefix_filter",
"=",
"prefix_filter",
")",
"xml_root",
"=",
"ElementTree",
".",
"fromstring",
"(",
"top_level_xml_str",
")",
"filenames",
"=",
"[",
"el",
"[",
"0",
"]",
".",
"text",
"for",
"el",
"in",
"xml_root",
"if",
"el",
".",
"tag",
".",
"endswith",
"(",
"\"Contents\"",
")",
"]",
"return",
"filenames"
] |
List all files in GCS bucket.
|
[
"List",
"all",
"files",
"in",
"GCS",
"bucket",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/gcs_utils.py#L52-L57
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/gcs_utils.py
|
gcs_dataset_info_files
|
def gcs_dataset_info_files(dataset_dir):
"""Return paths to GCS files in the given dataset directory."""
prefix = posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir, "")
# Filter for this dataset
filenames = [el for el in gcs_files(prefix_filter=prefix)
if el.startswith(prefix) and len(el) > len(prefix)]
return filenames
|
python
|
def gcs_dataset_info_files(dataset_dir):
"""Return paths to GCS files in the given dataset directory."""
prefix = posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir, "")
# Filter for this dataset
filenames = [el for el in gcs_files(prefix_filter=prefix)
if el.startswith(prefix) and len(el) > len(prefix)]
return filenames
|
[
"def",
"gcs_dataset_info_files",
"(",
"dataset_dir",
")",
":",
"prefix",
"=",
"posixpath",
".",
"join",
"(",
"GCS_DATASET_INFO_DIR",
",",
"dataset_dir",
",",
"\"\"",
")",
"# Filter for this dataset",
"filenames",
"=",
"[",
"el",
"for",
"el",
"in",
"gcs_files",
"(",
"prefix_filter",
"=",
"prefix",
")",
"if",
"el",
".",
"startswith",
"(",
"prefix",
")",
"and",
"len",
"(",
"el",
")",
">",
"len",
"(",
"prefix",
")",
"]",
"return",
"filenames"
] |
Return paths to GCS files in the given dataset directory.
|
[
"Return",
"paths",
"to",
"GCS",
"files",
"in",
"the",
"given",
"dataset",
"directory",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/gcs_utils.py#L60-L66
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/utils/gcs_utils.py
|
is_dataset_on_gcs
|
def is_dataset_on_gcs(dataset_name):
"""If the dataset is available on the GCS bucket gs://tfds-data/datasets."""
dir_name = posixpath.join(GCS_DATASETS_DIR, dataset_name)
return len(gcs_files(prefix_filter=dir_name)) > 2
|
python
|
def is_dataset_on_gcs(dataset_name):
"""If the dataset is available on the GCS bucket gs://tfds-data/datasets."""
dir_name = posixpath.join(GCS_DATASETS_DIR, dataset_name)
return len(gcs_files(prefix_filter=dir_name)) > 2
|
[
"def",
"is_dataset_on_gcs",
"(",
"dataset_name",
")",
":",
"dir_name",
"=",
"posixpath",
".",
"join",
"(",
"GCS_DATASETS_DIR",
",",
"dataset_name",
")",
"return",
"len",
"(",
"gcs_files",
"(",
"prefix_filter",
"=",
"dir_name",
")",
")",
">",
"2"
] |
If the dataset is available on the GCS bucket gs://tfds-data/datasets.
|
[
"If",
"the",
"dataset",
"is",
"available",
"on",
"the",
"GCS",
"bucket",
"gs",
":",
"//",
"tfds",
"-",
"data",
"/",
"datasets",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/gcs_utils.py#L69-L72
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/kaggle.py
|
_run_kaggle_command
|
def _run_kaggle_command(command_args, competition_name):
"""Run kaggle command with subprocess."""
try:
output = sp.check_output(command_args)
return tf.compat.as_text(output)
except sp.CalledProcessError as err:
output = err.output
_log_command_output(output, error=True)
if output.startswith(b"404"):
logging.error(_NOT_FOUND_ERR_MSG, competition_name)
raise
logging.error(_ERR_MSG, competition_name)
raise
|
python
|
def _run_kaggle_command(command_args, competition_name):
"""Run kaggle command with subprocess."""
try:
output = sp.check_output(command_args)
return tf.compat.as_text(output)
except sp.CalledProcessError as err:
output = err.output
_log_command_output(output, error=True)
if output.startswith(b"404"):
logging.error(_NOT_FOUND_ERR_MSG, competition_name)
raise
logging.error(_ERR_MSG, competition_name)
raise
|
[
"def",
"_run_kaggle_command",
"(",
"command_args",
",",
"competition_name",
")",
":",
"try",
":",
"output",
"=",
"sp",
".",
"check_output",
"(",
"command_args",
")",
"return",
"tf",
".",
"compat",
".",
"as_text",
"(",
"output",
")",
"except",
"sp",
".",
"CalledProcessError",
"as",
"err",
":",
"output",
"=",
"err",
".",
"output",
"_log_command_output",
"(",
"output",
",",
"error",
"=",
"True",
")",
"if",
"output",
".",
"startswith",
"(",
"b\"404\"",
")",
":",
"logging",
".",
"error",
"(",
"_NOT_FOUND_ERR_MSG",
",",
"competition_name",
")",
"raise",
"logging",
".",
"error",
"(",
"_ERR_MSG",
",",
"competition_name",
")",
"raise"
] |
Run kaggle command with subprocess.
|
[
"Run",
"kaggle",
"command",
"with",
"subprocess",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/kaggle.py#L138-L150
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/kaggle.py
|
KaggleCompetitionDownloader.competition_files
|
def competition_files(self):
"""List of competition files."""
command = [
"kaggle",
"datasets" if "/" in self._competition_name else "competitions",
"files",
"-v",
self._competition_name,
]
output = _run_kaggle_command(command, self._competition_name)
return sorted([
line.split(",")[0] for line in output.split("\n")[1:] if line
])
|
python
|
def competition_files(self):
"""List of competition files."""
command = [
"kaggle",
"datasets" if "/" in self._competition_name else "competitions",
"files",
"-v",
self._competition_name,
]
output = _run_kaggle_command(command, self._competition_name)
return sorted([
line.split(",")[0] for line in output.split("\n")[1:] if line
])
|
[
"def",
"competition_files",
"(",
"self",
")",
":",
"command",
"=",
"[",
"\"kaggle\"",
",",
"\"datasets\"",
"if",
"\"/\"",
"in",
"self",
".",
"_competition_name",
"else",
"\"competitions\"",
",",
"\"files\"",
",",
"\"-v\"",
",",
"self",
".",
"_competition_name",
",",
"]",
"output",
"=",
"_run_kaggle_command",
"(",
"command",
",",
"self",
".",
"_competition_name",
")",
"return",
"sorted",
"(",
"[",
"line",
".",
"split",
"(",
"\",\"",
")",
"[",
"0",
"]",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"1",
":",
"]",
"if",
"line",
"]",
")"
] |
List of competition files.
|
[
"List",
"of",
"competition",
"files",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/kaggle.py#L96-L108
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/kaggle.py
|
KaggleCompetitionDownloader.competition_urls
|
def competition_urls(self):
"""Returns 'kaggle://' urls."""
return [
KaggleFile(self._competition_name, fname).to_url()
for fname in self.competition_files # pylint: disable=not-an-iterable
]
|
python
|
def competition_urls(self):
"""Returns 'kaggle://' urls."""
return [
KaggleFile(self._competition_name, fname).to_url()
for fname in self.competition_files # pylint: disable=not-an-iterable
]
|
[
"def",
"competition_urls",
"(",
"self",
")",
":",
"return",
"[",
"KaggleFile",
"(",
"self",
".",
"_competition_name",
",",
"fname",
")",
".",
"to_url",
"(",
")",
"for",
"fname",
"in",
"self",
".",
"competition_files",
"# pylint: disable=not-an-iterable",
"]"
] |
Returns 'kaggle://' urls.
|
[
"Returns",
"kaggle",
":",
"//",
"urls",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/kaggle.py#L111-L116
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/kaggle.py
|
KaggleCompetitionDownloader.download_file
|
def download_file(self, fname, output_dir):
"""Downloads competition file to output_dir."""
if fname not in self.competition_files: # pylint: disable=unsupported-membership-test
raise ValueError("%s is not one of the competition's "
"files: %s" % (fname, self.competition_files))
command = [
"kaggle",
"competitions",
"download",
"--file",
fname,
"--path",
output_dir,
"-c",
self._competition_name,
]
_run_kaggle_command(command, self._competition_name)
return os.path.join(output_dir, fname)
|
python
|
def download_file(self, fname, output_dir):
"""Downloads competition file to output_dir."""
if fname not in self.competition_files: # pylint: disable=unsupported-membership-test
raise ValueError("%s is not one of the competition's "
"files: %s" % (fname, self.competition_files))
command = [
"kaggle",
"competitions",
"download",
"--file",
fname,
"--path",
output_dir,
"-c",
self._competition_name,
]
_run_kaggle_command(command, self._competition_name)
return os.path.join(output_dir, fname)
|
[
"def",
"download_file",
"(",
"self",
",",
"fname",
",",
"output_dir",
")",
":",
"if",
"fname",
"not",
"in",
"self",
".",
"competition_files",
":",
"# pylint: disable=unsupported-membership-test",
"raise",
"ValueError",
"(",
"\"%s is not one of the competition's \"",
"\"files: %s\"",
"%",
"(",
"fname",
",",
"self",
".",
"competition_files",
")",
")",
"command",
"=",
"[",
"\"kaggle\"",
",",
"\"competitions\"",
",",
"\"download\"",
",",
"\"--file\"",
",",
"fname",
",",
"\"--path\"",
",",
"output_dir",
",",
"\"-c\"",
",",
"self",
".",
"_competition_name",
",",
"]",
"_run_kaggle_command",
"(",
"command",
",",
"self",
".",
"_competition_name",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"fname",
")"
] |
Downloads competition file to output_dir.
|
[
"Downloads",
"competition",
"file",
"to",
"output_dir",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/kaggle.py#L118-L135
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/flowers.py
|
TFFlowers._generate_examples
|
def _generate_examples(self, images_dir_path):
"""Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label.
"""
parent_dir = tf.io.gfile.listdir(images_dir_path)[0]
walk_dir = os.path.join(images_dir_path, parent_dir)
dirs = tf.io.gfile.listdir(walk_dir)
for d in dirs:
if tf.io.gfile.isdir(os.path.join(walk_dir, d)):
for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)):
for image_file in fname:
if image_file.endswith(".jpg"):
image_path = os.path.join(full_path, image_file)
yield {
"image": image_path,
"label": d.lower(),
}
|
python
|
def _generate_examples(self, images_dir_path):
"""Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label.
"""
parent_dir = tf.io.gfile.listdir(images_dir_path)[0]
walk_dir = os.path.join(images_dir_path, parent_dir)
dirs = tf.io.gfile.listdir(walk_dir)
for d in dirs:
if tf.io.gfile.isdir(os.path.join(walk_dir, d)):
for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)):
for image_file in fname:
if image_file.endswith(".jpg"):
image_path = os.path.join(full_path, image_file)
yield {
"image": image_path,
"label": d.lower(),
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"images_dir_path",
")",
":",
"parent_dir",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"images_dir_path",
")",
"[",
"0",
"]",
"walk_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"images_dir_path",
",",
"parent_dir",
")",
"dirs",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"walk_dir",
")",
"for",
"d",
"in",
"dirs",
":",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"walk_dir",
",",
"d",
")",
")",
":",
"for",
"full_path",
",",
"_",
",",
"fname",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"walk",
"(",
"os",
".",
"path",
".",
"join",
"(",
"walk_dir",
",",
"d",
")",
")",
":",
"for",
"image_file",
"in",
"fname",
":",
"if",
"image_file",
".",
"endswith",
"(",
"\".jpg\"",
")",
":",
"image_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"full_path",
",",
"image_file",
")",
"yield",
"{",
"\"image\"",
":",
"image_path",
",",
"\"label\"",
":",
"d",
".",
"lower",
"(",
")",
",",
"}"
] |
Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label.
|
[
"Generate",
"flower",
"images",
"and",
"labels",
"given",
"the",
"image",
"directory",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/flowers.py#L71-L93
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/checksums.py
|
_checksum_paths
|
def _checksum_paths():
"""Returns dict {'dataset_name': 'path/to/checksums/file'}."""
dataset2path = {}
for dir_path in _CHECKSUM_DIRS:
for fname in _list_dir(dir_path):
if not fname.endswith(_CHECKSUM_SUFFIX):
continue
fpath = os.path.join(dir_path, fname)
dataset_name = fname[:-len(_CHECKSUM_SUFFIX)]
dataset2path[dataset_name] = fpath
return dataset2path
|
python
|
def _checksum_paths():
"""Returns dict {'dataset_name': 'path/to/checksums/file'}."""
dataset2path = {}
for dir_path in _CHECKSUM_DIRS:
for fname in _list_dir(dir_path):
if not fname.endswith(_CHECKSUM_SUFFIX):
continue
fpath = os.path.join(dir_path, fname)
dataset_name = fname[:-len(_CHECKSUM_SUFFIX)]
dataset2path[dataset_name] = fpath
return dataset2path
|
[
"def",
"_checksum_paths",
"(",
")",
":",
"dataset2path",
"=",
"{",
"}",
"for",
"dir_path",
"in",
"_CHECKSUM_DIRS",
":",
"for",
"fname",
"in",
"_list_dir",
"(",
"dir_path",
")",
":",
"if",
"not",
"fname",
".",
"endswith",
"(",
"_CHECKSUM_SUFFIX",
")",
":",
"continue",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"fname",
")",
"dataset_name",
"=",
"fname",
"[",
":",
"-",
"len",
"(",
"_CHECKSUM_SUFFIX",
")",
"]",
"dataset2path",
"[",
"dataset_name",
"]",
"=",
"fpath",
"return",
"dataset2path"
] |
Returns dict {'dataset_name': 'path/to/checksums/file'}.
|
[
"Returns",
"dict",
"{",
"dataset_name",
":",
"path",
"/",
"to",
"/",
"checksums",
"/",
"file",
"}",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L46-L56
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/checksums.py
|
_get_path
|
def _get_path(dataset_name):
"""Returns path to where checksums are stored for a given dataset."""
path = _checksum_paths().get(dataset_name, None)
if path:
return path
msg = ('No checksums file could be find for dataset %s. Please create one in '
'one of: %s') % (dataset_name, ', '.join(_CHECKSUM_DIRS))
raise AssertionError(msg)
|
python
|
def _get_path(dataset_name):
"""Returns path to where checksums are stored for a given dataset."""
path = _checksum_paths().get(dataset_name, None)
if path:
return path
msg = ('No checksums file could be find for dataset %s. Please create one in '
'one of: %s') % (dataset_name, ', '.join(_CHECKSUM_DIRS))
raise AssertionError(msg)
|
[
"def",
"_get_path",
"(",
"dataset_name",
")",
":",
"path",
"=",
"_checksum_paths",
"(",
")",
".",
"get",
"(",
"dataset_name",
",",
"None",
")",
"if",
"path",
":",
"return",
"path",
"msg",
"=",
"(",
"'No checksums file could be find for dataset %s. Please create one in '",
"'one of: %s'",
")",
"%",
"(",
"dataset_name",
",",
"', '",
".",
"join",
"(",
"_CHECKSUM_DIRS",
")",
")",
"raise",
"AssertionError",
"(",
"msg",
")"
] |
Returns path to where checksums are stored for a given dataset.
|
[
"Returns",
"path",
"to",
"where",
"checksums",
"are",
"stored",
"for",
"a",
"given",
"dataset",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L59-L66
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/checksums.py
|
_get_sizes_checksums
|
def _get_sizes_checksums(checksums_path):
"""Returns {URL: (size, checksum)}s stored within file."""
checksums = {}
for line in _read_file(checksums_path).split('\n'):
if not line:
continue
# URL might have spaces inside, but size and checksum will not.
url, size, checksum = line.rsplit(' ', 2)
checksums[url] = (int(size), checksum)
return checksums
|
python
|
def _get_sizes_checksums(checksums_path):
"""Returns {URL: (size, checksum)}s stored within file."""
checksums = {}
for line in _read_file(checksums_path).split('\n'):
if not line:
continue
# URL might have spaces inside, but size and checksum will not.
url, size, checksum = line.rsplit(' ', 2)
checksums[url] = (int(size), checksum)
return checksums
|
[
"def",
"_get_sizes_checksums",
"(",
"checksums_path",
")",
":",
"checksums",
"=",
"{",
"}",
"for",
"line",
"in",
"_read_file",
"(",
"checksums_path",
")",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"not",
"line",
":",
"continue",
"# URL might have spaces inside, but size and checksum will not.",
"url",
",",
"size",
",",
"checksum",
"=",
"line",
".",
"rsplit",
"(",
"' '",
",",
"2",
")",
"checksums",
"[",
"url",
"]",
"=",
"(",
"int",
"(",
"size",
")",
",",
"checksum",
")",
"return",
"checksums"
] |
Returns {URL: (size, checksum)}s stored within file.
|
[
"Returns",
"{",
"URL",
":",
"(",
"size",
"checksum",
")",
"}",
"s",
"stored",
"within",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L75-L84
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/checksums.py
|
get_all_sizes_checksums
|
def get_all_sizes_checksums():
"""Returns dict associating URL to (size, sha256)."""
sizes_checksums = {}
for path in _checksum_paths().values():
data = _get_sizes_checksums(path)
for url, size_checksum in data.items():
if (url in sizes_checksums and
sizes_checksums[url] != size_checksum):
raise AssertionError(
'URL %s is registered with 2+ distinct size/checksum tuples.' % url)
sizes_checksums.update(data)
return sizes_checksums
|
python
|
def get_all_sizes_checksums():
"""Returns dict associating URL to (size, sha256)."""
sizes_checksums = {}
for path in _checksum_paths().values():
data = _get_sizes_checksums(path)
for url, size_checksum in data.items():
if (url in sizes_checksums and
sizes_checksums[url] != size_checksum):
raise AssertionError(
'URL %s is registered with 2+ distinct size/checksum tuples.' % url)
sizes_checksums.update(data)
return sizes_checksums
|
[
"def",
"get_all_sizes_checksums",
"(",
")",
":",
"sizes_checksums",
"=",
"{",
"}",
"for",
"path",
"in",
"_checksum_paths",
"(",
")",
".",
"values",
"(",
")",
":",
"data",
"=",
"_get_sizes_checksums",
"(",
"path",
")",
"for",
"url",
",",
"size_checksum",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"(",
"url",
"in",
"sizes_checksums",
"and",
"sizes_checksums",
"[",
"url",
"]",
"!=",
"size_checksum",
")",
":",
"raise",
"AssertionError",
"(",
"'URL %s is registered with 2+ distinct size/checksum tuples.'",
"%",
"url",
")",
"sizes_checksums",
".",
"update",
"(",
"data",
")",
"return",
"sizes_checksums"
] |
Returns dict associating URL to (size, sha256).
|
[
"Returns",
"dict",
"associating",
"URL",
"to",
"(",
"size",
"sha256",
")",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L88-L99
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/checksums.py
|
store_checksums
|
def store_checksums(dataset_name, sizes_checksums):
"""Store given checksums and sizes for specific dataset.
Content of file is never disgarded, only updated. This is to ensure that if
process is killed right after first download finishes, checksums registered
during previous runs aren't lost.
It is the responsibility of the caller not to call function multiple times in
parallel for a given dataset.
Only original file content is updated. This means the entire set of new sizes
and checksums must be given at every call.
Args:
dataset_name: string.
sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
"""
path = _get_path(dataset_name)
original_data = _get_sizes_checksums(path)
new_data = original_data.copy()
new_data.update(sizes_checksums)
if original_data == new_data:
return
with tf.io.gfile.GFile(path, 'w') as f:
for url, (size, checksum) in sorted(new_data.items()):
f.write('%s %s %s\n' % (url, size, checksum))
|
python
|
def store_checksums(dataset_name, sizes_checksums):
"""Store given checksums and sizes for specific dataset.
Content of file is never disgarded, only updated. This is to ensure that if
process is killed right after first download finishes, checksums registered
during previous runs aren't lost.
It is the responsibility of the caller not to call function multiple times in
parallel for a given dataset.
Only original file content is updated. This means the entire set of new sizes
and checksums must be given at every call.
Args:
dataset_name: string.
sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
"""
path = _get_path(dataset_name)
original_data = _get_sizes_checksums(path)
new_data = original_data.copy()
new_data.update(sizes_checksums)
if original_data == new_data:
return
with tf.io.gfile.GFile(path, 'w') as f:
for url, (size, checksum) in sorted(new_data.items()):
f.write('%s %s %s\n' % (url, size, checksum))
|
[
"def",
"store_checksums",
"(",
"dataset_name",
",",
"sizes_checksums",
")",
":",
"path",
"=",
"_get_path",
"(",
"dataset_name",
")",
"original_data",
"=",
"_get_sizes_checksums",
"(",
"path",
")",
"new_data",
"=",
"original_data",
".",
"copy",
"(",
")",
"new_data",
".",
"update",
"(",
"sizes_checksums",
")",
"if",
"original_data",
"==",
"new_data",
":",
"return",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"url",
",",
"(",
"size",
",",
"checksum",
")",
"in",
"sorted",
"(",
"new_data",
".",
"items",
"(",
")",
")",
":",
"f",
".",
"write",
"(",
"'%s %s %s\\n'",
"%",
"(",
"url",
",",
"size",
",",
"checksum",
")",
")"
] |
Store given checksums and sizes for specific dataset.
Content of file is never disgarded, only updated. This is to ensure that if
process is killed right after first download finishes, checksums registered
during previous runs aren't lost.
It is the responsibility of the caller not to call function multiple times in
parallel for a given dataset.
Only original file content is updated. This means the entire set of new sizes
and checksums must be given at every call.
Args:
dataset_name: string.
sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
|
[
"Store",
"given",
"checksums",
"and",
"sizes",
"for",
"specific",
"dataset",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L102-L127
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/resource.py
|
_guess_extract_method
|
def _guess_extract_method(fname):
"""Guess extraction method, given file name (or path)."""
for method, extensions in _EXTRACTION_METHOD_TO_EXTS:
for ext in extensions:
if fname.endswith(ext):
return method
return ExtractMethod.NO_EXTRACT
|
python
|
def _guess_extract_method(fname):
"""Guess extraction method, given file name (or path)."""
for method, extensions in _EXTRACTION_METHOD_TO_EXTS:
for ext in extensions:
if fname.endswith(ext):
return method
return ExtractMethod.NO_EXTRACT
|
[
"def",
"_guess_extract_method",
"(",
"fname",
")",
":",
"for",
"method",
",",
"extensions",
"in",
"_EXTRACTION_METHOD_TO_EXTS",
":",
"for",
"ext",
"in",
"extensions",
":",
"if",
"fname",
".",
"endswith",
"(",
"ext",
")",
":",
"return",
"method",
"return",
"ExtractMethod",
".",
"NO_EXTRACT"
] |
Guess extraction method, given file name (or path).
|
[
"Guess",
"extraction",
"method",
"given",
"file",
"name",
"(",
"or",
"path",
")",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L93-L99
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/resource.py
|
_sanitize_url
|
def _sanitize_url(url, max_length):
"""Sanitize and shorten url to fit in max_length.
Function is stable: same input MUST ALWAYS give same result, accros changes
in code as well. Different URLs might give same result.
As much as possible, the extension should be kept.
Heuristics are applied to only keep useful info from url.
1- Drop generic [sub]domains.
'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
'storage.googleapis.com/foo/...' -> 'foo/...'
'drive.google.com/bar/...' -> 'bar/...'
'github.com/baz/...' -> 'baz/...'
2- Remove leading '0's from url components:
'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'
3- Truncate each component of url until total size fits or each component is
left with 4 chars (or total size is <= limit):
'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
(here truncate components to 4 chars per component max)
-> 'Move_64x6_png/trai-4-of-10.tfrecords'
4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'
Args:
url: string, url to sanitize and shorten.
max_length: int, max length of result.
Returns:
(string, string): sanitized and shorted url, file extension.
"""
url = urllib.parse.urlparse(url)
netloc = url.netloc
for prefix in _NETLOC_COMMON_PREFIXES:
if netloc.startswith(prefix):
netloc = netloc[len(prefix):]
for suffix in _NETLOC_COMMON_SUFFIXES:
if netloc.endswith(suffix):
netloc = netloc[:-len(suffix)]
url = '%s%s%s%s' % (netloc, url.path, url.params, url.query)
# Get the extension:
for ext in _KNOWN_EXTENSIONS:
if url.endswith(ext):
extension = ext
url = url[:-len(extension)]
break
else:
url, extension = os.path.splitext(url)
max_length -= len(extension)
# Replace non authorized chars (including '/') by '_':
url = re.sub(r'[^a-zA-Z0-9\.\-_]+', '_', url)
# Remove parts with no info:
for common_part in _URL_COMMON_PARTS:
url = url.replace(common_part, '_')
url = url.strip('_')
# Remove leading zeros in groups of numbers:
url = re.sub('(?<![0-9])0+(?=[0-9])', '', url)
# Decrease max size of URL components:
c_size = max(len(c) for c in re.split(r'[\.\-_]', url))
while c_size > 4 and len(url) > max_length:
c_size -= 1
url = re.sub(r'[^\.\-_]{4,}', lambda match: match.group(0)[:c_size], url)
return url[:max_length], extension
|
python
|
def _sanitize_url(url, max_length):
"""Sanitize and shorten url to fit in max_length.
Function is stable: same input MUST ALWAYS give same result, accros changes
in code as well. Different URLs might give same result.
As much as possible, the extension should be kept.
Heuristics are applied to only keep useful info from url.
1- Drop generic [sub]domains.
'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
'storage.googleapis.com/foo/...' -> 'foo/...'
'drive.google.com/bar/...' -> 'bar/...'
'github.com/baz/...' -> 'baz/...'
2- Remove leading '0's from url components:
'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'
3- Truncate each component of url until total size fits or each component is
left with 4 chars (or total size is <= limit):
'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
(here truncate components to 4 chars per component max)
-> 'Move_64x6_png/trai-4-of-10.tfrecords'
4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'
Args:
url: string, url to sanitize and shorten.
max_length: int, max length of result.
Returns:
(string, string): sanitized and shorted url, file extension.
"""
url = urllib.parse.urlparse(url)
netloc = url.netloc
for prefix in _NETLOC_COMMON_PREFIXES:
if netloc.startswith(prefix):
netloc = netloc[len(prefix):]
for suffix in _NETLOC_COMMON_SUFFIXES:
if netloc.endswith(suffix):
netloc = netloc[:-len(suffix)]
url = '%s%s%s%s' % (netloc, url.path, url.params, url.query)
# Get the extension:
for ext in _KNOWN_EXTENSIONS:
if url.endswith(ext):
extension = ext
url = url[:-len(extension)]
break
else:
url, extension = os.path.splitext(url)
max_length -= len(extension)
# Replace non authorized chars (including '/') by '_':
url = re.sub(r'[^a-zA-Z0-9\.\-_]+', '_', url)
# Remove parts with no info:
for common_part in _URL_COMMON_PARTS:
url = url.replace(common_part, '_')
url = url.strip('_')
# Remove leading zeros in groups of numbers:
url = re.sub('(?<![0-9])0+(?=[0-9])', '', url)
# Decrease max size of URL components:
c_size = max(len(c) for c in re.split(r'[\.\-_]', url))
while c_size > 4 and len(url) > max_length:
c_size -= 1
url = re.sub(r'[^\.\-_]{4,}', lambda match: match.group(0)[:c_size], url)
return url[:max_length], extension
|
[
"def",
"_sanitize_url",
"(",
"url",
",",
"max_length",
")",
":",
"url",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
"netloc",
"=",
"url",
".",
"netloc",
"for",
"prefix",
"in",
"_NETLOC_COMMON_PREFIXES",
":",
"if",
"netloc",
".",
"startswith",
"(",
"prefix",
")",
":",
"netloc",
"=",
"netloc",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"for",
"suffix",
"in",
"_NETLOC_COMMON_SUFFIXES",
":",
"if",
"netloc",
".",
"endswith",
"(",
"suffix",
")",
":",
"netloc",
"=",
"netloc",
"[",
":",
"-",
"len",
"(",
"suffix",
")",
"]",
"url",
"=",
"'%s%s%s%s'",
"%",
"(",
"netloc",
",",
"url",
".",
"path",
",",
"url",
".",
"params",
",",
"url",
".",
"query",
")",
"# Get the extension:",
"for",
"ext",
"in",
"_KNOWN_EXTENSIONS",
":",
"if",
"url",
".",
"endswith",
"(",
"ext",
")",
":",
"extension",
"=",
"ext",
"url",
"=",
"url",
"[",
":",
"-",
"len",
"(",
"extension",
")",
"]",
"break",
"else",
":",
"url",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"url",
")",
"max_length",
"-=",
"len",
"(",
"extension",
")",
"# Replace non authorized chars (including '/') by '_':",
"url",
"=",
"re",
".",
"sub",
"(",
"r'[^a-zA-Z0-9\\.\\-_]+'",
",",
"'_'",
",",
"url",
")",
"# Remove parts with no info:",
"for",
"common_part",
"in",
"_URL_COMMON_PARTS",
":",
"url",
"=",
"url",
".",
"replace",
"(",
"common_part",
",",
"'_'",
")",
"url",
"=",
"url",
".",
"strip",
"(",
"'_'",
")",
"# Remove leading zeros in groups of numbers:",
"url",
"=",
"re",
".",
"sub",
"(",
"'(?<![0-9])0+(?=[0-9])'",
",",
"''",
",",
"url",
")",
"# Decrease max size of URL components:",
"c_size",
"=",
"max",
"(",
"len",
"(",
"c",
")",
"for",
"c",
"in",
"re",
".",
"split",
"(",
"r'[\\.\\-_]'",
",",
"url",
")",
")",
"while",
"c_size",
">",
"4",
"and",
"len",
"(",
"url",
")",
">",
"max_length",
":",
"c_size",
"-=",
"1",
"url",
"=",
"re",
".",
"sub",
"(",
"r'[^\\.\\-_]{4,}'",
",",
"lambda",
"match",
":",
"match",
".",
"group",
"(",
"0",
")",
"[",
":",
"c_size",
"]",
",",
"url",
")",
"return",
"url",
"[",
":",
"max_length",
"]",
",",
"extension"
] |
Sanitize and shorten url to fit in max_length.
Function is stable: same input MUST ALWAYS give same result, accros changes
in code as well. Different URLs might give same result.
As much as possible, the extension should be kept.
Heuristics are applied to only keep useful info from url.
1- Drop generic [sub]domains.
'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
'storage.googleapis.com/foo/...' -> 'foo/...'
'drive.google.com/bar/...' -> 'bar/...'
'github.com/baz/...' -> 'baz/...'
2- Remove leading '0's from url components:
'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'
3- Truncate each component of url until total size fits or each component is
left with 4 chars (or total size is <= limit):
'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
(here truncate components to 4 chars per component max)
-> 'Move_64x6_png/trai-4-of-10.tfrecords'
4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'
Args:
url: string, url to sanitize and shorten.
max_length: int, max length of result.
Returns:
(string, string): sanitized and shorted url, file extension.
|
[
"Sanitize",
"and",
"shorten",
"url",
"to",
"fit",
"in",
"max_length",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L102-L166
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/resource.py
|
get_dl_fname
|
def get_dl_fname(url, checksum):
"""Returns name of file for (url, checksum).
The max length of linux and windows filenames is 255 chars.
Windows however expects short paths (260 chars), so we limit the file name
to an arbitrary 90 chars.
Naming pattern: '${url}${checksum}'.
- url: url sanitized and shortened to 46 chars.
- checksum: base64url encoded sha256: 44 chars (removing trailing '=').
Args:
url: `str`, url of the file.
checksum: `str` (hex), the sha256 hexdigest of file or url.
Returns:
string of 90 chars max.
"""
checksum = base64.urlsafe_b64encode(_decode_hex(checksum))
checksum = tf.compat.as_text(checksum)[:-1]
name, extension = _sanitize_url(url, max_length=46)
return '%s%s%s' % (name, checksum, extension)
|
python
|
def get_dl_fname(url, checksum):
"""Returns name of file for (url, checksum).
The max length of linux and windows filenames is 255 chars.
Windows however expects short paths (260 chars), so we limit the file name
to an arbitrary 90 chars.
Naming pattern: '${url}${checksum}'.
- url: url sanitized and shortened to 46 chars.
- checksum: base64url encoded sha256: 44 chars (removing trailing '=').
Args:
url: `str`, url of the file.
checksum: `str` (hex), the sha256 hexdigest of file or url.
Returns:
string of 90 chars max.
"""
checksum = base64.urlsafe_b64encode(_decode_hex(checksum))
checksum = tf.compat.as_text(checksum)[:-1]
name, extension = _sanitize_url(url, max_length=46)
return '%s%s%s' % (name, checksum, extension)
|
[
"def",
"get_dl_fname",
"(",
"url",
",",
"checksum",
")",
":",
"checksum",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"_decode_hex",
"(",
"checksum",
")",
")",
"checksum",
"=",
"tf",
".",
"compat",
".",
"as_text",
"(",
"checksum",
")",
"[",
":",
"-",
"1",
"]",
"name",
",",
"extension",
"=",
"_sanitize_url",
"(",
"url",
",",
"max_length",
"=",
"46",
")",
"return",
"'%s%s%s'",
"%",
"(",
"name",
",",
"checksum",
",",
"extension",
")"
] |
Returns name of file for (url, checksum).
The max length of linux and windows filenames is 255 chars.
Windows however expects short paths (260 chars), so we limit the file name
to an arbitrary 90 chars.
Naming pattern: '${url}${checksum}'.
- url: url sanitized and shortened to 46 chars.
- checksum: base64url encoded sha256: 44 chars (removing trailing '=').
Args:
url: `str`, url of the file.
checksum: `str` (hex), the sha256 hexdigest of file or url.
Returns:
string of 90 chars max.
|
[
"Returns",
"name",
"of",
"file",
"for",
"(",
"url",
"checksum",
")",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L169-L190
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/resource.py
|
get_dl_dirname
|
def get_dl_dirname(url):
"""Returns name of temp dir for given url."""
checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest()
return get_dl_fname(url, checksum)
|
python
|
def get_dl_dirname(url):
"""Returns name of temp dir for given url."""
checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest()
return get_dl_fname(url, checksum)
|
[
"def",
"get_dl_dirname",
"(",
"url",
")",
":",
"checksum",
"=",
"hashlib",
".",
"sha256",
"(",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"url",
")",
")",
".",
"hexdigest",
"(",
")",
"return",
"get_dl_fname",
"(",
"url",
",",
"checksum",
")"
] |
Returns name of temp dir for given url.
|
[
"Returns",
"name",
"of",
"temp",
"dir",
"for",
"given",
"url",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L193-L196
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/resource.py
|
_read_info
|
def _read_info(info_path):
"""Returns info dict or None."""
if not tf.io.gfile.exists(info_path):
return None
with tf.io.gfile.GFile(info_path) as info_f:
return json.load(info_f)
|
python
|
def _read_info(info_path):
"""Returns info dict or None."""
if not tf.io.gfile.exists(info_path):
return None
with tf.io.gfile.GFile(info_path) as info_f:
return json.load(info_f)
|
[
"def",
"_read_info",
"(",
"info_path",
")",
":",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"info_path",
")",
":",
"return",
"None",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"info_path",
")",
"as",
"info_f",
":",
"return",
"json",
".",
"load",
"(",
"info_f",
")"
] |
Returns info dict or None.
|
[
"Returns",
"info",
"dict",
"or",
"None",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L204-L209
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/resource.py
|
write_info_file
|
def write_info_file(resource, path, dataset_name, original_fname):
"""Write the INFO file next to local file.
Although the method is synchronized, there is still a risk two processes
running at the same time overlap here. Risk accepted, since potentially lost
data (`dataset_name`) is only for human consumption.
Args:
resource: resource for which to write the INFO file.
path: path of downloaded file.
dataset_name: data used to dl the file.
original_fname: name of file as downloaded.
"""
info_path = _get_info_path(path)
info = _read_info(info_path) or {}
urls = set(info.get('urls', []) + [resource.url])
dataset_names = info.get('dataset_names', [])
if dataset_name:
dataset_names.append(dataset_name)
if 'original_fname' in info and info['original_fname'] != original_fname:
raise AssertionError(
'`original_fname` "%s" stored in %s does NOT match "%s".' % (
info['original_fname'], info_path, original_fname))
info = dict(urls=list(urls), dataset_names=list(set(dataset_names)),
original_fname=original_fname)
with py_utils.atomic_write(info_path, 'w') as info_f:
json.dump(info, info_f, sort_keys=True)
|
python
|
def write_info_file(resource, path, dataset_name, original_fname):
"""Write the INFO file next to local file.
Although the method is synchronized, there is still a risk two processes
running at the same time overlap here. Risk accepted, since potentially lost
data (`dataset_name`) is only for human consumption.
Args:
resource: resource for which to write the INFO file.
path: path of downloaded file.
dataset_name: data used to dl the file.
original_fname: name of file as downloaded.
"""
info_path = _get_info_path(path)
info = _read_info(info_path) or {}
urls = set(info.get('urls', []) + [resource.url])
dataset_names = info.get('dataset_names', [])
if dataset_name:
dataset_names.append(dataset_name)
if 'original_fname' in info and info['original_fname'] != original_fname:
raise AssertionError(
'`original_fname` "%s" stored in %s does NOT match "%s".' % (
info['original_fname'], info_path, original_fname))
info = dict(urls=list(urls), dataset_names=list(set(dataset_names)),
original_fname=original_fname)
with py_utils.atomic_write(info_path, 'w') as info_f:
json.dump(info, info_f, sort_keys=True)
|
[
"def",
"write_info_file",
"(",
"resource",
",",
"path",
",",
"dataset_name",
",",
"original_fname",
")",
":",
"info_path",
"=",
"_get_info_path",
"(",
"path",
")",
"info",
"=",
"_read_info",
"(",
"info_path",
")",
"or",
"{",
"}",
"urls",
"=",
"set",
"(",
"info",
".",
"get",
"(",
"'urls'",
",",
"[",
"]",
")",
"+",
"[",
"resource",
".",
"url",
"]",
")",
"dataset_names",
"=",
"info",
".",
"get",
"(",
"'dataset_names'",
",",
"[",
"]",
")",
"if",
"dataset_name",
":",
"dataset_names",
".",
"append",
"(",
"dataset_name",
")",
"if",
"'original_fname'",
"in",
"info",
"and",
"info",
"[",
"'original_fname'",
"]",
"!=",
"original_fname",
":",
"raise",
"AssertionError",
"(",
"'`original_fname` \"%s\" stored in %s does NOT match \"%s\".'",
"%",
"(",
"info",
"[",
"'original_fname'",
"]",
",",
"info_path",
",",
"original_fname",
")",
")",
"info",
"=",
"dict",
"(",
"urls",
"=",
"list",
"(",
"urls",
")",
",",
"dataset_names",
"=",
"list",
"(",
"set",
"(",
"dataset_names",
")",
")",
",",
"original_fname",
"=",
"original_fname",
")",
"with",
"py_utils",
".",
"atomic_write",
"(",
"info_path",
",",
"'w'",
")",
"as",
"info_f",
":",
"json",
".",
"dump",
"(",
"info",
",",
"info_f",
",",
"sort_keys",
"=",
"True",
")"
] |
Write the INFO file next to local file.
Although the method is synchronized, there is still a risk two processes
running at the same time overlap here. Risk accepted, since potentially lost
data (`dataset_name`) is only for human consumption.
Args:
resource: resource for which to write the INFO file.
path: path of downloaded file.
dataset_name: data used to dl the file.
original_fname: name of file as downloaded.
|
[
"Write",
"the",
"INFO",
"file",
"next",
"to",
"local",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L214-L240
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/resource.py
|
get_extract_method
|
def get_extract_method(path):
"""Returns `ExtractMethod` to use on resource at path. Cannot be None."""
info_path = _get_info_path(path)
info = _read_info(info_path)
fname = info.get('original_fname', path) if info else path
return _guess_extract_method(fname)
|
python
|
def get_extract_method(path):
"""Returns `ExtractMethod` to use on resource at path. Cannot be None."""
info_path = _get_info_path(path)
info = _read_info(info_path)
fname = info.get('original_fname', path) if info else path
return _guess_extract_method(fname)
|
[
"def",
"get_extract_method",
"(",
"path",
")",
":",
"info_path",
"=",
"_get_info_path",
"(",
"path",
")",
"info",
"=",
"_read_info",
"(",
"info_path",
")",
"fname",
"=",
"info",
".",
"get",
"(",
"'original_fname'",
",",
"path",
")",
"if",
"info",
"else",
"path",
"return",
"_guess_extract_method",
"(",
"fname",
")"
] |
Returns `ExtractMethod` to use on resource at path. Cannot be None.
|
[
"Returns",
"ExtractMethod",
"to",
"use",
"on",
"resource",
"at",
"path",
".",
"Cannot",
"be",
"None",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L243-L248
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/resource.py
|
Resource.exists_locally
|
def exists_locally(cls, path):
"""Returns whether the resource exists locally, at `resource.path`."""
# If INFO file doesn't exist, consider resource does NOT exist, as it would
# prevent guessing the `extract_method`.
return (tf.io.gfile.exists(path) and
tf.io.gfile.exists(_get_info_path(path)))
|
python
|
def exists_locally(cls, path):
"""Returns whether the resource exists locally, at `resource.path`."""
# If INFO file doesn't exist, consider resource does NOT exist, as it would
# prevent guessing the `extract_method`.
return (tf.io.gfile.exists(path) and
tf.io.gfile.exists(_get_info_path(path)))
|
[
"def",
"exists_locally",
"(",
"cls",
",",
"path",
")",
":",
"# If INFO file doesn't exist, consider resource does NOT exist, as it would",
"# prevent guessing the `extract_method`.",
"return",
"(",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"path",
")",
"and",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"_get_info_path",
"(",
"path",
")",
")",
")"
] |
Returns whether the resource exists locally, at `resource.path`.
|
[
"Returns",
"whether",
"the",
"resource",
"exists",
"locally",
"at",
"resource",
".",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L273-L278
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/coco.py
|
Coco2014._split_generators
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
root_url = "http://images.cocodataset.org/"
urls = {
# Train/validation set
"train_images": "zips/train2014.zip",
"val_images": "zips/val2014.zip",
"trainval_annotations": "annotations/annotations_trainval2014.zip",
# Testing set (no annotations) (2014)
"test_images": "zips/test2014.zip",
"test_annotations": "annotations/image_info_test2014.zip",
# Testing set (no annotations) (2015)
"test2015_images": "zips/test2015.zip",
"test2015_annotations": "annotations/image_info_test2015.zip",
}
extracted_paths = dl_manager.download_and_extract({
key: root_url + url for key, url in urls.items()
})
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["train_images"],
annotation_dir=extracted_paths["trainval_annotations"],
split_type="train2014",
)),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["val_images"],
annotation_dir=extracted_paths["trainval_annotations"],
split_type="val2014",
)),
# Warning: Testing split only contains the images without any annotation
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["test_images"],
annotation_dir=extracted_paths["test_annotations"],
split_type="test2014",
has_annotation=False,
)),
tfds.core.SplitGenerator(
name="test2015",
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["test2015_images"],
annotation_dir=extracted_paths["test2015_annotations"],
split_type="test2015",
has_annotation=False,
)),
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
root_url = "http://images.cocodataset.org/"
urls = {
# Train/validation set
"train_images": "zips/train2014.zip",
"val_images": "zips/val2014.zip",
"trainval_annotations": "annotations/annotations_trainval2014.zip",
# Testing set (no annotations) (2014)
"test_images": "zips/test2014.zip",
"test_annotations": "annotations/image_info_test2014.zip",
# Testing set (no annotations) (2015)
"test2015_images": "zips/test2015.zip",
"test2015_annotations": "annotations/image_info_test2015.zip",
}
extracted_paths = dl_manager.download_and_extract({
key: root_url + url for key, url in urls.items()
})
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["train_images"],
annotation_dir=extracted_paths["trainval_annotations"],
split_type="train2014",
)),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["val_images"],
annotation_dir=extracted_paths["trainval_annotations"],
split_type="val2014",
)),
# Warning: Testing split only contains the images without any annotation
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["test_images"],
annotation_dir=extracted_paths["test_annotations"],
split_type="test2014",
has_annotation=False,
)),
tfds.core.SplitGenerator(
name="test2015",
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["test2015_images"],
annotation_dir=extracted_paths["test2015_annotations"],
split_type="test2015",
has_annotation=False,
)),
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"root_url",
"=",
"\"http://images.cocodataset.org/\"",
"urls",
"=",
"{",
"# Train/validation set",
"\"train_images\"",
":",
"\"zips/train2014.zip\"",
",",
"\"val_images\"",
":",
"\"zips/val2014.zip\"",
",",
"\"trainval_annotations\"",
":",
"\"annotations/annotations_trainval2014.zip\"",
",",
"# Testing set (no annotations) (2014)",
"\"test_images\"",
":",
"\"zips/test2014.zip\"",
",",
"\"test_annotations\"",
":",
"\"annotations/image_info_test2014.zip\"",
",",
"# Testing set (no annotations) (2015)",
"\"test2015_images\"",
":",
"\"zips/test2015.zip\"",
",",
"\"test2015_annotations\"",
":",
"\"annotations/image_info_test2015.zip\"",
",",
"}",
"extracted_paths",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"{",
"key",
":",
"root_url",
"+",
"url",
"for",
"key",
",",
"url",
"in",
"urls",
".",
"items",
"(",
")",
"}",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
",",
"num_shards",
"=",
"10",
",",
"gen_kwargs",
"=",
"dict",
"(",
"image_dir",
"=",
"extracted_paths",
"[",
"\"train_images\"",
"]",
",",
"annotation_dir",
"=",
"extracted_paths",
"[",
"\"trainval_annotations\"",
"]",
",",
"split_type",
"=",
"\"train2014\"",
",",
")",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"VALIDATION",
",",
"num_shards",
"=",
"10",
",",
"gen_kwargs",
"=",
"dict",
"(",
"image_dir",
"=",
"extracted_paths",
"[",
"\"val_images\"",
"]",
",",
"annotation_dir",
"=",
"extracted_paths",
"[",
"\"trainval_annotations\"",
"]",
",",
"split_type",
"=",
"\"val2014\"",
",",
")",
")",
",",
"# Warning: Testing split only contains the images without any annotation",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TEST",
",",
"num_shards",
"=",
"10",
",",
"gen_kwargs",
"=",
"dict",
"(",
"image_dir",
"=",
"extracted_paths",
"[",
"\"test_images\"",
"]",
",",
"annotation_dir",
"=",
"extracted_paths",
"[",
"\"test_annotations\"",
"]",
",",
"split_type",
"=",
"\"test2014\"",
",",
"has_annotation",
"=",
"False",
",",
")",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"\"test2015\"",
",",
"num_shards",
"=",
"10",
",",
"gen_kwargs",
"=",
"dict",
"(",
"image_dir",
"=",
"extracted_paths",
"[",
"\"test2015_images\"",
"]",
",",
"annotation_dir",
"=",
"extracted_paths",
"[",
"\"test2015_annotations\"",
"]",
",",
"split_type",
"=",
"\"test2015\"",
",",
"has_annotation",
"=",
"False",
",",
")",
")",
",",
"]"
] |
Returns SplitGenerators.
|
[
"Returns",
"SplitGenerators",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/coco.py#L94-L149
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/coco.py
|
Coco2014._generate_examples
|
def _generate_examples(
self, image_dir, annotation_dir, split_type, has_annotation=True):
"""Generate examples as dicts.
Args:
image_dir: `str`, directory containing the images
annotation_dir: `str`, directory containing
split_type: `str`, <split_name><year> (ex: train2014)
has_annotation: `bool`, when False (for the testing set), the annotations
are not recorded
Yields:
Generator yielding the next samples
"""
if has_annotation:
instance_filename = "instances_{}.json"
else:
instance_filename = "image_info_{}.json"
# Load the label names and images
instance_path = os.path.join(
annotation_dir,
"annotations",
instance_filename.format(split_type),
)
coco_annotation = CocoAnnotation(instance_path)
# Each category is a dict:
# {
# 'id': 51, # From 1-91, some entry missing
# 'name': 'bowl',
# 'supercategory': 'kitchen',
# }
categories = coco_annotation.categories
# Each image is a dict:
# {
# 'id': 262145,
# 'file_name': 'COCO_train2014_000000262145.jpg'
# 'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',
# 'coco_url': 'http://images.cocodataset.org/train2014/xyz.jpg',
# 'license': 2,
# 'date_captured': '2013-11-20 02:07:55',
# 'height': 427,
# 'width': 640,
# }
images = coco_annotation.images
# TODO(b/121375022): ClassLabel names should also contains 'id' and
# and 'supercategory' (in addition to 'name')
# Warning: As Coco only use 80 out of the 91 labels, the c['id'] and
# dataset names ids won't match.
self.info.features["objects"]["label"].names = [
c["name"] for c in categories
]
# TODO(b/121375022): Conversion should be done by ClassLabel
categories_id2name = {c["id"]: c["name"] for c in categories}
# Iterate over all images
annotation_skipped = 0
for image_info in sorted(images, key=lambda x: x["id"]):
if has_annotation:
# Each instance annotation is a dict:
# {
# 'iscrowd': 0,
# 'bbox': [116.95, 305.86, 285.3, 266.03],
# 'image_id': 480023,
# 'segmentation': [[312.29, 562.89, 402.25, ...]],
# 'category_id': 58,
# 'area': 54652.9556,
# 'id': 86,
# }
instances = coco_annotation.get_annotations(img_id=image_info["id"])
else:
instances = [] # No annotations
if not instances:
annotation_skipped += 1
def build_bbox(x, y, width, height):
# pylint: disable=cell-var-from-loop
# build_bbox is only used within the loop so it is ok to use image_info
return tfds.features.BBox(
ymin=y / image_info["height"],
xmin=x / image_info["width"],
ymax=(y + height) / image_info["height"],
xmax=(x + width) / image_info["width"],
)
# pylint: enable=cell-var-from-loop
yield {
"image": os.path.join(image_dir, split_type, image_info["file_name"]),
"image/filename": image_info["file_name"],
"objects": [{
"bbox": build_bbox(*instance_info["bbox"]),
"label": categories_id2name[instance_info["category_id"]],
"is_crowd": bool(instance_info["iscrowd"]),
} for instance_info in instances],
}
logging.info(
"%d/%d images do not contains any annotations",
annotation_skipped,
len(images),
)
|
python
|
def _generate_examples(
self, image_dir, annotation_dir, split_type, has_annotation=True):
"""Generate examples as dicts.
Args:
image_dir: `str`, directory containing the images
annotation_dir: `str`, directory containing
split_type: `str`, <split_name><year> (ex: train2014)
has_annotation: `bool`, when False (for the testing set), the annotations
are not recorded
Yields:
Generator yielding the next samples
"""
if has_annotation:
instance_filename = "instances_{}.json"
else:
instance_filename = "image_info_{}.json"
# Load the label names and images
instance_path = os.path.join(
annotation_dir,
"annotations",
instance_filename.format(split_type),
)
coco_annotation = CocoAnnotation(instance_path)
# Each category is a dict:
# {
# 'id': 51, # From 1-91, some entry missing
# 'name': 'bowl',
# 'supercategory': 'kitchen',
# }
categories = coco_annotation.categories
# Each image is a dict:
# {
# 'id': 262145,
# 'file_name': 'COCO_train2014_000000262145.jpg'
# 'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',
# 'coco_url': 'http://images.cocodataset.org/train2014/xyz.jpg',
# 'license': 2,
# 'date_captured': '2013-11-20 02:07:55',
# 'height': 427,
# 'width': 640,
# }
images = coco_annotation.images
# TODO(b/121375022): ClassLabel names should also contains 'id' and
# and 'supercategory' (in addition to 'name')
# Warning: As Coco only use 80 out of the 91 labels, the c['id'] and
# dataset names ids won't match.
self.info.features["objects"]["label"].names = [
c["name"] for c in categories
]
# TODO(b/121375022): Conversion should be done by ClassLabel
categories_id2name = {c["id"]: c["name"] for c in categories}
# Iterate over all images
annotation_skipped = 0
for image_info in sorted(images, key=lambda x: x["id"]):
if has_annotation:
# Each instance annotation is a dict:
# {
# 'iscrowd': 0,
# 'bbox': [116.95, 305.86, 285.3, 266.03],
# 'image_id': 480023,
# 'segmentation': [[312.29, 562.89, 402.25, ...]],
# 'category_id': 58,
# 'area': 54652.9556,
# 'id': 86,
# }
instances = coco_annotation.get_annotations(img_id=image_info["id"])
else:
instances = [] # No annotations
if not instances:
annotation_skipped += 1
def build_bbox(x, y, width, height):
# pylint: disable=cell-var-from-loop
# build_bbox is only used within the loop so it is ok to use image_info
return tfds.features.BBox(
ymin=y / image_info["height"],
xmin=x / image_info["width"],
ymax=(y + height) / image_info["height"],
xmax=(x + width) / image_info["width"],
)
# pylint: enable=cell-var-from-loop
yield {
"image": os.path.join(image_dir, split_type, image_info["file_name"]),
"image/filename": image_info["file_name"],
"objects": [{
"bbox": build_bbox(*instance_info["bbox"]),
"label": categories_id2name[instance_info["category_id"]],
"is_crowd": bool(instance_info["iscrowd"]),
} for instance_info in instances],
}
logging.info(
"%d/%d images do not contains any annotations",
annotation_skipped,
len(images),
)
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"image_dir",
",",
"annotation_dir",
",",
"split_type",
",",
"has_annotation",
"=",
"True",
")",
":",
"if",
"has_annotation",
":",
"instance_filename",
"=",
"\"instances_{}.json\"",
"else",
":",
"instance_filename",
"=",
"\"image_info_{}.json\"",
"# Load the label names and images",
"instance_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"annotation_dir",
",",
"\"annotations\"",
",",
"instance_filename",
".",
"format",
"(",
"split_type",
")",
",",
")",
"coco_annotation",
"=",
"CocoAnnotation",
"(",
"instance_path",
")",
"# Each category is a dict:",
"# {",
"# 'id': 51, # From 1-91, some entry missing",
"# 'name': 'bowl',",
"# 'supercategory': 'kitchen',",
"# }",
"categories",
"=",
"coco_annotation",
".",
"categories",
"# Each image is a dict:",
"# {",
"# 'id': 262145,",
"# 'file_name': 'COCO_train2014_000000262145.jpg'",
"# 'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',",
"# 'coco_url': 'http://images.cocodataset.org/train2014/xyz.jpg',",
"# 'license': 2,",
"# 'date_captured': '2013-11-20 02:07:55',",
"# 'height': 427,",
"# 'width': 640,",
"# }",
"images",
"=",
"coco_annotation",
".",
"images",
"# TODO(b/121375022): ClassLabel names should also contains 'id' and",
"# and 'supercategory' (in addition to 'name')",
"# Warning: As Coco only use 80 out of the 91 labels, the c['id'] and",
"# dataset names ids won't match.",
"self",
".",
"info",
".",
"features",
"[",
"\"objects\"",
"]",
"[",
"\"label\"",
"]",
".",
"names",
"=",
"[",
"c",
"[",
"\"name\"",
"]",
"for",
"c",
"in",
"categories",
"]",
"# TODO(b/121375022): Conversion should be done by ClassLabel",
"categories_id2name",
"=",
"{",
"c",
"[",
"\"id\"",
"]",
":",
"c",
"[",
"\"name\"",
"]",
"for",
"c",
"in",
"categories",
"}",
"# Iterate over all images",
"annotation_skipped",
"=",
"0",
"for",
"image_info",
"in",
"sorted",
"(",
"images",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"\"id\"",
"]",
")",
":",
"if",
"has_annotation",
":",
"# Each instance annotation is a dict:",
"# {",
"# 'iscrowd': 0,",
"# 'bbox': [116.95, 305.86, 285.3, 266.03],",
"# 'image_id': 480023,",
"# 'segmentation': [[312.29, 562.89, 402.25, ...]],",
"# 'category_id': 58,",
"# 'area': 54652.9556,",
"# 'id': 86,",
"# }",
"instances",
"=",
"coco_annotation",
".",
"get_annotations",
"(",
"img_id",
"=",
"image_info",
"[",
"\"id\"",
"]",
")",
"else",
":",
"instances",
"=",
"[",
"]",
"# No annotations",
"if",
"not",
"instances",
":",
"annotation_skipped",
"+=",
"1",
"def",
"build_bbox",
"(",
"x",
",",
"y",
",",
"width",
",",
"height",
")",
":",
"# pylint: disable=cell-var-from-loop",
"# build_bbox is only used within the loop so it is ok to use image_info",
"return",
"tfds",
".",
"features",
".",
"BBox",
"(",
"ymin",
"=",
"y",
"/",
"image_info",
"[",
"\"height\"",
"]",
",",
"xmin",
"=",
"x",
"/",
"image_info",
"[",
"\"width\"",
"]",
",",
"ymax",
"=",
"(",
"y",
"+",
"height",
")",
"/",
"image_info",
"[",
"\"height\"",
"]",
",",
"xmax",
"=",
"(",
"x",
"+",
"width",
")",
"/",
"image_info",
"[",
"\"width\"",
"]",
",",
")",
"# pylint: enable=cell-var-from-loop",
"yield",
"{",
"\"image\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"image_dir",
",",
"split_type",
",",
"image_info",
"[",
"\"file_name\"",
"]",
")",
",",
"\"image/filename\"",
":",
"image_info",
"[",
"\"file_name\"",
"]",
",",
"\"objects\"",
":",
"[",
"{",
"\"bbox\"",
":",
"build_bbox",
"(",
"*",
"instance_info",
"[",
"\"bbox\"",
"]",
")",
",",
"\"label\"",
":",
"categories_id2name",
"[",
"instance_info",
"[",
"\"category_id\"",
"]",
"]",
",",
"\"is_crowd\"",
":",
"bool",
"(",
"instance_info",
"[",
"\"iscrowd\"",
"]",
")",
",",
"}",
"for",
"instance_info",
"in",
"instances",
"]",
",",
"}",
"logging",
".",
"info",
"(",
"\"%d/%d images do not contains any annotations\"",
",",
"annotation_skipped",
",",
"len",
"(",
"images",
")",
",",
")"
] |
Generate examples as dicts.
Args:
image_dir: `str`, directory containing the images
annotation_dir: `str`, directory containing
split_type: `str`, <split_name><year> (ex: train2014)
has_annotation: `bool`, when False (for the testing set), the annotations
are not recorded
Yields:
Generator yielding the next samples
|
[
"Generate",
"examples",
"as",
"dicts",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/coco.py#L151-L252
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text_feature.py
|
Text.str2ints
|
def str2ints(self, str_value):
"""Conversion string => encoded list[int]."""
if not self._encoder:
raise ValueError(
"Text.str2ints is not available because encoder hasn't been defined.")
return self._encoder.encode(str_value)
|
python
|
def str2ints(self, str_value):
"""Conversion string => encoded list[int]."""
if not self._encoder:
raise ValueError(
"Text.str2ints is not available because encoder hasn't been defined.")
return self._encoder.encode(str_value)
|
[
"def",
"str2ints",
"(",
"self",
",",
"str_value",
")",
":",
"if",
"not",
"self",
".",
"_encoder",
":",
"raise",
"ValueError",
"(",
"\"Text.str2ints is not available because encoder hasn't been defined.\"",
")",
"return",
"self",
".",
"_encoder",
".",
"encode",
"(",
"str_value",
")"
] |
Conversion string => encoded list[int].
|
[
"Conversion",
"string",
"=",
">",
"encoded",
"list",
"[",
"int",
"]",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text_feature.py#L83-L88
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text_feature.py
|
Text.ints2str
|
def ints2str(self, int_values):
"""Conversion list[int] => decoded string."""
if not self._encoder:
raise ValueError(
"Text.ints2str is not available because encoder hasn't been defined.")
return self._encoder.decode(int_values)
|
python
|
def ints2str(self, int_values):
"""Conversion list[int] => decoded string."""
if not self._encoder:
raise ValueError(
"Text.ints2str is not available because encoder hasn't been defined.")
return self._encoder.decode(int_values)
|
[
"def",
"ints2str",
"(",
"self",
",",
"int_values",
")",
":",
"if",
"not",
"self",
".",
"_encoder",
":",
"raise",
"ValueError",
"(",
"\"Text.ints2str is not available because encoder hasn't been defined.\"",
")",
"return",
"self",
".",
"_encoder",
".",
"decode",
"(",
"int_values",
")"
] |
Conversion list[int] => decoded string.
|
[
"Conversion",
"list",
"[",
"int",
"]",
"=",
">",
"decoded",
"string",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text_feature.py#L90-L95
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/features/text_feature.py
|
Text.maybe_build_from_corpus
|
def maybe_build_from_corpus(self, corpus_generator, **kwargs):
"""Call SubwordTextEncoder.build_from_corpus is encoder_cls is such."""
if self._encoder_cls is not text_lib.SubwordTextEncoder:
return
if self.encoder:
return
vocab_size = self._encoder_config.vocab_size
self.encoder = text_lib.SubwordTextEncoder.build_from_corpus(
corpus_generator=corpus_generator,
target_vocab_size=vocab_size,
**kwargs)
|
python
|
def maybe_build_from_corpus(self, corpus_generator, **kwargs):
"""Call SubwordTextEncoder.build_from_corpus is encoder_cls is such."""
if self._encoder_cls is not text_lib.SubwordTextEncoder:
return
if self.encoder:
return
vocab_size = self._encoder_config.vocab_size
self.encoder = text_lib.SubwordTextEncoder.build_from_corpus(
corpus_generator=corpus_generator,
target_vocab_size=vocab_size,
**kwargs)
|
[
"def",
"maybe_build_from_corpus",
"(",
"self",
",",
"corpus_generator",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_encoder_cls",
"is",
"not",
"text_lib",
".",
"SubwordTextEncoder",
":",
"return",
"if",
"self",
".",
"encoder",
":",
"return",
"vocab_size",
"=",
"self",
".",
"_encoder_config",
".",
"vocab_size",
"self",
".",
"encoder",
"=",
"text_lib",
".",
"SubwordTextEncoder",
".",
"build_from_corpus",
"(",
"corpus_generator",
"=",
"corpus_generator",
",",
"target_vocab_size",
"=",
"vocab_size",
",",
"*",
"*",
"kwargs",
")"
] |
Call SubwordTextEncoder.build_from_corpus is encoder_cls is such.
|
[
"Call",
"SubwordTextEncoder",
".",
"build_from_corpus",
"is",
"encoder_cls",
"is",
"such",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text_feature.py#L137-L148
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/naming.py
|
sharded_filenames
|
def sharded_filenames(filename_prefix, num_shards):
"""Sharded filenames given prefix and number of shards."""
shard_suffix = "%05d-of-%05d"
return [
"%s-%s" % (filename_prefix, shard_suffix % (i, num_shards))
for i in range(num_shards)
]
|
python
|
def sharded_filenames(filename_prefix, num_shards):
"""Sharded filenames given prefix and number of shards."""
shard_suffix = "%05d-of-%05d"
return [
"%s-%s" % (filename_prefix, shard_suffix % (i, num_shards))
for i in range(num_shards)
]
|
[
"def",
"sharded_filenames",
"(",
"filename_prefix",
",",
"num_shards",
")",
":",
"shard_suffix",
"=",
"\"%05d-of-%05d\"",
"return",
"[",
"\"%s-%s\"",
"%",
"(",
"filename_prefix",
",",
"shard_suffix",
"%",
"(",
"i",
",",
"num_shards",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_shards",
")",
"]"
] |
Sharded filenames given prefix and number of shards.
|
[
"Sharded",
"filenames",
"given",
"prefix",
"and",
"number",
"of",
"shards",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/naming.py#L52-L58
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/omniglot.py
|
_walk_omniglot_dir
|
def _walk_omniglot_dir(directory):
"""Walk an Omniglot directory and yield examples."""
directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0])
alphabets = sorted(tf.io.gfile.listdir(directory))
for alphabet in alphabets:
alphabet_dir = os.path.join(directory, alphabet)
characters = sorted(tf.io.gfile.listdir(alphabet_dir))
for character in characters:
character_id = int(character[len("character"):]) - 1
character_dir = os.path.join(alphabet_dir, character)
images = tf.io.gfile.listdir(character_dir)
for image in images:
label, _ = image.split("_")
label = int(label) - 1
image_path = os.path.join(character_dir, image)
yield alphabet, character_id, label, image_path
|
python
|
def _walk_omniglot_dir(directory):
"""Walk an Omniglot directory and yield examples."""
directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0])
alphabets = sorted(tf.io.gfile.listdir(directory))
for alphabet in alphabets:
alphabet_dir = os.path.join(directory, alphabet)
characters = sorted(tf.io.gfile.listdir(alphabet_dir))
for character in characters:
character_id = int(character[len("character"):]) - 1
character_dir = os.path.join(alphabet_dir, character)
images = tf.io.gfile.listdir(character_dir)
for image in images:
label, _ = image.split("_")
label = int(label) - 1
image_path = os.path.join(character_dir, image)
yield alphabet, character_id, label, image_path
|
[
"def",
"_walk_omniglot_dir",
"(",
"directory",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"directory",
")",
"[",
"0",
"]",
")",
"alphabets",
"=",
"sorted",
"(",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"directory",
")",
")",
"for",
"alphabet",
"in",
"alphabets",
":",
"alphabet_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"alphabet",
")",
"characters",
"=",
"sorted",
"(",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"alphabet_dir",
")",
")",
"for",
"character",
"in",
"characters",
":",
"character_id",
"=",
"int",
"(",
"character",
"[",
"len",
"(",
"\"character\"",
")",
":",
"]",
")",
"-",
"1",
"character_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"alphabet_dir",
",",
"character",
")",
"images",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"character_dir",
")",
"for",
"image",
"in",
"images",
":",
"label",
",",
"_",
"=",
"image",
".",
"split",
"(",
"\"_\"",
")",
"label",
"=",
"int",
"(",
"label",
")",
"-",
"1",
"image_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"character_dir",
",",
"image",
")",
"yield",
"alphabet",
",",
"character_id",
",",
"label",
",",
"image_path"
] |
Walk an Omniglot directory and yield examples.
|
[
"Walk",
"an",
"Omniglot",
"directory",
"and",
"yield",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/omniglot.py#L128-L143
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/omniglot.py
|
_get_names
|
def _get_names(dirs):
"""Get alphabet and label names, union across all dirs."""
alphabets = set()
label_names = {}
for d in dirs:
for example in _walk_omniglot_dir(d):
alphabet, alphabet_char_id, label, _ = example
alphabets.add(alphabet)
label_name = "%s_%d" % (alphabet, alphabet_char_id)
if label in label_names:
assert label_names[label] == label_name
else:
label_names[label] = label_name
label_names = [label_names[k] for k in sorted(label_names)]
return alphabets, label_names
|
python
|
def _get_names(dirs):
"""Get alphabet and label names, union across all dirs."""
alphabets = set()
label_names = {}
for d in dirs:
for example in _walk_omniglot_dir(d):
alphabet, alphabet_char_id, label, _ = example
alphabets.add(alphabet)
label_name = "%s_%d" % (alphabet, alphabet_char_id)
if label in label_names:
assert label_names[label] == label_name
else:
label_names[label] = label_name
label_names = [label_names[k] for k in sorted(label_names)]
return alphabets, label_names
|
[
"def",
"_get_names",
"(",
"dirs",
")",
":",
"alphabets",
"=",
"set",
"(",
")",
"label_names",
"=",
"{",
"}",
"for",
"d",
"in",
"dirs",
":",
"for",
"example",
"in",
"_walk_omniglot_dir",
"(",
"d",
")",
":",
"alphabet",
",",
"alphabet_char_id",
",",
"label",
",",
"_",
"=",
"example",
"alphabets",
".",
"add",
"(",
"alphabet",
")",
"label_name",
"=",
"\"%s_%d\"",
"%",
"(",
"alphabet",
",",
"alphabet_char_id",
")",
"if",
"label",
"in",
"label_names",
":",
"assert",
"label_names",
"[",
"label",
"]",
"==",
"label_name",
"else",
":",
"label_names",
"[",
"label",
"]",
"=",
"label_name",
"label_names",
"=",
"[",
"label_names",
"[",
"k",
"]",
"for",
"k",
"in",
"sorted",
"(",
"label_names",
")",
"]",
"return",
"alphabets",
",",
"label_names"
] |
Get alphabet and label names, union across all dirs.
|
[
"Get",
"alphabet",
"and",
"label",
"names",
"union",
"across",
"all",
"dirs",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/omniglot.py#L146-L160
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/units.py
|
size_str
|
def size_str(size_in_bytes):
"""Returns a human readable size string.
If size_in_bytes is None, then returns "?? GiB".
For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string.
"""
if not size_in_bytes:
return "?? GiB"
size_in_bytes = float(size_in_bytes)
for (name, size_bytes) in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return "{:.2f} {}".format(value, name)
return "{} {}".format(int(size_in_bytes), "bytes")
|
python
|
def size_str(size_in_bytes):
"""Returns a human readable size string.
If size_in_bytes is None, then returns "?? GiB".
For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string.
"""
if not size_in_bytes:
return "?? GiB"
size_in_bytes = float(size_in_bytes)
for (name, size_bytes) in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return "{:.2f} {}".format(value, name)
return "{} {}".format(int(size_in_bytes), "bytes")
|
[
"def",
"size_str",
"(",
"size_in_bytes",
")",
":",
"if",
"not",
"size_in_bytes",
":",
"return",
"\"?? GiB\"",
"size_in_bytes",
"=",
"float",
"(",
"size_in_bytes",
")",
"for",
"(",
"name",
",",
"size_bytes",
")",
"in",
"_NAME_LIST",
":",
"value",
"=",
"size_in_bytes",
"/",
"size_bytes",
"if",
"value",
">=",
"1.0",
":",
"return",
"\"{:.2f} {}\"",
".",
"format",
"(",
"value",
",",
"name",
")",
"return",
"\"{} {}\"",
".",
"format",
"(",
"int",
"(",
"size_in_bytes",
")",
",",
"\"bytes\"",
")"
] |
Returns a human readable size string.
If size_in_bytes is None, then returns "?? GiB".
For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string.
|
[
"Returns",
"a",
"human",
"readable",
"size",
"string",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/units.py#L34-L53
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/downloader.py
|
_Downloader.tqdm
|
def tqdm(self):
"""Add a progression bar for the current download."""
async_tqdm = utils.async_tqdm
with async_tqdm(total=0, desc='Dl Completed...', unit=' url') as pbar_url:
with async_tqdm(total=0, desc='Dl Size...', unit=' MiB') as pbar_dl_size:
self._pbar_url = pbar_url
self._pbar_dl_size = pbar_dl_size
yield
|
python
|
def tqdm(self):
"""Add a progression bar for the current download."""
async_tqdm = utils.async_tqdm
with async_tqdm(total=0, desc='Dl Completed...', unit=' url') as pbar_url:
with async_tqdm(total=0, desc='Dl Size...', unit=' MiB') as pbar_dl_size:
self._pbar_url = pbar_url
self._pbar_dl_size = pbar_dl_size
yield
|
[
"def",
"tqdm",
"(",
"self",
")",
":",
"async_tqdm",
"=",
"utils",
".",
"async_tqdm",
"with",
"async_tqdm",
"(",
"total",
"=",
"0",
",",
"desc",
"=",
"'Dl Completed...'",
",",
"unit",
"=",
"' url'",
")",
"as",
"pbar_url",
":",
"with",
"async_tqdm",
"(",
"total",
"=",
"0",
",",
"desc",
"=",
"'Dl Size...'",
",",
"unit",
"=",
"' MiB'",
")",
"as",
"pbar_dl_size",
":",
"self",
".",
"_pbar_url",
"=",
"pbar_url",
"self",
".",
"_pbar_dl_size",
"=",
"pbar_dl_size",
"yield"
] |
Add a progression bar for the current download.
|
[
"Add",
"a",
"progression",
"bar",
"for",
"the",
"current",
"download",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L84-L91
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/downloader.py
|
_Downloader.download
|
def download(self, url, destination_path):
"""Download url to given path.
Returns Promise -> sha256 of downloaded file.
Args:
url: address of resource to download.
destination_path: `str`, path to directory where to download the resource.
Returns:
Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
"""
self._pbar_url.update_total(1)
future = self._executor.submit(self._sync_download, url, destination_path)
return promise.Promise.resolve(future)
|
python
|
def download(self, url, destination_path):
"""Download url to given path.
Returns Promise -> sha256 of downloaded file.
Args:
url: address of resource to download.
destination_path: `str`, path to directory where to download the resource.
Returns:
Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
"""
self._pbar_url.update_total(1)
future = self._executor.submit(self._sync_download, url, destination_path)
return promise.Promise.resolve(future)
|
[
"def",
"download",
"(",
"self",
",",
"url",
",",
"destination_path",
")",
":",
"self",
".",
"_pbar_url",
".",
"update_total",
"(",
"1",
")",
"future",
"=",
"self",
".",
"_executor",
".",
"submit",
"(",
"self",
".",
"_sync_download",
",",
"url",
",",
"destination_path",
")",
"return",
"promise",
".",
"Promise",
".",
"resolve",
"(",
"future",
")"
] |
Download url to given path.
Returns Promise -> sha256 of downloaded file.
Args:
url: address of resource to download.
destination_path: `str`, path to directory where to download the resource.
Returns:
Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
|
[
"Download",
"url",
"to",
"given",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L93-L107
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/downloader.py
|
_Downloader._sync_kaggle_download
|
def _sync_kaggle_download(self, kaggle_url, destination_path):
"""Download with Kaggle API."""
kaggle_file = kaggle.KaggleFile.from_url(kaggle_url)
downloader = self.kaggle_downloader(kaggle_file.competition)
filepath = downloader.download_file(kaggle_file.filename, destination_path)
dl_size = tf.io.gfile.stat(filepath).length
checksum = self._checksumer()
with tf.io.gfile.GFile(filepath, 'rb') as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), dl_size
|
python
|
def _sync_kaggle_download(self, kaggle_url, destination_path):
"""Download with Kaggle API."""
kaggle_file = kaggle.KaggleFile.from_url(kaggle_url)
downloader = self.kaggle_downloader(kaggle_file.competition)
filepath = downloader.download_file(kaggle_file.filename, destination_path)
dl_size = tf.io.gfile.stat(filepath).length
checksum = self._checksumer()
with tf.io.gfile.GFile(filepath, 'rb') as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), dl_size
|
[
"def",
"_sync_kaggle_download",
"(",
"self",
",",
"kaggle_url",
",",
"destination_path",
")",
":",
"kaggle_file",
"=",
"kaggle",
".",
"KaggleFile",
".",
"from_url",
"(",
"kaggle_url",
")",
"downloader",
"=",
"self",
".",
"kaggle_downloader",
"(",
"kaggle_file",
".",
"competition",
")",
"filepath",
"=",
"downloader",
".",
"download_file",
"(",
"kaggle_file",
".",
"filename",
",",
"destination_path",
")",
"dl_size",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"stat",
"(",
"filepath",
")",
".",
"length",
"checksum",
"=",
"self",
".",
"_checksumer",
"(",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
",",
"'rb'",
")",
"as",
"f",
":",
"while",
"True",
":",
"block",
"=",
"f",
".",
"read",
"(",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
"if",
"not",
"block",
":",
"break",
"checksum",
".",
"update",
"(",
"block",
")",
"return",
"checksum",
".",
"hexdigest",
"(",
")",
",",
"dl_size"
] |
Download with Kaggle API.
|
[
"Download",
"with",
"Kaggle",
"API",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L109-L123
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/downloader.py
|
_Downloader._get_drive_url
|
def _get_drive_url(self, url, session):
"""Returns url, possibly with confirmation token."""
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError(
'Failed to get url %s. HTTP code: %d.' % (url, response.status_code))
for k, v in response.cookies.items():
if k.startswith('download_warning'):
return url + '&confirm=' + v # v is the confirm token
# No token found, let's try with original URL:
return url
|
python
|
def _get_drive_url(self, url, session):
"""Returns url, possibly with confirmation token."""
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError(
'Failed to get url %s. HTTP code: %d.' % (url, response.status_code))
for k, v in response.cookies.items():
if k.startswith('download_warning'):
return url + '&confirm=' + v # v is the confirm token
# No token found, let's try with original URL:
return url
|
[
"def",
"_get_drive_url",
"(",
"self",
",",
"url",
",",
"session",
")",
":",
"response",
"=",
"session",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"DownloadError",
"(",
"'Failed to get url %s. HTTP code: %d.'",
"%",
"(",
"url",
",",
"response",
".",
"status_code",
")",
")",
"for",
"k",
",",
"v",
"in",
"response",
".",
"cookies",
".",
"items",
"(",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"'download_warning'",
")",
":",
"return",
"url",
"+",
"'&confirm='",
"+",
"v",
"# v is the confirm token",
"# No token found, let's try with original URL:",
"return",
"url"
] |
Returns url, possibly with confirmation token.
|
[
"Returns",
"url",
"possibly",
"with",
"confirmation",
"token",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L125-L135
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/download/downloader.py
|
_Downloader._sync_download
|
def _sync_download(self, url, destination_path):
"""Synchronous version of `download` method."""
proxies = {
'http': os.environ.get('TFDS_HTTP_PROXY', None),
'https': os.environ.get('TFDS_HTTPS_PROXY', None),
'ftp': os.environ.get('TFDS_FTP_PROXY', None)
}
if kaggle.KaggleFile.is_kaggle_url(url):
if proxies['http']:
os.environ['KAGGLE_PROXY'] = proxies['http']
return self._sync_kaggle_download(url, destination_path)
try:
# If url is on a filesystem that gfile understands, use copy. Otherwise,
# use requests.
if not url.startswith('http'):
return self._sync_file_copy(url, destination_path)
except tf.errors.UnimplementedError:
pass
session = requests.Session()
session.proxies = proxies
if _DRIVE_URL.match(url):
url = self._get_drive_url(url, session)
use_urllib = url.startswith('ftp')
if use_urllib:
if proxies['ftp']:
proxy = urllib.request.ProxyHandler({'ftp': proxies['ftp']})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener) # pylint: disable=too-many-function-args
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
else:
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError('Failed to get url %s. HTTP code: %d.' %
(url, response.status_code))
fname = _get_filename(response)
path = os.path.join(destination_path, fname)
size = 0
size_mb = 0
unit_mb = units.MiB
self._pbar_dl_size.update_total(
int(response.headers.get('Content-length', 0)) // unit_mb)
with tf.io.gfile.GFile(path, 'wb') as file_:
checksum = self._checksumer()
if use_urllib:
iterator = iter(lambda: response.read(io.DEFAULT_BUFFER_SIZE), b'')
else:
iterator = response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)
for block in iterator:
size += len(block)
# Update the progress bar
size_mb += len(block)
if size_mb > unit_mb:
self._pbar_dl_size.update(size_mb // unit_mb)
size_mb %= unit_mb
checksum.update(block)
file_.write(block)
self._pbar_url.update(1)
return checksum.hexdigest(), size
|
python
|
def _sync_download(self, url, destination_path):
"""Synchronous version of `download` method."""
proxies = {
'http': os.environ.get('TFDS_HTTP_PROXY', None),
'https': os.environ.get('TFDS_HTTPS_PROXY', None),
'ftp': os.environ.get('TFDS_FTP_PROXY', None)
}
if kaggle.KaggleFile.is_kaggle_url(url):
if proxies['http']:
os.environ['KAGGLE_PROXY'] = proxies['http']
return self._sync_kaggle_download(url, destination_path)
try:
# If url is on a filesystem that gfile understands, use copy. Otherwise,
# use requests.
if not url.startswith('http'):
return self._sync_file_copy(url, destination_path)
except tf.errors.UnimplementedError:
pass
session = requests.Session()
session.proxies = proxies
if _DRIVE_URL.match(url):
url = self._get_drive_url(url, session)
use_urllib = url.startswith('ftp')
if use_urllib:
if proxies['ftp']:
proxy = urllib.request.ProxyHandler({'ftp': proxies['ftp']})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener) # pylint: disable=too-many-function-args
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
else:
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError('Failed to get url %s. HTTP code: %d.' %
(url, response.status_code))
fname = _get_filename(response)
path = os.path.join(destination_path, fname)
size = 0
size_mb = 0
unit_mb = units.MiB
self._pbar_dl_size.update_total(
int(response.headers.get('Content-length', 0)) // unit_mb)
with tf.io.gfile.GFile(path, 'wb') as file_:
checksum = self._checksumer()
if use_urllib:
iterator = iter(lambda: response.read(io.DEFAULT_BUFFER_SIZE), b'')
else:
iterator = response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)
for block in iterator:
size += len(block)
# Update the progress bar
size_mb += len(block)
if size_mb > unit_mb:
self._pbar_dl_size.update(size_mb // unit_mb)
size_mb %= unit_mb
checksum.update(block)
file_.write(block)
self._pbar_url.update(1)
return checksum.hexdigest(), size
|
[
"def",
"_sync_download",
"(",
"self",
",",
"url",
",",
"destination_path",
")",
":",
"proxies",
"=",
"{",
"'http'",
":",
"os",
".",
"environ",
".",
"get",
"(",
"'TFDS_HTTP_PROXY'",
",",
"None",
")",
",",
"'https'",
":",
"os",
".",
"environ",
".",
"get",
"(",
"'TFDS_HTTPS_PROXY'",
",",
"None",
")",
",",
"'ftp'",
":",
"os",
".",
"environ",
".",
"get",
"(",
"'TFDS_FTP_PROXY'",
",",
"None",
")",
"}",
"if",
"kaggle",
".",
"KaggleFile",
".",
"is_kaggle_url",
"(",
"url",
")",
":",
"if",
"proxies",
"[",
"'http'",
"]",
":",
"os",
".",
"environ",
"[",
"'KAGGLE_PROXY'",
"]",
"=",
"proxies",
"[",
"'http'",
"]",
"return",
"self",
".",
"_sync_kaggle_download",
"(",
"url",
",",
"destination_path",
")",
"try",
":",
"# If url is on a filesystem that gfile understands, use copy. Otherwise,",
"# use requests.",
"if",
"not",
"url",
".",
"startswith",
"(",
"'http'",
")",
":",
"return",
"self",
".",
"_sync_file_copy",
"(",
"url",
",",
"destination_path",
")",
"except",
"tf",
".",
"errors",
".",
"UnimplementedError",
":",
"pass",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"session",
".",
"proxies",
"=",
"proxies",
"if",
"_DRIVE_URL",
".",
"match",
"(",
"url",
")",
":",
"url",
"=",
"self",
".",
"_get_drive_url",
"(",
"url",
",",
"session",
")",
"use_urllib",
"=",
"url",
".",
"startswith",
"(",
"'ftp'",
")",
"if",
"use_urllib",
":",
"if",
"proxies",
"[",
"'ftp'",
"]",
":",
"proxy",
"=",
"urllib",
".",
"request",
".",
"ProxyHandler",
"(",
"{",
"'ftp'",
":",
"proxies",
"[",
"'ftp'",
"]",
"}",
")",
"opener",
"=",
"urllib",
".",
"request",
".",
"build_opener",
"(",
"proxy",
")",
"urllib",
".",
"request",
".",
"install_opener",
"(",
"opener",
")",
"# pylint: disable=too-many-function-args",
"request",
"=",
"urllib",
".",
"request",
".",
"Request",
"(",
"url",
")",
"response",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"request",
")",
"else",
":",
"response",
"=",
"session",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"DownloadError",
"(",
"'Failed to get url %s. HTTP code: %d.'",
"%",
"(",
"url",
",",
"response",
".",
"status_code",
")",
")",
"fname",
"=",
"_get_filename",
"(",
"response",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_path",
",",
"fname",
")",
"size",
"=",
"0",
"size_mb",
"=",
"0",
"unit_mb",
"=",
"units",
".",
"MiB",
"self",
".",
"_pbar_dl_size",
".",
"update_total",
"(",
"int",
"(",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-length'",
",",
"0",
")",
")",
"//",
"unit_mb",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
",",
"'wb'",
")",
"as",
"file_",
":",
"checksum",
"=",
"self",
".",
"_checksumer",
"(",
")",
"if",
"use_urllib",
":",
"iterator",
"=",
"iter",
"(",
"lambda",
":",
"response",
".",
"read",
"(",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
",",
"b''",
")",
"else",
":",
"iterator",
"=",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
"for",
"block",
"in",
"iterator",
":",
"size",
"+=",
"len",
"(",
"block",
")",
"# Update the progress bar",
"size_mb",
"+=",
"len",
"(",
"block",
")",
"if",
"size_mb",
">",
"unit_mb",
":",
"self",
".",
"_pbar_dl_size",
".",
"update",
"(",
"size_mb",
"//",
"unit_mb",
")",
"size_mb",
"%=",
"unit_mb",
"checksum",
".",
"update",
"(",
"block",
")",
"file_",
".",
"write",
"(",
"block",
")",
"self",
".",
"_pbar_url",
".",
"update",
"(",
"1",
")",
"return",
"checksum",
".",
"hexdigest",
"(",
")",
",",
"size"
] |
Synchronous version of `download` method.
|
[
"Synchronous",
"version",
"of",
"download",
"method",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L144-L208
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/diabetic_retinopathy_detection.py
|
_resize_image_if_necessary
|
def _resize_image_if_necessary(image_fobj, target_pixels=None):
"""Resize an image to have (roughly) the given number of target pixels.
Args:
image_fobj: File object containing the original image.
target_pixels: If given, number of pixels that the image must have.
Returns:
A file object.
"""
if target_pixels is None:
return image_fobj
cv2 = tfds.core.lazy_imports.cv2
# Decode image using OpenCV2.
image = cv2.imdecode(
np.fromstring(image_fobj.read(), dtype=np.uint8), flags=3)
# Get image height and width.
height, width, _ = image.shape
actual_pixels = height * width
if actual_pixels > target_pixels:
factor = np.sqrt(target_pixels / actual_pixels)
image = cv2.resize(image, dsize=None, fx=factor, fy=factor)
# Encode the image with quality=72 and store it in a BytesIO object.
_, buff = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), 72])
return io.BytesIO(buff.tostring())
|
python
|
def _resize_image_if_necessary(image_fobj, target_pixels=None):
"""Resize an image to have (roughly) the given number of target pixels.
Args:
image_fobj: File object containing the original image.
target_pixels: If given, number of pixels that the image must have.
Returns:
A file object.
"""
if target_pixels is None:
return image_fobj
cv2 = tfds.core.lazy_imports.cv2
# Decode image using OpenCV2.
image = cv2.imdecode(
np.fromstring(image_fobj.read(), dtype=np.uint8), flags=3)
# Get image height and width.
height, width, _ = image.shape
actual_pixels = height * width
if actual_pixels > target_pixels:
factor = np.sqrt(target_pixels / actual_pixels)
image = cv2.resize(image, dsize=None, fx=factor, fy=factor)
# Encode the image with quality=72 and store it in a BytesIO object.
_, buff = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), 72])
return io.BytesIO(buff.tostring())
|
[
"def",
"_resize_image_if_necessary",
"(",
"image_fobj",
",",
"target_pixels",
"=",
"None",
")",
":",
"if",
"target_pixels",
"is",
"None",
":",
"return",
"image_fobj",
"cv2",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
"# Decode image using OpenCV2.",
"image",
"=",
"cv2",
".",
"imdecode",
"(",
"np",
".",
"fromstring",
"(",
"image_fobj",
".",
"read",
"(",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
",",
"flags",
"=",
"3",
")",
"# Get image height and width.",
"height",
",",
"width",
",",
"_",
"=",
"image",
".",
"shape",
"actual_pixels",
"=",
"height",
"*",
"width",
"if",
"actual_pixels",
">",
"target_pixels",
":",
"factor",
"=",
"np",
".",
"sqrt",
"(",
"target_pixels",
"/",
"actual_pixels",
")",
"image",
"=",
"cv2",
".",
"resize",
"(",
"image",
",",
"dsize",
"=",
"None",
",",
"fx",
"=",
"factor",
",",
"fy",
"=",
"factor",
")",
"# Encode the image with quality=72 and store it in a BytesIO object.",
"_",
",",
"buff",
"=",
"cv2",
".",
"imencode",
"(",
"\".jpg\"",
",",
"image",
",",
"[",
"int",
"(",
"cv2",
".",
"IMWRITE_JPEG_QUALITY",
")",
",",
"72",
"]",
")",
"return",
"io",
".",
"BytesIO",
"(",
"buff",
".",
"tostring",
"(",
")",
")"
] |
Resize an image to have (roughly) the given number of target pixels.
Args:
image_fobj: File object containing the original image.
target_pixels: If given, number of pixels that the image must have.
Returns:
A file object.
|
[
"Resize",
"an",
"image",
"to",
"have",
"(",
"roughly",
")",
"the",
"given",
"number",
"of",
"target",
"pixels",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/diabetic_retinopathy_detection.py#L181-L206
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/diabetic_retinopathy_detection.py
|
DiabeticRetinopathyDetection._generate_examples
|
def _generate_examples(self, images_dir_path, csv_path=None, csv_usage=None):
"""Yields Example instances from given CSV.
Args:
images_dir_path: path to dir in which images are stored.
csv_path: optional, path to csv file with two columns: name of image and
label. If not provided, just scan image directory, don't set labels.
csv_usage: optional, subset of examples from the csv file to use based on
the "Usage" column from the csv.
"""
if csv_path:
with tf.io.gfile.GFile(csv_path) as csv_f:
reader = csv.DictReader(csv_f)
data = [(row["image"], int(row["level"]))
for row in reader
if csv_usage is None or row["Usage"] == csv_usage]
else:
data = [(fname[:-5], -1)
for fname in tf.io.gfile.listdir(images_dir_path)
if fname.endswith(".jpeg")]
for name, label in data:
yield {
"name": name,
"image": _resize_image_if_necessary(
tf.io.gfile.GFile("%s/%s.jpeg" % (images_dir_path, name),
mode="rb"),
target_pixels=self.builder_config.target_pixels),
"label": label,
}
|
python
|
def _generate_examples(self, images_dir_path, csv_path=None, csv_usage=None):
"""Yields Example instances from given CSV.
Args:
images_dir_path: path to dir in which images are stored.
csv_path: optional, path to csv file with two columns: name of image and
label. If not provided, just scan image directory, don't set labels.
csv_usage: optional, subset of examples from the csv file to use based on
the "Usage" column from the csv.
"""
if csv_path:
with tf.io.gfile.GFile(csv_path) as csv_f:
reader = csv.DictReader(csv_f)
data = [(row["image"], int(row["level"]))
for row in reader
if csv_usage is None or row["Usage"] == csv_usage]
else:
data = [(fname[:-5], -1)
for fname in tf.io.gfile.listdir(images_dir_path)
if fname.endswith(".jpeg")]
for name, label in data:
yield {
"name": name,
"image": _resize_image_if_necessary(
tf.io.gfile.GFile("%s/%s.jpeg" % (images_dir_path, name),
mode="rb"),
target_pixels=self.builder_config.target_pixels),
"label": label,
}
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"images_dir_path",
",",
"csv_path",
"=",
"None",
",",
"csv_usage",
"=",
"None",
")",
":",
"if",
"csv_path",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"csv_path",
")",
"as",
"csv_f",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"csv_f",
")",
"data",
"=",
"[",
"(",
"row",
"[",
"\"image\"",
"]",
",",
"int",
"(",
"row",
"[",
"\"level\"",
"]",
")",
")",
"for",
"row",
"in",
"reader",
"if",
"csv_usage",
"is",
"None",
"or",
"row",
"[",
"\"Usage\"",
"]",
"==",
"csv_usage",
"]",
"else",
":",
"data",
"=",
"[",
"(",
"fname",
"[",
":",
"-",
"5",
"]",
",",
"-",
"1",
")",
"for",
"fname",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"images_dir_path",
")",
"if",
"fname",
".",
"endswith",
"(",
"\".jpeg\"",
")",
"]",
"for",
"name",
",",
"label",
"in",
"data",
":",
"yield",
"{",
"\"name\"",
":",
"name",
",",
"\"image\"",
":",
"_resize_image_if_necessary",
"(",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"\"%s/%s.jpeg\"",
"%",
"(",
"images_dir_path",
",",
"name",
")",
",",
"mode",
"=",
"\"rb\"",
")",
",",
"target_pixels",
"=",
"self",
".",
"builder_config",
".",
"target_pixels",
")",
",",
"\"label\"",
":",
"label",
",",
"}"
] |
Yields Example instances from given CSV.
Args:
images_dir_path: path to dir in which images are stored.
csv_path: optional, path to csv file with two columns: name of image and
label. If not provided, just scan image directory, don't set labels.
csv_usage: optional, subset of examples from the csv file to use based on
the "Usage" column from the csv.
|
[
"Yields",
"Example",
"instances",
"from",
"given",
"CSV",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/diabetic_retinopathy_detection.py#L150-L178
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/dataset_builder.py
|
FileAdapterBuilder._slice_split_info_to_instruction_dicts
|
def _slice_split_info_to_instruction_dicts(self, list_sliced_split_info):
"""Return the list of files and reading mask of the files to read."""
instruction_dicts = []
for sliced_split_info in list_sliced_split_info:
mask = splits_lib.slice_to_percent_mask(sliced_split_info.slice_value)
# Compute filenames from the given split
filepaths = list(sorted(self._build_split_filenames(
split_info_list=[sliced_split_info.split_info],
)))
# Compute the offsets
if sliced_split_info.split_info.num_examples:
shard_id2num_examples = splits_lib.get_shard_id2num_examples(
sliced_split_info.split_info.num_shards,
sliced_split_info.split_info.num_examples,
)
mask_offsets = splits_lib.compute_mask_offsets(shard_id2num_examples)
else:
logging.warning(
"Statistics not present in the dataset. TFDS is not able to load "
"the total number of examples, so using the subsplit API may not "
"provide precise subsplits."
)
mask_offsets = [0] * len(filepaths)
for filepath, mask_offset in zip(filepaths, mask_offsets):
instruction_dicts.append({
"filepath": filepath,
"mask": mask,
"mask_offset": mask_offset,
})
return instruction_dicts
|
python
|
def _slice_split_info_to_instruction_dicts(self, list_sliced_split_info):
"""Return the list of files and reading mask of the files to read."""
instruction_dicts = []
for sliced_split_info in list_sliced_split_info:
mask = splits_lib.slice_to_percent_mask(sliced_split_info.slice_value)
# Compute filenames from the given split
filepaths = list(sorted(self._build_split_filenames(
split_info_list=[sliced_split_info.split_info],
)))
# Compute the offsets
if sliced_split_info.split_info.num_examples:
shard_id2num_examples = splits_lib.get_shard_id2num_examples(
sliced_split_info.split_info.num_shards,
sliced_split_info.split_info.num_examples,
)
mask_offsets = splits_lib.compute_mask_offsets(shard_id2num_examples)
else:
logging.warning(
"Statistics not present in the dataset. TFDS is not able to load "
"the total number of examples, so using the subsplit API may not "
"provide precise subsplits."
)
mask_offsets = [0] * len(filepaths)
for filepath, mask_offset in zip(filepaths, mask_offsets):
instruction_dicts.append({
"filepath": filepath,
"mask": mask,
"mask_offset": mask_offset,
})
return instruction_dicts
|
[
"def",
"_slice_split_info_to_instruction_dicts",
"(",
"self",
",",
"list_sliced_split_info",
")",
":",
"instruction_dicts",
"=",
"[",
"]",
"for",
"sliced_split_info",
"in",
"list_sliced_split_info",
":",
"mask",
"=",
"splits_lib",
".",
"slice_to_percent_mask",
"(",
"sliced_split_info",
".",
"slice_value",
")",
"# Compute filenames from the given split",
"filepaths",
"=",
"list",
"(",
"sorted",
"(",
"self",
".",
"_build_split_filenames",
"(",
"split_info_list",
"=",
"[",
"sliced_split_info",
".",
"split_info",
"]",
",",
")",
")",
")",
"# Compute the offsets",
"if",
"sliced_split_info",
".",
"split_info",
".",
"num_examples",
":",
"shard_id2num_examples",
"=",
"splits_lib",
".",
"get_shard_id2num_examples",
"(",
"sliced_split_info",
".",
"split_info",
".",
"num_shards",
",",
"sliced_split_info",
".",
"split_info",
".",
"num_examples",
",",
")",
"mask_offsets",
"=",
"splits_lib",
".",
"compute_mask_offsets",
"(",
"shard_id2num_examples",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"\"Statistics not present in the dataset. TFDS is not able to load \"",
"\"the total number of examples, so using the subsplit API may not \"",
"\"provide precise subsplits.\"",
")",
"mask_offsets",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"filepaths",
")",
"for",
"filepath",
",",
"mask_offset",
"in",
"zip",
"(",
"filepaths",
",",
"mask_offsets",
")",
":",
"instruction_dicts",
".",
"append",
"(",
"{",
"\"filepath\"",
":",
"filepath",
",",
"\"mask\"",
":",
"mask",
",",
"\"mask_offset\"",
":",
"mask_offset",
",",
"}",
")",
"return",
"instruction_dicts"
] |
Return the list of files and reading mask of the files to read.
|
[
"Return",
"the",
"list",
"of",
"files",
"and",
"reading",
"mask",
"of",
"the",
"files",
"to",
"read",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_builder.py#L707-L739
|
train
|
tensorflow/datasets
|
tensorflow_datasets/core/dataset_builder.py
|
FileAdapterBuilder._build_split_filenames
|
def _build_split_filenames(self, split_info_list):
"""Construct the split filenames associated with the split info.
The filenames correspond to the pre-processed datasets files present in
the root directory of the dataset.
Args:
split_info_list: (list[SplitInfo]) List of split from which generate the
filenames
Returns:
filenames: (list[str]) The list of filenames path corresponding to the
split info object
"""
filenames = []
for split_info in split_info_list:
filenames.extend(naming.filepaths_for_dataset_split(
dataset_name=self.name,
split=split_info.name,
num_shards=split_info.num_shards,
data_dir=self._data_dir,
filetype_suffix=self._file_format_adapter.filetype_suffix,
))
return filenames
|
python
|
def _build_split_filenames(self, split_info_list):
"""Construct the split filenames associated with the split info.
The filenames correspond to the pre-processed datasets files present in
the root directory of the dataset.
Args:
split_info_list: (list[SplitInfo]) List of split from which generate the
filenames
Returns:
filenames: (list[str]) The list of filenames path corresponding to the
split info object
"""
filenames = []
for split_info in split_info_list:
filenames.extend(naming.filepaths_for_dataset_split(
dataset_name=self.name,
split=split_info.name,
num_shards=split_info.num_shards,
data_dir=self._data_dir,
filetype_suffix=self._file_format_adapter.filetype_suffix,
))
return filenames
|
[
"def",
"_build_split_filenames",
"(",
"self",
",",
"split_info_list",
")",
":",
"filenames",
"=",
"[",
"]",
"for",
"split_info",
"in",
"split_info_list",
":",
"filenames",
".",
"extend",
"(",
"naming",
".",
"filepaths_for_dataset_split",
"(",
"dataset_name",
"=",
"self",
".",
"name",
",",
"split",
"=",
"split_info",
".",
"name",
",",
"num_shards",
"=",
"split_info",
".",
"num_shards",
",",
"data_dir",
"=",
"self",
".",
"_data_dir",
",",
"filetype_suffix",
"=",
"self",
".",
"_file_format_adapter",
".",
"filetype_suffix",
",",
")",
")",
"return",
"filenames"
] |
Construct the split filenames associated with the split info.
The filenames correspond to the pre-processed datasets files present in
the root directory of the dataset.
Args:
split_info_list: (list[SplitInfo]) List of split from which generate the
filenames
Returns:
filenames: (list[str]) The list of filenames path corresponding to the
split info object
|
[
"Construct",
"the",
"split",
"filenames",
"associated",
"with",
"the",
"split",
"info",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_builder.py#L741-L765
|
train
|
tensorflow/datasets
|
tensorflow_datasets/video/moving_mnist.py
|
MovingMnist._generate_examples
|
def _generate_examples(self, data_path):
"""Generate MovingMnist sequences.
Args:
data_path (str): Path to the data file
Yields:
20 x 64 x 64 x 1 uint8 numpy arrays
"""
with tf.io.gfile.GFile(data_path, "rb") as fp:
images = np.load(fp)
images = np.transpose(images, (1, 0, 2, 3))
images = np.expand_dims(images, axis=-1)
for sequence in images:
yield dict(image_sequence=sequence)
|
python
|
def _generate_examples(self, data_path):
"""Generate MovingMnist sequences.
Args:
data_path (str): Path to the data file
Yields:
20 x 64 x 64 x 1 uint8 numpy arrays
"""
with tf.io.gfile.GFile(data_path, "rb") as fp:
images = np.load(fp)
images = np.transpose(images, (1, 0, 2, 3))
images = np.expand_dims(images, axis=-1)
for sequence in images:
yield dict(image_sequence=sequence)
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"data_path",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"data_path",
",",
"\"rb\"",
")",
"as",
"fp",
":",
"images",
"=",
"np",
".",
"load",
"(",
"fp",
")",
"images",
"=",
"np",
".",
"transpose",
"(",
"images",
",",
"(",
"1",
",",
"0",
",",
"2",
",",
"3",
")",
")",
"images",
"=",
"np",
".",
"expand_dims",
"(",
"images",
",",
"axis",
"=",
"-",
"1",
")",
"for",
"sequence",
"in",
"images",
":",
"yield",
"dict",
"(",
"image_sequence",
"=",
"sequence",
")"
] |
Generate MovingMnist sequences.
Args:
data_path (str): Path to the data file
Yields:
20 x 64 x 64 x 1 uint8 numpy arrays
|
[
"Generate",
"MovingMnist",
"sequences",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/moving_mnist.py#L85-L99
|
train
|
tensorflow/datasets
|
tensorflow_datasets/video/starcraft.py
|
StarcraftVideo._parse_single_video
|
def _parse_single_video(self, example_proto):
"""Parses single video from the input tfrecords.
Args:
example_proto: tfExample proto with a single video.
Returns:
dict with all frames, positions and actions.
"""
context_features = {
"game_duration_loops": tf.io.FixedLenFeature([1], tf.int64),
"game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32),
"n_steps": tf.io.FixedLenFeature([1], tf.int64),
"screen_size": tf.io.FixedLenFeature([2], tf.int64),
}
sequence_features = {
"rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string),
}
_, seq_feat = tf.io.parse_single_sequence_example(
example_proto,
context_features=context_features,
sequence_features=sequence_features)
video_frames = tf.map_fn(
tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8)
return video_frames
|
python
|
def _parse_single_video(self, example_proto):
"""Parses single video from the input tfrecords.
Args:
example_proto: tfExample proto with a single video.
Returns:
dict with all frames, positions and actions.
"""
context_features = {
"game_duration_loops": tf.io.FixedLenFeature([1], tf.int64),
"game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32),
"n_steps": tf.io.FixedLenFeature([1], tf.int64),
"screen_size": tf.io.FixedLenFeature([2], tf.int64),
}
sequence_features = {
"rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string),
}
_, seq_feat = tf.io.parse_single_sequence_example(
example_proto,
context_features=context_features,
sequence_features=sequence_features)
video_frames = tf.map_fn(
tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8)
return video_frames
|
[
"def",
"_parse_single_video",
"(",
"self",
",",
"example_proto",
")",
":",
"context_features",
"=",
"{",
"\"game_duration_loops\"",
":",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"[",
"1",
"]",
",",
"tf",
".",
"int64",
")",
",",
"\"game_duration_seconds\"",
":",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"[",
"1",
"]",
",",
"tf",
".",
"float32",
")",
",",
"\"n_steps\"",
":",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"[",
"1",
"]",
",",
"tf",
".",
"int64",
")",
",",
"\"screen_size\"",
":",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"[",
"2",
"]",
",",
"tf",
".",
"int64",
")",
",",
"}",
"sequence_features",
"=",
"{",
"\"rgb_screen\"",
":",
"tf",
".",
"io",
".",
"FixedLenSequenceFeature",
"(",
"[",
"]",
",",
"tf",
".",
"string",
")",
",",
"}",
"_",
",",
"seq_feat",
"=",
"tf",
".",
"io",
".",
"parse_single_sequence_example",
"(",
"example_proto",
",",
"context_features",
"=",
"context_features",
",",
"sequence_features",
"=",
"sequence_features",
")",
"video_frames",
"=",
"tf",
".",
"map_fn",
"(",
"tf",
".",
"image",
".",
"decode_png",
",",
"seq_feat",
"[",
"\"rgb_screen\"",
"]",
",",
"dtype",
"=",
"tf",
".",
"uint8",
")",
"return",
"video_frames"
] |
Parses single video from the input tfrecords.
Args:
example_proto: tfExample proto with a single video.
Returns:
dict with all frames, positions and actions.
|
[
"Parses",
"single",
"video",
"from",
"the",
"input",
"tfrecords",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/starcraft.py#L181-L208
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/dsprites.py
|
Dsprites._generate_examples
|
def _generate_examples(self, filepath):
"""Generates examples for the dSprites data set.
Args:
filepath: path to the dSprites hdf5 file.
Yields:
Dictionaries with images, latent classes, and latent values.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file is >100x slower and the data set is small (26.7MB). Hence, we first
# load everything into memory before yielding the samples.
image_array, class_array, values_array = _load_data(filepath)
for image, classes, values in moves.zip(image_array, class_array,
values_array):
yield dict(
image=np.expand_dims(image, -1),
label_shape=classes[1],
label_scale=classes[2],
label_orientation=classes[3],
label_x_position=classes[4],
label_y_position=classes[5],
value_shape=values[1],
value_scale=values[2],
value_orientation=values[3],
value_x_position=values[4],
value_y_position=values[5])
|
python
|
def _generate_examples(self, filepath):
"""Generates examples for the dSprites data set.
Args:
filepath: path to the dSprites hdf5 file.
Yields:
Dictionaries with images, latent classes, and latent values.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file is >100x slower and the data set is small (26.7MB). Hence, we first
# load everything into memory before yielding the samples.
image_array, class_array, values_array = _load_data(filepath)
for image, classes, values in moves.zip(image_array, class_array,
values_array):
yield dict(
image=np.expand_dims(image, -1),
label_shape=classes[1],
label_scale=classes[2],
label_orientation=classes[3],
label_x_position=classes[4],
label_y_position=classes[5],
value_shape=values[1],
value_scale=values[2],
value_orientation=values[3],
value_x_position=values[4],
value_y_position=values[5])
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"# Simultaneously iterating through the different data sets in the hdf5",
"# file is >100x slower and the data set is small (26.7MB). Hence, we first",
"# load everything into memory before yielding the samples.",
"image_array",
",",
"class_array",
",",
"values_array",
"=",
"_load_data",
"(",
"filepath",
")",
"for",
"image",
",",
"classes",
",",
"values",
"in",
"moves",
".",
"zip",
"(",
"image_array",
",",
"class_array",
",",
"values_array",
")",
":",
"yield",
"dict",
"(",
"image",
"=",
"np",
".",
"expand_dims",
"(",
"image",
",",
"-",
"1",
")",
",",
"label_shape",
"=",
"classes",
"[",
"1",
"]",
",",
"label_scale",
"=",
"classes",
"[",
"2",
"]",
",",
"label_orientation",
"=",
"classes",
"[",
"3",
"]",
",",
"label_x_position",
"=",
"classes",
"[",
"4",
"]",
",",
"label_y_position",
"=",
"classes",
"[",
"5",
"]",
",",
"value_shape",
"=",
"values",
"[",
"1",
"]",
",",
"value_scale",
"=",
"values",
"[",
"2",
"]",
",",
"value_orientation",
"=",
"values",
"[",
"3",
"]",
",",
"value_x_position",
"=",
"values",
"[",
"4",
"]",
",",
"value_y_position",
"=",
"values",
"[",
"5",
"]",
")"
] |
Generates examples for the dSprites data set.
Args:
filepath: path to the dSprites hdf5 file.
Yields:
Dictionaries with images, latent classes, and latent values.
|
[
"Generates",
"examples",
"for",
"the",
"dSprites",
"data",
"set",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/dsprites.py#L117-L143
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/oxford_iiit_pet.py
|
OxfordIIITPet._split_generators
|
def _split_generators(self, dl_manager):
"""Returns splits."""
# Download images and annotations that come in separate archives.
# Note, that the extension of archives is .tar.gz even though the actual
# archives format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({
"images": tfds.download.Resource(
url=os.path.join(_BASE_URL, "images.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR),
"annotations": tfds.download.Resource(
url=os.path.join(_BASE_URL, "annotations.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR)
})
images_path_dir = os.path.join(dl_paths["images"], "images")
annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations")
# Setup train and test splits
train_split = tfds.core.SplitGenerator(
name="train",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"trainval.txt"),
},
)
test_split = tfds.core.SplitGenerator(
name="test",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"test.txt")
},
)
return [train_split, test_split]
|
python
|
def _split_generators(self, dl_manager):
"""Returns splits."""
# Download images and annotations that come in separate archives.
# Note, that the extension of archives is .tar.gz even though the actual
# archives format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({
"images": tfds.download.Resource(
url=os.path.join(_BASE_URL, "images.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR),
"annotations": tfds.download.Resource(
url=os.path.join(_BASE_URL, "annotations.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR)
})
images_path_dir = os.path.join(dl_paths["images"], "images")
annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations")
# Setup train and test splits
train_split = tfds.core.SplitGenerator(
name="train",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"trainval.txt"),
},
)
test_split = tfds.core.SplitGenerator(
name="test",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"test.txt")
},
)
return [train_split, test_split]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"# Download images and annotations that come in separate archives.",
"# Note, that the extension of archives is .tar.gz even though the actual",
"# archives format is uncompressed tar.",
"dl_paths",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"{",
"\"images\"",
":",
"tfds",
".",
"download",
".",
"Resource",
"(",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_BASE_URL",
",",
"\"images.tar.gz\"",
")",
",",
"extract_method",
"=",
"tfds",
".",
"download",
".",
"ExtractMethod",
".",
"TAR",
")",
",",
"\"annotations\"",
":",
"tfds",
".",
"download",
".",
"Resource",
"(",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_BASE_URL",
",",
"\"annotations.tar.gz\"",
")",
",",
"extract_method",
"=",
"tfds",
".",
"download",
".",
"ExtractMethod",
".",
"TAR",
")",
"}",
")",
"images_path_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_paths",
"[",
"\"images\"",
"]",
",",
"\"images\"",
")",
"annotations_path_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_paths",
"[",
"\"annotations\"",
"]",
",",
"\"annotations\"",
")",
"# Setup train and test splits",
"train_split",
"=",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"\"train\"",
",",
"num_shards",
"=",
"_NUM_SHARDS",
",",
"gen_kwargs",
"=",
"{",
"\"images_dir_path\"",
":",
"images_path_dir",
",",
"\"images_list_file\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"annotations_path_dir",
",",
"\"trainval.txt\"",
")",
",",
"}",
",",
")",
"test_split",
"=",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"\"test\"",
",",
"num_shards",
"=",
"_NUM_SHARDS",
",",
"gen_kwargs",
"=",
"{",
"\"images_dir_path\"",
":",
"images_path_dir",
",",
"\"images_list_file\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"annotations_path_dir",
",",
"\"test.txt\"",
")",
"}",
",",
")",
"return",
"[",
"train_split",
",",
"test_split",
"]"
] |
Returns splits.
|
[
"Returns",
"splits",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/oxford_iiit_pet.py#L65-L102
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/open_images.py
|
_load_objects
|
def _load_objects(csv_paths, csv_positions, prefix):
"""Returns objects listed within given CSV files."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_paths, csv_positions, prefix)
objects = collections.defaultdict(list)
for i, labels_path in enumerate(csv_paths):
with tf.io.gfile.GFile(labels_path) as csv_f:
if csv_positions[i] > 0:
csv_f.seek(csv_positions[i])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for image_id, source, label, confidence in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[i] = csv_f.tell()
image_id = int(image_id, 16)
current_obj = _Object(label, int(float(confidence) * 10), source)
objects[image_id].append(current_obj)
return dict(objects)
|
python
|
def _load_objects(csv_paths, csv_positions, prefix):
"""Returns objects listed within given CSV files."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_paths, csv_positions, prefix)
objects = collections.defaultdict(list)
for i, labels_path in enumerate(csv_paths):
with tf.io.gfile.GFile(labels_path) as csv_f:
if csv_positions[i] > 0:
csv_f.seek(csv_positions[i])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for image_id, source, label, confidence in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[i] = csv_f.tell()
image_id = int(image_id, 16)
current_obj = _Object(label, int(float(confidence) * 10), source)
objects[image_id].append(current_obj)
return dict(objects)
|
[
"def",
"_load_objects",
"(",
"csv_paths",
",",
"csv_positions",
",",
"prefix",
")",
":",
"logging",
".",
"info",
"(",
"'Loading CSVs %s from positions %s with prefix %s'",
",",
"csv_paths",
",",
"csv_positions",
",",
"prefix",
")",
"objects",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"i",
",",
"labels_path",
"in",
"enumerate",
"(",
"csv_paths",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"labels_path",
")",
"as",
"csv_f",
":",
"if",
"csv_positions",
"[",
"i",
"]",
">",
"0",
":",
"csv_f",
".",
"seek",
"(",
"csv_positions",
"[",
"i",
"]",
")",
"else",
":",
"csv_f",
".",
"readline",
"(",
")",
"# Drop headers",
"reader",
"=",
"csv",
".",
"reader",
"(",
"csv_f",
")",
"for",
"image_id",
",",
"source",
",",
"label",
",",
"confidence",
"in",
"reader",
":",
"if",
"prefix",
"and",
"image_id",
"[",
"0",
"]",
"!=",
"prefix",
":",
"break",
"csv_positions",
"[",
"i",
"]",
"=",
"csv_f",
".",
"tell",
"(",
")",
"image_id",
"=",
"int",
"(",
"image_id",
",",
"16",
")",
"current_obj",
"=",
"_Object",
"(",
"label",
",",
"int",
"(",
"float",
"(",
"confidence",
")",
"*",
"10",
")",
",",
"source",
")",
"objects",
"[",
"image_id",
"]",
".",
"append",
"(",
"current_obj",
")",
"return",
"dict",
"(",
"objects",
")"
] |
Returns objects listed within given CSV files.
|
[
"Returns",
"objects",
"listed",
"within",
"given",
"CSV",
"files",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L322-L341
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/open_images.py
|
_load_bboxes
|
def _load_bboxes(csv_path, csv_positions, prefix):
"""Returns bounded boxes listed within given CSV file."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_path, csv_positions, prefix)
boxes = collections.defaultdict(list)
with tf.io.gfile.GFile(csv_path) as csv_f:
if csv_positions[0] > 0:
csv_f.seek(csv_positions[0])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for (image_id, source, label, confidence, xmin, xmax, ymin, ymax,
is_occluded, is_truncated, is_group_of, is_depiction, is_inside,
) in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[0] = csv_f.tell()
image_id = int(image_id, 16)
del confidence # always 1 in bounding boxes.
current_row = _Bbox(
label, source, tfds.features.BBox(
float(ymin), float(xmin), float(ymax), float(xmax)),
int(is_occluded), int(is_truncated),
int(is_group_of), int(is_depiction), int(is_inside))
boxes[image_id].append(current_row)
return dict(boxes)
|
python
|
def _load_bboxes(csv_path, csv_positions, prefix):
"""Returns bounded boxes listed within given CSV file."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_path, csv_positions, prefix)
boxes = collections.defaultdict(list)
with tf.io.gfile.GFile(csv_path) as csv_f:
if csv_positions[0] > 0:
csv_f.seek(csv_positions[0])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for (image_id, source, label, confidence, xmin, xmax, ymin, ymax,
is_occluded, is_truncated, is_group_of, is_depiction, is_inside,
) in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[0] = csv_f.tell()
image_id = int(image_id, 16)
del confidence # always 1 in bounding boxes.
current_row = _Bbox(
label, source, tfds.features.BBox(
float(ymin), float(xmin), float(ymax), float(xmax)),
int(is_occluded), int(is_truncated),
int(is_group_of), int(is_depiction), int(is_inside))
boxes[image_id].append(current_row)
return dict(boxes)
|
[
"def",
"_load_bboxes",
"(",
"csv_path",
",",
"csv_positions",
",",
"prefix",
")",
":",
"logging",
".",
"info",
"(",
"'Loading CSVs %s from positions %s with prefix %s'",
",",
"csv_path",
",",
"csv_positions",
",",
"prefix",
")",
"boxes",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"csv_path",
")",
"as",
"csv_f",
":",
"if",
"csv_positions",
"[",
"0",
"]",
">",
"0",
":",
"csv_f",
".",
"seek",
"(",
"csv_positions",
"[",
"0",
"]",
")",
"else",
":",
"csv_f",
".",
"readline",
"(",
")",
"# Drop headers",
"reader",
"=",
"csv",
".",
"reader",
"(",
"csv_f",
")",
"for",
"(",
"image_id",
",",
"source",
",",
"label",
",",
"confidence",
",",
"xmin",
",",
"xmax",
",",
"ymin",
",",
"ymax",
",",
"is_occluded",
",",
"is_truncated",
",",
"is_group_of",
",",
"is_depiction",
",",
"is_inside",
",",
")",
"in",
"reader",
":",
"if",
"prefix",
"and",
"image_id",
"[",
"0",
"]",
"!=",
"prefix",
":",
"break",
"csv_positions",
"[",
"0",
"]",
"=",
"csv_f",
".",
"tell",
"(",
")",
"image_id",
"=",
"int",
"(",
"image_id",
",",
"16",
")",
"del",
"confidence",
"# always 1 in bounding boxes.",
"current_row",
"=",
"_Bbox",
"(",
"label",
",",
"source",
",",
"tfds",
".",
"features",
".",
"BBox",
"(",
"float",
"(",
"ymin",
")",
",",
"float",
"(",
"xmin",
")",
",",
"float",
"(",
"ymax",
")",
",",
"float",
"(",
"xmax",
")",
")",
",",
"int",
"(",
"is_occluded",
")",
",",
"int",
"(",
"is_truncated",
")",
",",
"int",
"(",
"is_group_of",
")",
",",
"int",
"(",
"is_depiction",
")",
",",
"int",
"(",
"is_inside",
")",
")",
"boxes",
"[",
"image_id",
"]",
".",
"append",
"(",
"current_row",
")",
"return",
"dict",
"(",
"boxes",
")"
] |
Returns bounded boxes listed within given CSV file.
|
[
"Returns",
"bounded",
"boxes",
"listed",
"within",
"given",
"CSV",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L344-L369
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/open_images.py
|
OpenImagesV4._split_generators
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
paths = dl_manager.download_and_extract(_URLS)
# Load labels from CSVs:
def load(names):
csv_positions = [0] * len(names)
return functools.partial(_load_objects, [paths[name] for name in names],
csv_positions)
train_objects = load(['train_human_labels', 'train_machine_labels'])
test_objects = load(['test_human_labels', 'test_machine_labels'])
validation_objects = load(['validation_human_labels',
'validation_machine_labels'])
def load_boxes(name):
csv_positions = [0]
return functools.partial(_load_bboxes, paths[name], csv_positions)
train_bbox = load_boxes('train-annotations-bbox')
test_bbox = load_boxes('test-annotations-bbox')
validation_bbox = load_boxes('validation-annotations-bbox')
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=512,
gen_kwargs=dict(archive_paths=paths['train_images'],
objects_getter=train_objects,
bboxes_getter=train_bbox,
prefixes='0123456789abcdef'),
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=36,
gen_kwargs=dict(archive_paths=[paths['test_images']],
objects_getter=test_objects,
bboxes_getter=test_bbox),
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=12,
gen_kwargs=dict(archive_paths=[paths['validation_images']],
objects_getter=validation_objects,
bboxes_getter=validation_bbox),
),
]
|
python
|
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
paths = dl_manager.download_and_extract(_URLS)
# Load labels from CSVs:
def load(names):
csv_positions = [0] * len(names)
return functools.partial(_load_objects, [paths[name] for name in names],
csv_positions)
train_objects = load(['train_human_labels', 'train_machine_labels'])
test_objects = load(['test_human_labels', 'test_machine_labels'])
validation_objects = load(['validation_human_labels',
'validation_machine_labels'])
def load_boxes(name):
csv_positions = [0]
return functools.partial(_load_bboxes, paths[name], csv_positions)
train_bbox = load_boxes('train-annotations-bbox')
test_bbox = load_boxes('test-annotations-bbox')
validation_bbox = load_boxes('validation-annotations-bbox')
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=512,
gen_kwargs=dict(archive_paths=paths['train_images'],
objects_getter=train_objects,
bboxes_getter=train_bbox,
prefixes='0123456789abcdef'),
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=36,
gen_kwargs=dict(archive_paths=[paths['test_images']],
objects_getter=test_objects,
bboxes_getter=test_bbox),
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=12,
gen_kwargs=dict(archive_paths=[paths['validation_images']],
objects_getter=validation_objects,
bboxes_getter=validation_bbox),
),
]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"paths",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"_URLS",
")",
"# Load labels from CSVs:",
"def",
"load",
"(",
"names",
")",
":",
"csv_positions",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"names",
")",
"return",
"functools",
".",
"partial",
"(",
"_load_objects",
",",
"[",
"paths",
"[",
"name",
"]",
"for",
"name",
"in",
"names",
"]",
",",
"csv_positions",
")",
"train_objects",
"=",
"load",
"(",
"[",
"'train_human_labels'",
",",
"'train_machine_labels'",
"]",
")",
"test_objects",
"=",
"load",
"(",
"[",
"'test_human_labels'",
",",
"'test_machine_labels'",
"]",
")",
"validation_objects",
"=",
"load",
"(",
"[",
"'validation_human_labels'",
",",
"'validation_machine_labels'",
"]",
")",
"def",
"load_boxes",
"(",
"name",
")",
":",
"csv_positions",
"=",
"[",
"0",
"]",
"return",
"functools",
".",
"partial",
"(",
"_load_bboxes",
",",
"paths",
"[",
"name",
"]",
",",
"csv_positions",
")",
"train_bbox",
"=",
"load_boxes",
"(",
"'train-annotations-bbox'",
")",
"test_bbox",
"=",
"load_boxes",
"(",
"'test-annotations-bbox'",
")",
"validation_bbox",
"=",
"load_boxes",
"(",
"'validation-annotations-bbox'",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
",",
"num_shards",
"=",
"512",
",",
"gen_kwargs",
"=",
"dict",
"(",
"archive_paths",
"=",
"paths",
"[",
"'train_images'",
"]",
",",
"objects_getter",
"=",
"train_objects",
",",
"bboxes_getter",
"=",
"train_bbox",
",",
"prefixes",
"=",
"'0123456789abcdef'",
")",
",",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TEST",
",",
"num_shards",
"=",
"36",
",",
"gen_kwargs",
"=",
"dict",
"(",
"archive_paths",
"=",
"[",
"paths",
"[",
"'test_images'",
"]",
"]",
",",
"objects_getter",
"=",
"test_objects",
",",
"bboxes_getter",
"=",
"test_bbox",
")",
",",
")",
",",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"VALIDATION",
",",
"num_shards",
"=",
"12",
",",
"gen_kwargs",
"=",
"dict",
"(",
"archive_paths",
"=",
"[",
"paths",
"[",
"'validation_images'",
"]",
"]",
",",
"objects_getter",
"=",
"validation_objects",
",",
"bboxes_getter",
"=",
"validation_bbox",
")",
",",
")",
",",
"]"
] |
Returns SplitGenerators.
|
[
"Returns",
"SplitGenerators",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L221-L262
|
train
|
tensorflow/datasets
|
tensorflow_datasets/image/open_images.py
|
OpenImagesV4._generate_examples
|
def _generate_examples(self, archive_paths, objects_getter, bboxes_getter,
                       prefixes=None):
  """Yields one example dict per image found in the given archives."""
  trainable = set(self.info.features['objects_trainable']['label'].names)
  for idx, path in enumerate(archive_paths):
    shard_prefix = prefixes[idx] if prefixes else None
    objects = objects_getter(shard_prefix)
    bboxes = bboxes_getter(shard_prefix)
    logging.info('Opening archive %s ...', path)
    archive = tfds.download.iter_archive(
        path, tfds.download.ExtractMethod.TAR_STREAM)
    for member_path, member_obj in archive:
      fname = os.path.basename(member_path)
      # Image ids are the hexadecimal file names (extension stripped).
      image_id = int(os.path.splitext(fname)[0], 16)
      image_objects = [o._asdict() for o in objects.get(image_id, [])]
      image_bboxes = [b._asdict() for b in bboxes.get(image_id, [])]
      image_objects_trainable = [
          o for o in image_objects if o['label'] in trainable
      ]
      yield {
          'image': _resize_image_if_necessary(
              member_obj, target_pixels=self.builder_config.target_pixels),
          'image/filename': fname,
          'objects': image_objects,
          'objects_trainable': image_objects_trainable,
          'bobjects': image_bboxes,
      }
|
python
|
def _generate_examples(self, archive_paths, objects_getter, bboxes_getter,
                       prefixes=None):
  """Yields one example dict per image found in the given archives."""
  trainable = set(self.info.features['objects_trainable']['label'].names)
  for idx, path in enumerate(archive_paths):
    shard_prefix = prefixes[idx] if prefixes else None
    objects = objects_getter(shard_prefix)
    bboxes = bboxes_getter(shard_prefix)
    logging.info('Opening archive %s ...', path)
    archive = tfds.download.iter_archive(
        path, tfds.download.ExtractMethod.TAR_STREAM)
    for member_path, member_obj in archive:
      fname = os.path.basename(member_path)
      # Image ids are the hexadecimal file names (extension stripped).
      image_id = int(os.path.splitext(fname)[0], 16)
      image_objects = [o._asdict() for o in objects.get(image_id, [])]
      image_bboxes = [b._asdict() for b in bboxes.get(image_id, [])]
      image_objects_trainable = [
          o for o in image_objects if o['label'] in trainable
      ]
      yield {
          'image': _resize_image_if_necessary(
              member_obj, target_pixels=self.builder_config.target_pixels),
          'image/filename': fname,
          'objects': image_objects,
          'objects_trainable': image_objects_trainable,
          'bobjects': image_bboxes,
      }
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"archive_paths",
",",
"objects_getter",
",",
"bboxes_getter",
",",
"prefixes",
"=",
"None",
")",
":",
"trainable_classes",
"=",
"set",
"(",
"self",
".",
"info",
".",
"features",
"[",
"'objects_trainable'",
"]",
"[",
"'label'",
"]",
".",
"names",
")",
"for",
"i",
",",
"archive_path",
"in",
"enumerate",
"(",
"archive_paths",
")",
":",
"prefix",
"=",
"prefixes",
"[",
"i",
"]",
"if",
"prefixes",
"else",
"None",
"objects",
"=",
"objects_getter",
"(",
"prefix",
")",
"bboxes",
"=",
"bboxes_getter",
"(",
"prefix",
")",
"logging",
".",
"info",
"(",
"'Opening archive %s ...'",
",",
"archive_path",
")",
"archive",
"=",
"tfds",
".",
"download",
".",
"iter_archive",
"(",
"archive_path",
",",
"tfds",
".",
"download",
".",
"ExtractMethod",
".",
"TAR_STREAM",
")",
"for",
"fpath",
",",
"fobj",
"in",
"archive",
":",
"fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fpath",
")",
"image_id",
"=",
"int",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"[",
"0",
"]",
",",
"16",
")",
"image_objects",
"=",
"[",
"obj",
".",
"_asdict",
"(",
")",
"for",
"obj",
"in",
"objects",
".",
"get",
"(",
"image_id",
",",
"[",
"]",
")",
"]",
"image_bboxes",
"=",
"[",
"bbox",
".",
"_asdict",
"(",
")",
"for",
"bbox",
"in",
"bboxes",
".",
"get",
"(",
"image_id",
",",
"[",
"]",
")",
"]",
"image_objects_trainable",
"=",
"[",
"obj",
"for",
"obj",
"in",
"image_objects",
"if",
"obj",
"[",
"'label'",
"]",
"in",
"trainable_classes",
"]",
"yield",
"{",
"'image'",
":",
"_resize_image_if_necessary",
"(",
"fobj",
",",
"target_pixels",
"=",
"self",
".",
"builder_config",
".",
"target_pixels",
")",
",",
"'image/filename'",
":",
"fname",
",",
"'objects'",
":",
"image_objects",
",",
"'objects_trainable'",
":",
"image_objects_trainable",
",",
"'bobjects'",
":",
"image_bboxes",
",",
"}"
] |
Yields examples.
|
[
"Yields",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L264-L291
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/imdb.py
|
IMDBReviews._generate_examples
|
def _generate_examples(self, archive, directory):
  """Generate IMDB examples."""
  # Only entries under <directory>/neg/ or <directory>/pos/ are reviews;
  # the matched sub-directory name doubles as the label.
  label_re = re.compile(
      os.path.join("^%s" % directory, "(?P<label>neg|pos)", ""))
  for fname, fobj in archive:
    match = label_re.match(fname)
    if match is None:
      continue
    yield {
        "text": fobj.read().strip(),
        "label": match.groupdict()["label"],
    }
|
python
|
def _generate_examples(self, archive, directory):
  """Generate IMDB examples."""
  # Only entries under <directory>/neg/ or <directory>/pos/ are reviews;
  # the matched sub-directory name doubles as the label.
  label_re = re.compile(
      os.path.join("^%s" % directory, "(?P<label>neg|pos)", ""))
  for fname, fobj in archive:
    match = label_re.match(fname)
    if match is None:
      continue
    yield {
        "text": fobj.read().strip(),
        "label": match.groupdict()["label"],
    }
|
[
"def",
"_generate_examples",
"(",
"self",
",",
"archive",
",",
"directory",
")",
":",
"reg",
"=",
"re",
".",
"compile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"^%s\"",
"%",
"directory",
",",
"\"(?P<label>neg|pos)\"",
",",
"\"\"",
")",
")",
"for",
"path",
",",
"imdb_f",
"in",
"archive",
":",
"res",
"=",
"reg",
".",
"match",
"(",
"path",
")",
"if",
"not",
"res",
":",
"continue",
"text",
"=",
"imdb_f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"yield",
"{",
"\"text\"",
":",
"text",
",",
"\"label\"",
":",
"res",
".",
"groupdict",
"(",
")",
"[",
"\"label\"",
"]",
",",
"}"
] |
Generate IMDB examples.
|
[
"Generate",
"IMDB",
"examples",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/imdb.py#L146-L157
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/cnn_dailymail.py
|
_get_url_hashes
|
def _get_url_hashes(path):
  """Get hashes of urls in file.

  Args:
    path: Path to a text file containing one url per line.

  Returns:
    Dict mapping each url's sha1 hex digest to True (used as a set).
  """
  urls = _read_text_file(path)
  def url_hash(u):
    h = hashlib.sha1()
    try:
      u = u.encode('utf-8')
    except UnicodeError:
      # BUG FIX: `str.encode` raises UnicodeEncodeError (not
      # UnicodeDecodeError) on Python 3, so the original handler was dead
      # code. Catch the common parent, and fall back to a lossy encoding so
      # that h.update() below still receives bytes instead of raising
      # TypeError on a str.
      logging.error('Cannot hash url: %s', u)
      u = u.encode('utf-8', 'replace')
    h.update(u)
    return h.hexdigest()
  return {url_hash(u): True for u in urls}
|
python
|
def _get_url_hashes(path):
  """Get hashes of urls in file.

  Args:
    path: Path to a text file containing one url per line.

  Returns:
    Dict mapping each url's sha1 hex digest to True (used as a set).
  """
  urls = _read_text_file(path)
  def url_hash(u):
    h = hashlib.sha1()
    try:
      u = u.encode('utf-8')
    except UnicodeError:
      # BUG FIX: `str.encode` raises UnicodeEncodeError (not
      # UnicodeDecodeError) on Python 3, so the original handler was dead
      # code. Catch the common parent, and fall back to a lossy encoding so
      # that h.update() below still receives bytes instead of raising
      # TypeError on a str.
      logging.error('Cannot hash url: %s', u)
      u = u.encode('utf-8', 'replace')
    h.update(u)
    return h.hexdigest()
  return {url_hash(u): True for u in urls}
|
[
"def",
"_get_url_hashes",
"(",
"path",
")",
":",
"urls",
"=",
"_read_text_file",
"(",
"path",
")",
"def",
"url_hash",
"(",
"u",
")",
":",
"h",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"try",
":",
"u",
"=",
"u",
".",
"encode",
"(",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
":",
"logging",
".",
"error",
"(",
"'Cannot hash url: %s'",
",",
"u",
")",
"h",
".",
"update",
"(",
"u",
")",
"return",
"h",
".",
"hexdigest",
"(",
")",
"return",
"{",
"url_hash",
"(",
"u",
")",
":",
"True",
"for",
"u",
"in",
"urls",
"}"
] |
Get hashes of urls in file.
|
[
"Get",
"hashes",
"of",
"urls",
"in",
"file",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L97-L108
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/cnn_dailymail.py
|
_find_files
|
def _find_files(dl_paths, publisher, url_dict):
  """Find files corresponding to urls."""
  # Map the publisher code to the directory holding its story files.
  if publisher == 'cnn':
    top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
  elif publisher == 'dm':
    top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
  else:
    logging.fatal('Unsupported publisher: %s', publisher)
  ret_files = []
  for entry in tf.io.gfile.listdir(top_dir):
    base = os.path.basename(entry)
    # Keep only stories whose name (without the '.story' suffix) is wanted.
    if base[:base.find('.story')] in url_dict:
      ret_files.append(os.path.join(top_dir, entry))
  return ret_files
|
python
|
def _find_files(dl_paths, publisher, url_dict):
  """Find files corresponding to urls."""
  # Map the publisher code to the directory holding its story files.
  if publisher == 'cnn':
    top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
  elif publisher == 'dm':
    top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
  else:
    logging.fatal('Unsupported publisher: %s', publisher)
  ret_files = []
  for entry in tf.io.gfile.listdir(top_dir):
    base = os.path.basename(entry)
    # Keep only stories whose name (without the '.story' suffix) is wanted.
    if base[:base.find('.story')] in url_dict:
      ret_files.append(os.path.join(top_dir, entry))
  return ret_files
|
[
"def",
"_find_files",
"(",
"dl_paths",
",",
"publisher",
",",
"url_dict",
")",
":",
"if",
"publisher",
"==",
"'cnn'",
":",
"top_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_paths",
"[",
"'cnn_stories'",
"]",
",",
"'cnn'",
",",
"'stories'",
")",
"elif",
"publisher",
"==",
"'dm'",
":",
"top_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_paths",
"[",
"'dm_stories'",
"]",
",",
"'dailymail'",
",",
"'stories'",
")",
"else",
":",
"logging",
".",
"fatal",
"(",
"'Unsupported publisher: %s'",
",",
"publisher",
")",
"files",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"top_dir",
")",
"ret_files",
"=",
"[",
"]",
"for",
"p",
"in",
"files",
":",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"p",
")",
"if",
"basename",
"[",
"0",
":",
"basename",
".",
"find",
"(",
"'.story'",
")",
"]",
"in",
"url_dict",
":",
"ret_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"top_dir",
",",
"p",
")",
")",
"return",
"ret_files"
] |
Find files corresponding to urls.
|
[
"Find",
"files",
"corresponding",
"to",
"urls",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L111-L126
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/cnn_dailymail.py
|
_subset_filenames
|
def _subset_filenames(dl_paths, split):
  """Get filenames for a particular split."""
  assert isinstance(dl_paths, dict), dl_paths
  # Resolve which url list backs the requested split.
  if split == tfds.Split.TRAIN:
    urls = _get_url_hashes(dl_paths['train_urls'])
  elif split == tfds.Split.VALIDATION:
    urls = _get_url_hashes(dl_paths['val_urls'])
  elif split == tfds.Split.TEST:
    urls = _get_url_hashes(dl_paths['test_urls'])
  else:
    logging.fatal('Unsupported split: %s', split)
  # Story files for a split come from both publishers.
  return _find_files(dl_paths, 'cnn', urls) + _find_files(dl_paths, 'dm', urls)
|
python
|
def _subset_filenames(dl_paths, split):
  """Get filenames for a particular split."""
  assert isinstance(dl_paths, dict), dl_paths
  # Resolve which url list backs the requested split.
  if split == tfds.Split.TRAIN:
    urls = _get_url_hashes(dl_paths['train_urls'])
  elif split == tfds.Split.VALIDATION:
    urls = _get_url_hashes(dl_paths['val_urls'])
  elif split == tfds.Split.TEST:
    urls = _get_url_hashes(dl_paths['test_urls'])
  else:
    logging.fatal('Unsupported split: %s', split)
  # Story files for a split come from both publishers.
  return _find_files(dl_paths, 'cnn', urls) + _find_files(dl_paths, 'dm', urls)
|
[
"def",
"_subset_filenames",
"(",
"dl_paths",
",",
"split",
")",
":",
"assert",
"isinstance",
"(",
"dl_paths",
",",
"dict",
")",
",",
"dl_paths",
"# Get filenames for a split.",
"if",
"split",
"==",
"tfds",
".",
"Split",
".",
"TRAIN",
":",
"urls",
"=",
"_get_url_hashes",
"(",
"dl_paths",
"[",
"'train_urls'",
"]",
")",
"elif",
"split",
"==",
"tfds",
".",
"Split",
".",
"VALIDATION",
":",
"urls",
"=",
"_get_url_hashes",
"(",
"dl_paths",
"[",
"'val_urls'",
"]",
")",
"elif",
"split",
"==",
"tfds",
".",
"Split",
".",
"TEST",
":",
"urls",
"=",
"_get_url_hashes",
"(",
"dl_paths",
"[",
"'test_urls'",
"]",
")",
"else",
":",
"logging",
".",
"fatal",
"(",
"'Unsupported split: %s'",
",",
"split",
")",
"cnn",
"=",
"_find_files",
"(",
"dl_paths",
",",
"'cnn'",
",",
"urls",
")",
"dm",
"=",
"_find_files",
"(",
"dl_paths",
",",
"'dm'",
",",
"urls",
")",
"return",
"cnn",
"+",
"dm"
] |
Get filenames for a particular split.
|
[
"Get",
"filenames",
"for",
"a",
"particular",
"split",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L129-L143
|
train
|
tensorflow/datasets
|
tensorflow_datasets/text/cnn_dailymail.py
|
_get_art_abs
|
def _get_art_abs(story_file):
  """Get abstract (highlights) and article from a story file path."""
  # Based on https://github.com/abisee/cnn-dailymail/blob/master/
  # make_datafiles.py
  raw_lines = [l.lower() for l in _read_text_file(story_file)]

  def _ensure_period(line):
    """Appends ' .' unless the line is empty, a highlight marker, or already
    ends with a sentence-final token (many image captions lack one)."""
    if '@highlight' in line or not line or line[-1] in END_TOKENS:
      return line
    return line + ' .'

  # Split the story into article sentences and highlight sentences.
  article_lines = []
  highlights = []
  next_is_highlight = False
  for line in (_ensure_period(l) for l in raw_lines):
    if not line:
      continue  # skip blank lines
    if line.startswith('@highlight'):
      next_is_highlight = True
    elif next_is_highlight:
      highlights.append(line)
    else:
      article_lines.append(line)
  article = ' '.join(article_lines)
  # Wrap each highlight sentence in <s> ... </s> tags.
  abstract = ' '.join(
      '%s %s %s' % (SENTENCE_START, sent, SENTENCE_END) for sent in highlights)
  return article, abstract
|
python
|
def _get_art_abs(story_file):
  """Get abstract (highlights) and article from a story file path."""
  # Based on https://github.com/abisee/cnn-dailymail/blob/master/
  # make_datafiles.py
  raw_lines = [l.lower() for l in _read_text_file(story_file)]

  def _ensure_period(line):
    """Appends ' .' unless the line is empty, a highlight marker, or already
    ends with a sentence-final token (many image captions lack one)."""
    if '@highlight' in line or not line or line[-1] in END_TOKENS:
      return line
    return line + ' .'

  # Split the story into article sentences and highlight sentences.
  article_lines = []
  highlights = []
  next_is_highlight = False
  for line in (_ensure_period(l) for l in raw_lines):
    if not line:
      continue  # skip blank lines
    if line.startswith('@highlight'):
      next_is_highlight = True
    elif next_is_highlight:
      highlights.append(line)
    else:
      article_lines.append(line)
  article = ' '.join(article_lines)
  # Wrap each highlight sentence in <s> ... </s> tags.
  abstract = ' '.join(
      '%s %s %s' % (SENTENCE_START, sent, SENTENCE_END) for sent in highlights)
  return article, abstract
|
[
"def",
"_get_art_abs",
"(",
"story_file",
")",
":",
"# Based on https://github.com/abisee/cnn-dailymail/blob/master/",
"# make_datafiles.py",
"lines",
"=",
"_read_text_file",
"(",
"story_file",
")",
"# Lowercase everything",
"lines",
"=",
"[",
"line",
".",
"lower",
"(",
")",
"for",
"line",
"in",
"lines",
"]",
"# Put periods on the ends of lines that are missing them",
"# (this is a problem in the dataset because many image captions don't end in",
"# periods; consequently they end up in the body of the article as run-on",
"# sentences)",
"def",
"fix_missing_period",
"(",
"line",
")",
":",
"\"\"\"Adds a period to a line that is missing a period.\"\"\"",
"if",
"'@highlight'",
"in",
"line",
":",
"return",
"line",
"if",
"not",
"line",
":",
"return",
"line",
"if",
"line",
"[",
"-",
"1",
"]",
"in",
"END_TOKENS",
":",
"return",
"line",
"return",
"line",
"+",
"' .'",
"lines",
"=",
"[",
"fix_missing_period",
"(",
"line",
")",
"for",
"line",
"in",
"lines",
"]",
"# Separate out article and abstract sentences",
"article_lines",
"=",
"[",
"]",
"highlights",
"=",
"[",
"]",
"next_is_highlight",
"=",
"False",
"for",
"line",
"in",
"lines",
":",
"if",
"not",
"line",
":",
"continue",
"# empty line",
"elif",
"line",
".",
"startswith",
"(",
"'@highlight'",
")",
":",
"next_is_highlight",
"=",
"True",
"elif",
"next_is_highlight",
":",
"highlights",
".",
"append",
"(",
"line",
")",
"else",
":",
"article_lines",
".",
"append",
"(",
"line",
")",
"# Make article into a single string",
"article",
"=",
"' '",
".",
"join",
"(",
"article_lines",
")",
"# Make abstract into a single string, putting <s> and </s> tags around",
"# the sentences.",
"abstract",
"=",
"' '",
".",
"join",
"(",
"[",
"'%s %s %s'",
"%",
"(",
"SENTENCE_START",
",",
"sent",
",",
"SENTENCE_END",
")",
"for",
"sent",
"in",
"highlights",
"]",
")",
"return",
"article",
",",
"abstract"
] |
Get abstract (highlights) and article from a story file path.
|
[
"Get",
"abstract",
"(",
"highlights",
")",
"and",
"article",
"from",
"a",
"story",
"file",
"path",
"."
] |
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
|
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L163-L207
|
train
|
s0md3v/Photon
|
plugins/exporter.py
|
exporter
|
def exporter(directory, method, datasets):
    """Export the results.

    Args:
        directory: Output directory; 'exported.json' or 'exported.csv' is
            written inside it.
        method: Export format, 'json' or 'csv' (case-insensitive).
        datasets: Mapping of dataset name to a list of values (or None).
    """
    if method.lower() == 'json':
        # Use a context manager so the file handle is released even if
        # serialization or the write fails (the original leaked it on error).
        with open('{}/exported.json'.format(directory), 'w+') as savefile:
            savefile.write(json.dumps(datasets, indent=4))
    if method.lower() == 'csv':
        # newline='' is required by the csv module to prevent extra blank
        # lines on platforms that translate line endings (see csv docs).
        with open('{}/exported.csv'.format(directory), 'w+',
                  newline='') as csvfile:
            csv_writer = csv.writer(
                csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            for key, values in datasets.items():
                # A None value means the dataset has a name but no entries.
                csv_writer.writerow([key] if values is None else [key] + values)
            # No explicit close(): the with-block handles it (the original
            # called close() redundantly inside the context manager).
|
python
|
def exporter(directory, method, datasets):
    """Export the results.

    Args:
        directory: Output directory; 'exported.json' or 'exported.csv' is
            written inside it.
        method: Export format, 'json' or 'csv' (case-insensitive).
        datasets: Mapping of dataset name to a list of values (or None).
    """
    if method.lower() == 'json':
        # Use a context manager so the file handle is released even if
        # serialization or the write fails (the original leaked it on error).
        with open('{}/exported.json'.format(directory), 'w+') as savefile:
            savefile.write(json.dumps(datasets, indent=4))
    if method.lower() == 'csv':
        # newline='' is required by the csv module to prevent extra blank
        # lines on platforms that translate line endings (see csv docs).
        with open('{}/exported.csv'.format(directory), 'w+',
                  newline='') as csvfile:
            csv_writer = csv.writer(
                csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            for key, values in datasets.items():
                # A None value means the dataset has a name but no entries.
                csv_writer.writerow([key] if values is None else [key] + values)
            # No explicit close(): the with-block handles it (the original
            # called close() redundantly inside the context manager).
|
[
"def",
"exporter",
"(",
"directory",
",",
"method",
",",
"datasets",
")",
":",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"'json'",
":",
"# Convert json_dict to a JSON styled string",
"json_string",
"=",
"json",
".",
"dumps",
"(",
"datasets",
",",
"indent",
"=",
"4",
")",
"savefile",
"=",
"open",
"(",
"'{}/exported.json'",
".",
"format",
"(",
"directory",
")",
",",
"'w+'",
")",
"savefile",
".",
"write",
"(",
"json_string",
")",
"savefile",
".",
"close",
"(",
")",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"'csv'",
":",
"with",
"open",
"(",
"'{}/exported.csv'",
".",
"format",
"(",
"directory",
")",
",",
"'w+'",
")",
"as",
"csvfile",
":",
"csv_writer",
"=",
"csv",
".",
"writer",
"(",
"csvfile",
",",
"delimiter",
"=",
"','",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
")",
"for",
"key",
",",
"values",
"in",
"datasets",
".",
"items",
"(",
")",
":",
"if",
"values",
"is",
"None",
":",
"csv_writer",
".",
"writerow",
"(",
"[",
"key",
"]",
")",
"else",
":",
"csv_writer",
".",
"writerow",
"(",
"[",
"key",
"]",
"+",
"values",
")",
"csvfile",
".",
"close",
"(",
")"
] |
Export the results.
|
[
"Export",
"the",
"results",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/exporter.py#L6-L24
|
train
|
s0md3v/Photon
|
plugins/wayback.py
|
time_machine
|
def time_machine(host, mode):
    """Query archive.org.

    Args:
        host: Host or domain to look up in the wayback index.
        mode: CDX matchType value, e.g. 'host' or 'domain'.

    Returns:
        List of original URLs archived within roughly the last six months.
    """
    # BUG FIX: the CDX API's from/to parameters expect zero-padded yyyymmdd
    # timestamps; the original concatenated str(year)+str(day)+str(month),
    # producing unpadded values with day and month in the wrong order
    # (e.g. Jan 9 2024 -> '202491').
    now = datetime.datetime.now()
    to = now.strftime('%Y%m%d')
    # Approximately six months back; timedelta avoids invalid dates such as
    # the 31st of a shorter month.
    fro = (now - datetime.timedelta(days=182)).strftime('%Y%m%d')
    url = "http://web.archive.org/cdx/search?url=%s&matchType=%s&collapse=urlkey&fl=original&filter=mimetype:text/html&filter=statuscode:200&output=json&from=%s&to=%s" % (host, mode, fro, to)
    response = get(url).text
    # The first row of the JSON payload is the column header; skip it.
    parsed = json.loads(response)[1:]
    return [item[0] for item in parsed]
|
python
|
def time_machine(host, mode):
    """Query archive.org.

    Args:
        host: Host or domain to look up in the wayback index.
        mode: CDX matchType value, e.g. 'host' or 'domain'.

    Returns:
        List of original URLs archived within roughly the last six months.
    """
    # BUG FIX: the CDX API's from/to parameters expect zero-padded yyyymmdd
    # timestamps; the original concatenated str(year)+str(day)+str(month),
    # producing unpadded values with day and month in the wrong order
    # (e.g. Jan 9 2024 -> '202491').
    now = datetime.datetime.now()
    to = now.strftime('%Y%m%d')
    # Approximately six months back; timedelta avoids invalid dates such as
    # the 31st of a shorter month.
    fro = (now - datetime.timedelta(days=182)).strftime('%Y%m%d')
    url = "http://web.archive.org/cdx/search?url=%s&matchType=%s&collapse=urlkey&fl=original&filter=mimetype:text/html&filter=statuscode:200&output=json&from=%s&to=%s" % (host, mode, fro, to)
    response = get(url).text
    # The first row of the JSON payload is the column header; skip it.
    parsed = json.loads(response)[1:]
    return [item[0] for item in parsed]
|
[
"def",
"time_machine",
"(",
"host",
",",
"mode",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"to",
"=",
"str",
"(",
"now",
".",
"year",
")",
"+",
"str",
"(",
"now",
".",
"day",
")",
"+",
"str",
"(",
"now",
".",
"month",
")",
"if",
"now",
".",
"month",
">",
"6",
":",
"fro",
"=",
"str",
"(",
"now",
".",
"year",
")",
"+",
"str",
"(",
"now",
".",
"day",
")",
"+",
"str",
"(",
"now",
".",
"month",
"-",
"6",
")",
"else",
":",
"fro",
"=",
"str",
"(",
"now",
".",
"year",
"-",
"1",
")",
"+",
"str",
"(",
"now",
".",
"day",
")",
"+",
"str",
"(",
"now",
".",
"month",
"+",
"6",
")",
"url",
"=",
"\"http://web.archive.org/cdx/search?url=%s&matchType=%s&collapse=urlkey&fl=original&filter=mimetype:text/html&filter=statuscode:200&output=json&from=%s&to=%s\"",
"%",
"(",
"host",
",",
"mode",
",",
"fro",
",",
"to",
")",
"response",
"=",
"get",
"(",
"url",
")",
".",
"text",
"parsed",
"=",
"json",
".",
"loads",
"(",
"response",
")",
"[",
"1",
":",
"]",
"urls",
"=",
"[",
"]",
"for",
"item",
"in",
"parsed",
":",
"urls",
".",
"append",
"(",
"item",
"[",
"0",
"]",
")",
"return",
"urls"
] |
Query archive.org.
|
[
"Query",
"archive",
".",
"org",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/wayback.py#L8-L22
|
train
|
s0md3v/Photon
|
core/zap.py
|
zap
|
def zap(input_url, archive, domain, host, internal, robots, proxies):
    """Extract links from robots.txt and sitemap.xml.

    Args:
        input_url: Base URL of the target (scheme + host).
        archive: Whether to also pull historical URLs from archive.org.
        domain: Registered domain of the target. NOTE(review): currently
            unused because of the dead `if False:` branch below — confirm
            whether domain-wide lookup was deliberately disabled.
        host: Hostname queried against archive.org.
        internal: Set collecting discovered internal URLs (mutated in place).
        robots: Set collecting URLs found in robots.txt (mutated in place).
        proxies: List of proxy configs; one is chosen at random per request.
    """
    if archive:
        print('%s Fetching URLs from archive.org' % run)
        # NOTE(review): `if False:` makes the domain-wide query unreachable;
        # only the host-level archive.org lookup ever runs.
        if False:
            archived_urls = time_machine(domain, 'domain')
        else:
            archived_urls = time_machine(host, 'host')
        # len - 1 presumably discounts a header/base entry — verify against
        # time_machine's return value.
        print('%s Retrieved %i URLs from archive.org' % (
            good, len(archived_urls) - 1))
        for url in archived_urls:
            verb('Internal page', url)
            internal.add(url)
    # Makes request to robots.txt
    response = requests.get(input_url + '/robots.txt',
                            proxies=random.choice(proxies)).text
    # Making sure robots.txt isn't some fancy 404 page
    if '<body' not in response:
        # If you know it, you know it
        matches = re.findall(r'Allow: (.*)|Disallow: (.*)', response)
        if matches:
            # Iterating over the matches, match is a tuple here
            for match in matches:
                # One item in match will always be empty so will combine both
                # items
                match = ''.join(match)
                # If the URL doesn't use a wildcard
                if '*' not in match:
                    url = input_url + match
                    # Add the URL to internal list for crawling
                    internal.add(url)
                    # Add the URL to robots list
                    robots.add(url)
        print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
    # Makes request to sitemap.xml
    response = requests.get(input_url + '/sitemap.xml',
                            proxies=random.choice(proxies)).text
    # Making sure robots.txt isn't some fancy 404 page
    if '<body' not in response:
        matches = xml_parser(response)
        if matches:  # if there are any matches
            print('%s URLs retrieved from sitemap.xml: %s' % (
                good, len(matches)))
            for match in matches:
                verb('Internal page', match)
                # Cleaning up the URL and adding it to the internal list for
                # crawling
                internal.add(match)
|
python
|
def zap(input_url, archive, domain, host, internal, robots, proxies):
    """Extract links from robots.txt and sitemap.xml.

    Args:
        input_url: Base URL of the target (scheme + host).
        archive: Whether to also pull historical URLs from archive.org.
        domain: Registered domain of the target. NOTE(review): currently
            unused because of the dead `if False:` branch below — confirm
            whether domain-wide lookup was deliberately disabled.
        host: Hostname queried against archive.org.
        internal: Set collecting discovered internal URLs (mutated in place).
        robots: Set collecting URLs found in robots.txt (mutated in place).
        proxies: List of proxy configs; one is chosen at random per request.
    """
    if archive:
        print('%s Fetching URLs from archive.org' % run)
        # NOTE(review): `if False:` makes the domain-wide query unreachable;
        # only the host-level archive.org lookup ever runs.
        if False:
            archived_urls = time_machine(domain, 'domain')
        else:
            archived_urls = time_machine(host, 'host')
        # len - 1 presumably discounts a header/base entry — verify against
        # time_machine's return value.
        print('%s Retrieved %i URLs from archive.org' % (
            good, len(archived_urls) - 1))
        for url in archived_urls:
            verb('Internal page', url)
            internal.add(url)
    # Makes request to robots.txt
    response = requests.get(input_url + '/robots.txt',
                            proxies=random.choice(proxies)).text
    # Making sure robots.txt isn't some fancy 404 page
    if '<body' not in response:
        # If you know it, you know it
        matches = re.findall(r'Allow: (.*)|Disallow: (.*)', response)
        if matches:
            # Iterating over the matches, match is a tuple here
            for match in matches:
                # One item in match will always be empty so will combine both
                # items
                match = ''.join(match)
                # If the URL doesn't use a wildcard
                if '*' not in match:
                    url = input_url + match
                    # Add the URL to internal list for crawling
                    internal.add(url)
                    # Add the URL to robots list
                    robots.add(url)
        print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
    # Makes request to sitemap.xml
    response = requests.get(input_url + '/sitemap.xml',
                            proxies=random.choice(proxies)).text
    # Making sure robots.txt isn't some fancy 404 page
    if '<body' not in response:
        matches = xml_parser(response)
        if matches:  # if there are any matches
            print('%s URLs retrieved from sitemap.xml: %s' % (
                good, len(matches)))
            for match in matches:
                verb('Internal page', match)
                # Cleaning up the URL and adding it to the internal list for
                # crawling
                internal.add(match)
|
[
"def",
"zap",
"(",
"input_url",
",",
"archive",
",",
"domain",
",",
"host",
",",
"internal",
",",
"robots",
",",
"proxies",
")",
":",
"if",
"archive",
":",
"print",
"(",
"'%s Fetching URLs from archive.org'",
"%",
"run",
")",
"if",
"False",
":",
"archived_urls",
"=",
"time_machine",
"(",
"domain",
",",
"'domain'",
")",
"else",
":",
"archived_urls",
"=",
"time_machine",
"(",
"host",
",",
"'host'",
")",
"print",
"(",
"'%s Retrieved %i URLs from archive.org'",
"%",
"(",
"good",
",",
"len",
"(",
"archived_urls",
")",
"-",
"1",
")",
")",
"for",
"url",
"in",
"archived_urls",
":",
"verb",
"(",
"'Internal page'",
",",
"url",
")",
"internal",
".",
"add",
"(",
"url",
")",
"# Makes request to robots.txt",
"response",
"=",
"requests",
".",
"get",
"(",
"input_url",
"+",
"'/robots.txt'",
",",
"proxies",
"=",
"random",
".",
"choice",
"(",
"proxies",
")",
")",
".",
"text",
"# Making sure robots.txt isn't some fancy 404 page",
"if",
"'<body'",
"not",
"in",
"response",
":",
"# If you know it, you know it",
"matches",
"=",
"re",
".",
"findall",
"(",
"r'Allow: (.*)|Disallow: (.*)'",
",",
"response",
")",
"if",
"matches",
":",
"# Iterating over the matches, match is a tuple here",
"for",
"match",
"in",
"matches",
":",
"# One item in match will always be empty so will combine both",
"# items",
"match",
"=",
"''",
".",
"join",
"(",
"match",
")",
"# If the URL doesn't use a wildcard",
"if",
"'*'",
"not",
"in",
"match",
":",
"url",
"=",
"input_url",
"+",
"match",
"# Add the URL to internal list for crawling",
"internal",
".",
"add",
"(",
"url",
")",
"# Add the URL to robots list",
"robots",
".",
"add",
"(",
"url",
")",
"print",
"(",
"'%s URLs retrieved from robots.txt: %s'",
"%",
"(",
"good",
",",
"len",
"(",
"robots",
")",
")",
")",
"# Makes request to sitemap.xml",
"response",
"=",
"requests",
".",
"get",
"(",
"input_url",
"+",
"'/sitemap.xml'",
",",
"proxies",
"=",
"random",
".",
"choice",
"(",
"proxies",
")",
")",
".",
"text",
"# Making sure robots.txt isn't some fancy 404 page",
"if",
"'<body'",
"not",
"in",
"response",
":",
"matches",
"=",
"xml_parser",
"(",
"response",
")",
"if",
"matches",
":",
"# if there are any matches",
"print",
"(",
"'%s URLs retrieved from sitemap.xml: %s'",
"%",
"(",
"good",
",",
"len",
"(",
"matches",
")",
")",
")",
"for",
"match",
"in",
"matches",
":",
"verb",
"(",
"'Internal page'",
",",
"match",
")",
"# Cleaning up the URL and adding it to the internal list for",
"# crawling",
"internal",
".",
"add",
"(",
"match",
")"
] |
Extract links from robots.txt and sitemap.xml.
|
[
"Extract",
"links",
"from",
"robots",
".",
"txt",
"and",
"sitemap",
".",
"xml",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/zap.py#L10-L57
|
train
|
s0md3v/Photon
|
core/requester.py
|
requester
|
def requester(
        url,
        main_url=None,
        delay=0,
        cook=None,
        headers=None,
        timeout=10,
        host=None,
        proxies=[None],
        user_agents=[None],
        failed=None,
        processed=None
):
    """Handle the requests and return the response body.

    Args:
        url: URL to fetch.
        main_url: Unused; kept for call-site compatibility.
        delay: Seconds to sleep before the request (rate limiting).
        cook: Cookies to send with the request.
        headers: Full header dict; overrides the defaults when provided.
        timeout: Per-request timeout in seconds.
        host: Value for the default 'Host' header.
        proxies: Proxy configs; one is picked at random (never mutated, so
            the mutable default is benign).
        user_agents: User-agent strings; one is picked at random.
        failed: Set collecting URLs that returned 404 (mutated in place).
        processed: Set collecting URLs already crawled (mutated in place).

    Returns:
        The response text for text/html or text/plain responses, otherwise
        the sentinel string 'dummy'.
    """
    cook = cook or set()
    headers = headers or set()
    user_agents = user_agents or ['Photon']
    failed = failed or set()
    processed = processed or set()
    # Mark the URL as crawled
    processed.add(url)
    # Pause/sleep the program for specified time
    time.sleep(delay)

    def make_request(url):
        """Default request"""
        final_headers = headers or {
            'Host': host,
            # Selecting a random user-agent
            'User-Agent': random.choice(user_agents),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip',
            'DNT': '1',
            'Connection': 'close',
        }
        try:
            response = SESSION.get(
                url,
                cookies=cook,
                headers=final_headers,
                verify=False,
                timeout=timeout,
                stream=True,
                proxies=random.choice(proxies)
            )
        except TooManyRedirects:
            return 'dummy'
        # The content-type header may be absent; treat that as non-text.
        content_type = response.headers.get('content-type', '')
        if 'text/html' in content_type or 'text/plain' in content_type:
            # BUG FIX: status_code is an int, so the original comparison
            # against the string '404' was always True — 404 pages were
            # returned as content and never recorded in `failed`.
            if response.status_code != 404:
                return response.text
            response.close()
            failed.add(url)
            return 'dummy'
        response.close()
        return 'dummy'

    return make_request(url)
|
python
|
def requester(
        url,
        main_url=None,
        delay=0,
        cook=None,
        headers=None,
        timeout=10,
        host=None,
        proxies=[None],
        user_agents=[None],
        failed=None,
        processed=None
):
    """Handle the requests and return the response body.

    Args:
        url: URL to fetch.
        main_url: Unused; kept for call-site compatibility.
        delay: Seconds to sleep before the request (rate limiting).
        cook: Cookies to send with the request.
        headers: Full header dict; overrides the defaults when provided.
        timeout: Per-request timeout in seconds.
        host: Value for the default 'Host' header.
        proxies: Proxy configs; one is picked at random (never mutated, so
            the mutable default is benign).
        user_agents: User-agent strings; one is picked at random.
        failed: Set collecting URLs that returned 404 (mutated in place).
        processed: Set collecting URLs already crawled (mutated in place).

    Returns:
        The response text for text/html or text/plain responses, otherwise
        the sentinel string 'dummy'.
    """
    cook = cook or set()
    headers = headers or set()
    user_agents = user_agents or ['Photon']
    failed = failed or set()
    processed = processed or set()
    # Mark the URL as crawled
    processed.add(url)
    # Pause/sleep the program for specified time
    time.sleep(delay)

    def make_request(url):
        """Default request"""
        final_headers = headers or {
            'Host': host,
            # Selecting a random user-agent
            'User-Agent': random.choice(user_agents),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip',
            'DNT': '1',
            'Connection': 'close',
        }
        try:
            response = SESSION.get(
                url,
                cookies=cook,
                headers=final_headers,
                verify=False,
                timeout=timeout,
                stream=True,
                proxies=random.choice(proxies)
            )
        except TooManyRedirects:
            return 'dummy'
        # The content-type header may be absent; treat that as non-text.
        content_type = response.headers.get('content-type', '')
        if 'text/html' in content_type or 'text/plain' in content_type:
            # BUG FIX: status_code is an int, so the original comparison
            # against the string '404' was always True — 404 pages were
            # returned as content and never recorded in `failed`.
            if response.status_code != 404:
                return response.text
            response.close()
            failed.add(url)
            return 'dummy'
        response.close()
        return 'dummy'

    return make_request(url)
|
[
"def",
"requester",
"(",
"url",
",",
"main_url",
"=",
"None",
",",
"delay",
"=",
"0",
",",
"cook",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"timeout",
"=",
"10",
",",
"host",
"=",
"None",
",",
"proxies",
"=",
"[",
"None",
"]",
",",
"user_agents",
"=",
"[",
"None",
"]",
",",
"failed",
"=",
"None",
",",
"processed",
"=",
"None",
")",
":",
"cook",
"=",
"cook",
"or",
"set",
"(",
")",
"headers",
"=",
"headers",
"or",
"set",
"(",
")",
"user_agents",
"=",
"user_agents",
"or",
"[",
"'Photon'",
"]",
"failed",
"=",
"failed",
"or",
"set",
"(",
")",
"processed",
"=",
"processed",
"or",
"set",
"(",
")",
"# Mark the URL as crawled",
"processed",
".",
"add",
"(",
"url",
")",
"# Pause/sleep the program for specified time",
"time",
".",
"sleep",
"(",
"delay",
")",
"def",
"make_request",
"(",
"url",
")",
":",
"\"\"\"Default request\"\"\"",
"final_headers",
"=",
"headers",
"or",
"{",
"'Host'",
":",
"host",
",",
"# Selecting a random user-agent",
"'User-Agent'",
":",
"random",
".",
"choice",
"(",
"user_agents",
")",
",",
"'Accept'",
":",
"'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'",
",",
"'Accept-Language'",
":",
"'en-US,en;q=0.5'",
",",
"'Accept-Encoding'",
":",
"'gzip'",
",",
"'DNT'",
":",
"'1'",
",",
"'Connection'",
":",
"'close'",
",",
"}",
"try",
":",
"response",
"=",
"SESSION",
".",
"get",
"(",
"url",
",",
"cookies",
"=",
"cook",
",",
"headers",
"=",
"final_headers",
",",
"verify",
"=",
"False",
",",
"timeout",
"=",
"timeout",
",",
"stream",
"=",
"True",
",",
"proxies",
"=",
"random",
".",
"choice",
"(",
"proxies",
")",
")",
"except",
"TooManyRedirects",
":",
"return",
"'dummy'",
"if",
"'text/html'",
"in",
"response",
".",
"headers",
"[",
"'content-type'",
"]",
"or",
"'text/plain'",
"in",
"response",
".",
"headers",
"[",
"'content-type'",
"]",
":",
"if",
"response",
".",
"status_code",
"!=",
"'404'",
":",
"return",
"response",
".",
"text",
"else",
":",
"response",
".",
"close",
"(",
")",
"failed",
".",
"add",
"(",
"url",
")",
"return",
"'dummy'",
"else",
":",
"response",
".",
"close",
"(",
")",
"return",
"'dummy'",
"return",
"make_request",
"(",
"url",
")"
] |
Handle the requests and return the response body.
|
[
"Handle",
"the",
"requests",
"and",
"return",
"the",
"response",
"body",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/requester.py#L11-L72
|
train
|
s0md3v/Photon
|
photon.py
|
intel_extractor
|
def intel_extractor(url, response):
"""Extract intel from the response body."""
for rintel in rintels:
res = re.sub(r'<(script).*?</\1>(?s)', '', response)
res = re.sub(r'<[^<]+?>', '', res)
matches = rintel[0].findall(res)
if matches:
for match in matches:
verb('Intel', match)
bad_intel.add((match, rintel[1], url))
|
python
|
def intel_extractor(url, response):
"""Extract intel from the response body."""
for rintel in rintels:
res = re.sub(r'<(script).*?</\1>(?s)', '', response)
res = re.sub(r'<[^<]+?>', '', res)
matches = rintel[0].findall(res)
if matches:
for match in matches:
verb('Intel', match)
bad_intel.add((match, rintel[1], url))
|
[
"def",
"intel_extractor",
"(",
"url",
",",
"response",
")",
":",
"for",
"rintel",
"in",
"rintels",
":",
"res",
"=",
"re",
".",
"sub",
"(",
"r'<(script).*?</\\1>(?s)'",
",",
"''",
",",
"response",
")",
"res",
"=",
"re",
".",
"sub",
"(",
"r'<[^<]+?>'",
",",
"''",
",",
"res",
")",
"matches",
"=",
"rintel",
"[",
"0",
"]",
".",
"findall",
"(",
"res",
")",
"if",
"matches",
":",
"for",
"match",
"in",
"matches",
":",
"verb",
"(",
"'Intel'",
",",
"match",
")",
"bad_intel",
".",
"add",
"(",
"(",
"match",
",",
"rintel",
"[",
"1",
"]",
",",
"url",
")",
")"
] |
Extract intel from the response body.
|
[
"Extract",
"intel",
"from",
"the",
"response",
"body",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L208-L217
|
train
|
s0md3v/Photon
|
photon.py
|
js_extractor
|
def js_extractor(response):
"""Extract js files from the response body"""
# Extract .js files
matches = rscript.findall(response)
for match in matches:
match = match[2].replace('\'', '').replace('"', '')
verb('JS file', match)
bad_scripts.add(match)
|
python
|
def js_extractor(response):
"""Extract js files from the response body"""
# Extract .js files
matches = rscript.findall(response)
for match in matches:
match = match[2].replace('\'', '').replace('"', '')
verb('JS file', match)
bad_scripts.add(match)
|
[
"def",
"js_extractor",
"(",
"response",
")",
":",
"# Extract .js files\r",
"matches",
"=",
"rscript",
".",
"findall",
"(",
"response",
")",
"for",
"match",
"in",
"matches",
":",
"match",
"=",
"match",
"[",
"2",
"]",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
"verb",
"(",
"'JS file'",
",",
"match",
")",
"bad_scripts",
".",
"add",
"(",
"match",
")"
] |
Extract js files from the response body
|
[
"Extract",
"js",
"files",
"from",
"the",
"response",
"body"
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L220-L227
|
train
|
s0md3v/Photon
|
photon.py
|
extractor
|
def extractor(url):
"""Extract details from the response body."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
if clone:
mirror(url, response)
matches = rhref.findall(response)
for link in matches:
# Remove everything after a "#" to deal with in-page anchors
link = link[1].replace('\'', '').replace('"', '').split('#')[0]
# Checks if the URLs should be crawled
if is_link(link, processed, files):
if link[:4] == 'http':
if link.startswith(main_url):
verb('Internal page', link)
internal.add(link)
else:
verb('External page', link)
external.add(link)
elif link[:2] == '//':
if link.split('/')[2].startswith(host):
verb('Internal page', link)
internal.add(schema + '://' + link)
else:
verb('External page', link)
external.add(link)
elif link[:1] == '/':
verb('Internal page', link)
internal.add(remove_file(url) + link)
else:
verb('Internal page', link)
usable_url = remove_file(url)
if usable_url.endswith('/'):
internal.add(usable_url + link)
elif link.startswith('/'):
internal.add(usable_url + link)
else:
internal.add(usable_url + '/' + link)
if not only_urls:
intel_extractor(url, response)
js_extractor(response)
if args.regex and not supress_regex:
regxy(args.regex, response, supress_regex, custom)
if api:
matches = rentropy.findall(response)
for match in matches:
if entropy(match) >= 4:
verb('Key', match)
keys.add(url + ': ' + match)
|
python
|
def extractor(url):
"""Extract details from the response body."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
if clone:
mirror(url, response)
matches = rhref.findall(response)
for link in matches:
# Remove everything after a "#" to deal with in-page anchors
link = link[1].replace('\'', '').replace('"', '').split('#')[0]
# Checks if the URLs should be crawled
if is_link(link, processed, files):
if link[:4] == 'http':
if link.startswith(main_url):
verb('Internal page', link)
internal.add(link)
else:
verb('External page', link)
external.add(link)
elif link[:2] == '//':
if link.split('/')[2].startswith(host):
verb('Internal page', link)
internal.add(schema + '://' + link)
else:
verb('External page', link)
external.add(link)
elif link[:1] == '/':
verb('Internal page', link)
internal.add(remove_file(url) + link)
else:
verb('Internal page', link)
usable_url = remove_file(url)
if usable_url.endswith('/'):
internal.add(usable_url + link)
elif link.startswith('/'):
internal.add(usable_url + link)
else:
internal.add(usable_url + '/' + link)
if not only_urls:
intel_extractor(url, response)
js_extractor(response)
if args.regex and not supress_regex:
regxy(args.regex, response, supress_regex, custom)
if api:
matches = rentropy.findall(response)
for match in matches:
if entropy(match) >= 4:
verb('Key', match)
keys.add(url + ': ' + match)
|
[
"def",
"extractor",
"(",
"url",
")",
":",
"response",
"=",
"requester",
"(",
"url",
",",
"main_url",
",",
"delay",
",",
"cook",
",",
"headers",
",",
"timeout",
",",
"host",
",",
"proxies",
",",
"user_agents",
",",
"failed",
",",
"processed",
")",
"if",
"clone",
":",
"mirror",
"(",
"url",
",",
"response",
")",
"matches",
"=",
"rhref",
".",
"findall",
"(",
"response",
")",
"for",
"link",
"in",
"matches",
":",
"# Remove everything after a \"#\" to deal with in-page anchors\r",
"link",
"=",
"link",
"[",
"1",
"]",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
".",
"split",
"(",
"'#'",
")",
"[",
"0",
"]",
"# Checks if the URLs should be crawled\r",
"if",
"is_link",
"(",
"link",
",",
"processed",
",",
"files",
")",
":",
"if",
"link",
"[",
":",
"4",
"]",
"==",
"'http'",
":",
"if",
"link",
".",
"startswith",
"(",
"main_url",
")",
":",
"verb",
"(",
"'Internal page'",
",",
"link",
")",
"internal",
".",
"add",
"(",
"link",
")",
"else",
":",
"verb",
"(",
"'External page'",
",",
"link",
")",
"external",
".",
"add",
"(",
"link",
")",
"elif",
"link",
"[",
":",
"2",
"]",
"==",
"'//'",
":",
"if",
"link",
".",
"split",
"(",
"'/'",
")",
"[",
"2",
"]",
".",
"startswith",
"(",
"host",
")",
":",
"verb",
"(",
"'Internal page'",
",",
"link",
")",
"internal",
".",
"add",
"(",
"schema",
"+",
"'://'",
"+",
"link",
")",
"else",
":",
"verb",
"(",
"'External page'",
",",
"link",
")",
"external",
".",
"add",
"(",
"link",
")",
"elif",
"link",
"[",
":",
"1",
"]",
"==",
"'/'",
":",
"verb",
"(",
"'Internal page'",
",",
"link",
")",
"internal",
".",
"add",
"(",
"remove_file",
"(",
"url",
")",
"+",
"link",
")",
"else",
":",
"verb",
"(",
"'Internal page'",
",",
"link",
")",
"usable_url",
"=",
"remove_file",
"(",
"url",
")",
"if",
"usable_url",
".",
"endswith",
"(",
"'/'",
")",
":",
"internal",
".",
"add",
"(",
"usable_url",
"+",
"link",
")",
"elif",
"link",
".",
"startswith",
"(",
"'/'",
")",
":",
"internal",
".",
"add",
"(",
"usable_url",
"+",
"link",
")",
"else",
":",
"internal",
".",
"add",
"(",
"usable_url",
"+",
"'/'",
"+",
"link",
")",
"if",
"not",
"only_urls",
":",
"intel_extractor",
"(",
"url",
",",
"response",
")",
"js_extractor",
"(",
"response",
")",
"if",
"args",
".",
"regex",
"and",
"not",
"supress_regex",
":",
"regxy",
"(",
"args",
".",
"regex",
",",
"response",
",",
"supress_regex",
",",
"custom",
")",
"if",
"api",
":",
"matches",
"=",
"rentropy",
".",
"findall",
"(",
"response",
")",
"for",
"match",
"in",
"matches",
":",
"if",
"entropy",
"(",
"match",
")",
">=",
"4",
":",
"verb",
"(",
"'Key'",
",",
"match",
")",
"keys",
".",
"add",
"(",
"url",
"+",
"': '",
"+",
"match",
")"
] |
Extract details from the response body.
|
[
"Extract",
"details",
"from",
"the",
"response",
"body",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L239-L287
|
train
|
s0md3v/Photon
|
photon.py
|
jscanner
|
def jscanner(url):
"""Extract endpoints from JavaScript code."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
# Extract URLs/endpoints
matches = rendpoint.findall(response)
# Iterate over the matches, match is a tuple
for match in matches:
# Combining the items because one of them is always empty
match = match[0] + match[1]
# Making sure it's not some JavaScript code
if not re.search(r'[}{><"\']', match) and not match == '/':
verb('JS endpoint', match)
endpoints.add(match)
|
python
|
def jscanner(url):
"""Extract endpoints from JavaScript code."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
# Extract URLs/endpoints
matches = rendpoint.findall(response)
# Iterate over the matches, match is a tuple
for match in matches:
# Combining the items because one of them is always empty
match = match[0] + match[1]
# Making sure it's not some JavaScript code
if not re.search(r'[}{><"\']', match) and not match == '/':
verb('JS endpoint', match)
endpoints.add(match)
|
[
"def",
"jscanner",
"(",
"url",
")",
":",
"response",
"=",
"requester",
"(",
"url",
",",
"main_url",
",",
"delay",
",",
"cook",
",",
"headers",
",",
"timeout",
",",
"host",
",",
"proxies",
",",
"user_agents",
",",
"failed",
",",
"processed",
")",
"# Extract URLs/endpoints\r",
"matches",
"=",
"rendpoint",
".",
"findall",
"(",
"response",
")",
"# Iterate over the matches, match is a tuple\r",
"for",
"match",
"in",
"matches",
":",
"# Combining the items because one of them is always empty\r",
"match",
"=",
"match",
"[",
"0",
"]",
"+",
"match",
"[",
"1",
"]",
"# Making sure it's not some JavaScript code\r",
"if",
"not",
"re",
".",
"search",
"(",
"r'[}{><\"\\']'",
",",
"match",
")",
"and",
"not",
"match",
"==",
"'/'",
":",
"verb",
"(",
"'JS endpoint'",
",",
"match",
")",
"endpoints",
".",
"add",
"(",
"match",
")"
] |
Extract endpoints from JavaScript code.
|
[
"Extract",
"endpoints",
"from",
"JavaScript",
"code",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L290-L302
|
train
|
s0md3v/Photon
|
core/updater.py
|
updater
|
def updater():
"""Update the current installation.
git clones the latest version and merges it with the current directory.
"""
print('%s Checking for updates' % run)
# Changes must be separated by ;
changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''
latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com')
# Just a hack to see if a new version is available
if changes not in latest_commit:
changelog = re.search(r"changes = '''(.*?)'''", latest_commit)
# Splitting the changes to form a list
changelog = changelog.group(1).split(';')
print('%s A new version of Photon is available.' % good)
print('%s Changes:' % info)
for change in changelog: # print changes
print('%s>%s %s' % (green, end, change))
current_path = os.getcwd().split('/') # if you know it, you know it
folder = current_path[-1] # current directory name
path = '/'.join(current_path) # current directory path
choice = input('%s Would you like to update? [Y/n] ' % que).lower()
if choice != 'n':
print('%s Updating Photon' % run)
os.system('git clone --quiet https://github.com/s0md3v/Photon %s'
% (folder))
os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'
% (path, folder, path, path, folder))
print('%s Update successful!' % good)
else:
print('%s Photon is up to date!' % good)
|
python
|
def updater():
"""Update the current installation.
git clones the latest version and merges it with the current directory.
"""
print('%s Checking for updates' % run)
# Changes must be separated by ;
changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''
latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com')
# Just a hack to see if a new version is available
if changes not in latest_commit:
changelog = re.search(r"changes = '''(.*?)'''", latest_commit)
# Splitting the changes to form a list
changelog = changelog.group(1).split(';')
print('%s A new version of Photon is available.' % good)
print('%s Changes:' % info)
for change in changelog: # print changes
print('%s>%s %s' % (green, end, change))
current_path = os.getcwd().split('/') # if you know it, you know it
folder = current_path[-1] # current directory name
path = '/'.join(current_path) # current directory path
choice = input('%s Would you like to update? [Y/n] ' % que).lower()
if choice != 'n':
print('%s Updating Photon' % run)
os.system('git clone --quiet https://github.com/s0md3v/Photon %s'
% (folder))
os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'
% (path, folder, path, path, folder))
print('%s Update successful!' % good)
else:
print('%s Photon is up to date!' % good)
|
[
"def",
"updater",
"(",
")",
":",
"print",
"(",
"'%s Checking for updates'",
"%",
"run",
")",
"# Changes must be separated by ;",
"changes",
"=",
"'''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''",
"latest_commit",
"=",
"requester",
"(",
"'https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py'",
",",
"host",
"=",
"'raw.githubusercontent.com'",
")",
"# Just a hack to see if a new version is available",
"if",
"changes",
"not",
"in",
"latest_commit",
":",
"changelog",
"=",
"re",
".",
"search",
"(",
"r\"changes = '''(.*?)'''\"",
",",
"latest_commit",
")",
"# Splitting the changes to form a list",
"changelog",
"=",
"changelog",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"';'",
")",
"print",
"(",
"'%s A new version of Photon is available.'",
"%",
"good",
")",
"print",
"(",
"'%s Changes:'",
"%",
"info",
")",
"for",
"change",
"in",
"changelog",
":",
"# print changes",
"print",
"(",
"'%s>%s %s'",
"%",
"(",
"green",
",",
"end",
",",
"change",
")",
")",
"current_path",
"=",
"os",
".",
"getcwd",
"(",
")",
".",
"split",
"(",
"'/'",
")",
"# if you know it, you know it",
"folder",
"=",
"current_path",
"[",
"-",
"1",
"]",
"# current directory name",
"path",
"=",
"'/'",
".",
"join",
"(",
"current_path",
")",
"# current directory path",
"choice",
"=",
"input",
"(",
"'%s Would you like to update? [Y/n] '",
"%",
"que",
")",
".",
"lower",
"(",
")",
"if",
"choice",
"!=",
"'n'",
":",
"print",
"(",
"'%s Updating Photon'",
"%",
"run",
")",
"os",
".",
"system",
"(",
"'git clone --quiet https://github.com/s0md3v/Photon %s'",
"%",
"(",
"folder",
")",
")",
"os",
".",
"system",
"(",
"'cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'",
"%",
"(",
"path",
",",
"folder",
",",
"path",
",",
"path",
",",
"folder",
")",
")",
"print",
"(",
"'%s Update successful!'",
"%",
"good",
")",
"else",
":",
"print",
"(",
"'%s Photon is up to date!'",
"%",
"good",
")"
] |
Update the current installation.
git clones the latest version and merges it with the current directory.
|
[
"Update",
"the",
"current",
"installation",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/updater.py#L8-L40
|
train
|
s0md3v/Photon
|
plugins/find_subdomains.py
|
find_subdomains
|
def find_subdomains(domain):
"""Find subdomains according to the TLD."""
result = set()
response = get('https://findsubdomains.com/subdomains-of/' + domain).text
matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response)
for match in matches:
result.add(match.replace(' ', '').replace('\n', ''))
return list(result)
|
python
|
def find_subdomains(domain):
"""Find subdomains according to the TLD."""
result = set()
response = get('https://findsubdomains.com/subdomains-of/' + domain).text
matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response)
for match in matches:
result.add(match.replace(' ', '').replace('\n', ''))
return list(result)
|
[
"def",
"find_subdomains",
"(",
"domain",
")",
":",
"result",
"=",
"set",
"(",
")",
"response",
"=",
"get",
"(",
"'https://findsubdomains.com/subdomains-of/'",
"+",
"domain",
")",
".",
"text",
"matches",
"=",
"findall",
"(",
"r'(?s)<div class=\"domains js-domain-name\">(.*?)</div>'",
",",
"response",
")",
"for",
"match",
"in",
"matches",
":",
"result",
".",
"add",
"(",
"match",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
")",
"return",
"list",
"(",
"result",
")"
] |
Find subdomains according to the TLD.
|
[
"Find",
"subdomains",
"according",
"to",
"the",
"TLD",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/find_subdomains.py#L7-L14
|
train
|
s0md3v/Photon
|
core/flash.py
|
flash
|
def flash(function, links, thread_count):
"""Process the URLs and uses a threadpool to execute a function."""
# Convert links (set) to list
links = list(links)
threadpool = concurrent.futures.ThreadPoolExecutor(
max_workers=thread_count)
futures = (threadpool.submit(function, link) for link in links)
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
if i + 1 == len(links) or (i + 1) % thread_count == 0:
print('%s Progress: %i/%i' % (info, i + 1, len(links)),
end='\r')
print('')
|
python
|
def flash(function, links, thread_count):
"""Process the URLs and uses a threadpool to execute a function."""
# Convert links (set) to list
links = list(links)
threadpool = concurrent.futures.ThreadPoolExecutor(
max_workers=thread_count)
futures = (threadpool.submit(function, link) for link in links)
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
if i + 1 == len(links) or (i + 1) % thread_count == 0:
print('%s Progress: %i/%i' % (info, i + 1, len(links)),
end='\r')
print('')
|
[
"def",
"flash",
"(",
"function",
",",
"links",
",",
"thread_count",
")",
":",
"# Convert links (set) to list",
"links",
"=",
"list",
"(",
"links",
")",
"threadpool",
"=",
"concurrent",
".",
"futures",
".",
"ThreadPoolExecutor",
"(",
"max_workers",
"=",
"thread_count",
")",
"futures",
"=",
"(",
"threadpool",
".",
"submit",
"(",
"function",
",",
"link",
")",
"for",
"link",
"in",
"links",
")",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"concurrent",
".",
"futures",
".",
"as_completed",
"(",
"futures",
")",
")",
":",
"if",
"i",
"+",
"1",
"==",
"len",
"(",
"links",
")",
"or",
"(",
"i",
"+",
"1",
")",
"%",
"thread_count",
"==",
"0",
":",
"print",
"(",
"'%s Progress: %i/%i'",
"%",
"(",
"info",
",",
"i",
"+",
"1",
",",
"len",
"(",
"links",
")",
")",
",",
"end",
"=",
"'\\r'",
")",
"print",
"(",
"''",
")"
] |
Process the URLs and uses a threadpool to execute a function.
|
[
"Process",
"the",
"URLs",
"and",
"uses",
"a",
"threadpool",
"to",
"execute",
"a",
"function",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/flash.py#L6-L17
|
train
|
s0md3v/Photon
|
core/utils.py
|
regxy
|
def regxy(pattern, response, supress_regex, custom):
"""Extract a string based on regex pattern supplied by user."""
try:
matches = re.findall(r'%s' % pattern, response)
for match in matches:
verb('Custom regex', match)
custom.add(match)
except:
supress_regex = True
|
python
|
def regxy(pattern, response, supress_regex, custom):
"""Extract a string based on regex pattern supplied by user."""
try:
matches = re.findall(r'%s' % pattern, response)
for match in matches:
verb('Custom regex', match)
custom.add(match)
except:
supress_regex = True
|
[
"def",
"regxy",
"(",
"pattern",
",",
"response",
",",
"supress_regex",
",",
"custom",
")",
":",
"try",
":",
"matches",
"=",
"re",
".",
"findall",
"(",
"r'%s'",
"%",
"pattern",
",",
"response",
")",
"for",
"match",
"in",
"matches",
":",
"verb",
"(",
"'Custom regex'",
",",
"match",
")",
"custom",
".",
"add",
"(",
"match",
")",
"except",
":",
"supress_regex",
"=",
"True"
] |
Extract a string based on regex pattern supplied by user.
|
[
"Extract",
"a",
"string",
"based",
"on",
"regex",
"pattern",
"supplied",
"by",
"user",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L15-L23
|
train
|
s0md3v/Photon
|
core/utils.py
|
is_link
|
def is_link(url, processed, files):
"""
Determine whether or not a link should be crawled
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled
"""
if url not in processed:
is_file = url.endswith(BAD_TYPES)
if is_file:
files.add(url)
return False
return True
return False
|
python
|
def is_link(url, processed, files):
"""
Determine whether or not a link should be crawled
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled
"""
if url not in processed:
is_file = url.endswith(BAD_TYPES)
if is_file:
files.add(url)
return False
return True
return False
|
[
"def",
"is_link",
"(",
"url",
",",
"processed",
",",
"files",
")",
":",
"if",
"url",
"not",
"in",
"processed",
":",
"is_file",
"=",
"url",
".",
"endswith",
"(",
"BAD_TYPES",
")",
"if",
"is_file",
":",
"files",
".",
"add",
"(",
"url",
")",
"return",
"False",
"return",
"True",
"return",
"False"
] |
Determine whether or not a link should be crawled
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled
|
[
"Determine",
"whether",
"or",
"not",
"a",
"link",
"should",
"be",
"crawled",
"A",
"url",
"should",
"not",
"be",
"crawled",
"if",
"it",
"-",
"Is",
"a",
"file",
"-",
"Has",
"already",
"been",
"crawled"
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L26-L46
|
train
|
s0md3v/Photon
|
core/utils.py
|
remove_regex
|
def remove_regex(urls, regex):
"""
Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex
"""
if not regex:
return urls
# To avoid iterating over the characters of a string
if not isinstance(urls, (list, set, tuple)):
urls = [urls]
try:
non_matching_urls = [url for url in urls if not re.search(regex, url)]
except TypeError:
return []
return non_matching_urls
|
python
|
def remove_regex(urls, regex):
"""
Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex
"""
if not regex:
return urls
# To avoid iterating over the characters of a string
if not isinstance(urls, (list, set, tuple)):
urls = [urls]
try:
non_matching_urls = [url for url in urls if not re.search(regex, url)]
except TypeError:
return []
return non_matching_urls
|
[
"def",
"remove_regex",
"(",
"urls",
",",
"regex",
")",
":",
"if",
"not",
"regex",
":",
"return",
"urls",
"# To avoid iterating over the characters of a string",
"if",
"not",
"isinstance",
"(",
"urls",
",",
"(",
"list",
",",
"set",
",",
"tuple",
")",
")",
":",
"urls",
"=",
"[",
"urls",
"]",
"try",
":",
"non_matching_urls",
"=",
"[",
"url",
"for",
"url",
"in",
"urls",
"if",
"not",
"re",
".",
"search",
"(",
"regex",
",",
"url",
")",
"]",
"except",
"TypeError",
":",
"return",
"[",
"]",
"return",
"non_matching_urls"
] |
Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex
|
[
"Parse",
"a",
"list",
"for",
"non",
"-",
"matches",
"to",
"a",
"regex",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L49-L73
|
train
|
s0md3v/Photon
|
core/utils.py
|
writer
|
def writer(datasets, dataset_names, output_dir):
"""Write the results."""
for dataset, dataset_name in zip(datasets, dataset_names):
if dataset:
filepath = output_dir + '/' + dataset_name + '.txt'
with open(filepath, 'w+') as out_file:
joined = '\n'.join(dataset)
out_file.write(str(joined.encode('utf-8').decode('utf-8')))
out_file.write('\n')
|
python
|
def writer(datasets, dataset_names, output_dir):
"""Write the results."""
for dataset, dataset_name in zip(datasets, dataset_names):
if dataset:
filepath = output_dir + '/' + dataset_name + '.txt'
with open(filepath, 'w+') as out_file:
joined = '\n'.join(dataset)
out_file.write(str(joined.encode('utf-8').decode('utf-8')))
out_file.write('\n')
|
[
"def",
"writer",
"(",
"datasets",
",",
"dataset_names",
",",
"output_dir",
")",
":",
"for",
"dataset",
",",
"dataset_name",
"in",
"zip",
"(",
"datasets",
",",
"dataset_names",
")",
":",
"if",
"dataset",
":",
"filepath",
"=",
"output_dir",
"+",
"'/'",
"+",
"dataset_name",
"+",
"'.txt'",
"with",
"open",
"(",
"filepath",
",",
"'w+'",
")",
"as",
"out_file",
":",
"joined",
"=",
"'\\n'",
".",
"join",
"(",
"dataset",
")",
"out_file",
".",
"write",
"(",
"str",
"(",
"joined",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"out_file",
".",
"write",
"(",
"'\\n'",
")"
] |
Write the results.
|
[
"Write",
"the",
"results",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L76-L84
|
train
|
s0md3v/Photon
|
core/utils.py
|
timer
|
def timer(diff, processed):
"""Return the passed time."""
# Changes seconds into minutes and seconds
minutes, seconds = divmod(diff, 60)
try:
# Finds average time taken by requests
time_per_request = diff / float(len(processed))
except ZeroDivisionError:
time_per_request = 0
return minutes, seconds, time_per_request
|
python
|
def timer(diff, processed):
"""Return the passed time."""
# Changes seconds into minutes and seconds
minutes, seconds = divmod(diff, 60)
try:
# Finds average time taken by requests
time_per_request = diff / float(len(processed))
except ZeroDivisionError:
time_per_request = 0
return minutes, seconds, time_per_request
|
[
"def",
"timer",
"(",
"diff",
",",
"processed",
")",
":",
"# Changes seconds into minutes and seconds",
"minutes",
",",
"seconds",
"=",
"divmod",
"(",
"diff",
",",
"60",
")",
"try",
":",
"# Finds average time taken by requests",
"time_per_request",
"=",
"diff",
"/",
"float",
"(",
"len",
"(",
"processed",
")",
")",
"except",
"ZeroDivisionError",
":",
"time_per_request",
"=",
"0",
"return",
"minutes",
",",
"seconds",
",",
"time_per_request"
] |
Return the passed time.
|
[
"Return",
"the",
"passed",
"time",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L87-L96
|
train
|
s0md3v/Photon
|
core/utils.py
|
entropy
|
def entropy(string):
"""Calculate the entropy of a string."""
entropy = 0
for number in range(256):
result = float(string.encode('utf-8').count(
chr(number))) / len(string.encode('utf-8'))
if result != 0:
entropy = entropy - result * math.log(result, 2)
return entropy
|
python
|
def entropy(string):
"""Calculate the entropy of a string."""
entropy = 0
for number in range(256):
result = float(string.encode('utf-8').count(
chr(number))) / len(string.encode('utf-8'))
if result != 0:
entropy = entropy - result * math.log(result, 2)
return entropy
|
[
"def",
"entropy",
"(",
"string",
")",
":",
"entropy",
"=",
"0",
"for",
"number",
"in",
"range",
"(",
"256",
")",
":",
"result",
"=",
"float",
"(",
"string",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"count",
"(",
"chr",
"(",
"number",
")",
")",
")",
"/",
"len",
"(",
"string",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"if",
"result",
"!=",
"0",
":",
"entropy",
"=",
"entropy",
"-",
"result",
"*",
"math",
".",
"log",
"(",
"result",
",",
"2",
")",
"return",
"entropy"
] |
Calculate the entropy of a string.
|
[
"Calculate",
"the",
"entropy",
"of",
"a",
"string",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L99-L107
|
train
|
s0md3v/Photon
|
core/utils.py
|
extract_headers
|
def extract_headers(headers):
"""This function extracts valid headers from interactive input."""
sorted_headers = {}
matches = re.findall(r'(.*):\s(.*)', headers)
for match in matches:
header = match[0]
value = match[1]
try:
if value[-1] == ',':
value = value[:-1]
sorted_headers[header] = value
except IndexError:
pass
return sorted_headers
|
python
|
def extract_headers(headers):
"""This function extracts valid headers from interactive input."""
sorted_headers = {}
matches = re.findall(r'(.*):\s(.*)', headers)
for match in matches:
header = match[0]
value = match[1]
try:
if value[-1] == ',':
value = value[:-1]
sorted_headers[header] = value
except IndexError:
pass
return sorted_headers
|
[
"def",
"extract_headers",
"(",
"headers",
")",
":",
"sorted_headers",
"=",
"{",
"}",
"matches",
"=",
"re",
".",
"findall",
"(",
"r'(.*):\\s(.*)'",
",",
"headers",
")",
"for",
"match",
"in",
"matches",
":",
"header",
"=",
"match",
"[",
"0",
"]",
"value",
"=",
"match",
"[",
"1",
"]",
"try",
":",
"if",
"value",
"[",
"-",
"1",
"]",
"==",
"','",
":",
"value",
"=",
"value",
"[",
":",
"-",
"1",
"]",
"sorted_headers",
"[",
"header",
"]",
"=",
"value",
"except",
"IndexError",
":",
"pass",
"return",
"sorted_headers"
] |
This function extracts valid headers from interactive input.
|
[
"This",
"function",
"extracts",
"valid",
"headers",
"from",
"interactive",
"input",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L122-L135
|
train
|
s0md3v/Photon
|
core/utils.py
|
top_level
|
def top_level(url, fix_protocol=True):
"""Extract the top level domain from an URL."""
ext = tld.get_tld(url, fix_protocol=fix_protocol)
toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split(
ext)[0] + ext
return toplevel
|
python
|
def top_level(url, fix_protocol=True):
"""Extract the top level domain from an URL."""
ext = tld.get_tld(url, fix_protocol=fix_protocol)
toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split(
ext)[0] + ext
return toplevel
|
[
"def",
"top_level",
"(",
"url",
",",
"fix_protocol",
"=",
"True",
")",
":",
"ext",
"=",
"tld",
".",
"get_tld",
"(",
"url",
",",
"fix_protocol",
"=",
"fix_protocol",
")",
"toplevel",
"=",
"'.'",
".",
"join",
"(",
"urlparse",
"(",
"url",
")",
".",
"netloc",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"2",
":",
"]",
")",
".",
"split",
"(",
"ext",
")",
"[",
"0",
"]",
"+",
"ext",
"return",
"toplevel"
] |
Extract the top level domain from an URL.
|
[
"Extract",
"the",
"top",
"level",
"domain",
"from",
"an",
"URL",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L138-L143
|
train
|
s0md3v/Photon
|
core/utils.py
|
proxy_type
|
def proxy_type(v):
""" Match IP:PORT or DOMAIN:PORT in a losse manner """
proxies = []
if re.match(r"((http|socks5):\/\/.)?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})", v):
proxies.append({"http": v,
"https": v})
return proxies
elif re.match(r"((http|socks5):\/\/.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}:(\d{1,5})", v):
proxies.append({"http": v,
"https": v})
return proxies
elif is_proxy_list(v, proxies):
return proxies
else:
raise argparse.ArgumentTypeError(
"Proxy should follow IP:PORT or DOMAIN:PORT format")
|
python
|
def proxy_type(v):
""" Match IP:PORT or DOMAIN:PORT in a losse manner """
proxies = []
if re.match(r"((http|socks5):\/\/.)?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})", v):
proxies.append({"http": v,
"https": v})
return proxies
elif re.match(r"((http|socks5):\/\/.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}:(\d{1,5})", v):
proxies.append({"http": v,
"https": v})
return proxies
elif is_proxy_list(v, proxies):
return proxies
else:
raise argparse.ArgumentTypeError(
"Proxy should follow IP:PORT or DOMAIN:PORT format")
|
[
"def",
"proxy_type",
"(",
"v",
")",
":",
"proxies",
"=",
"[",
"]",
"if",
"re",
".",
"match",
"(",
"r\"((http|socks5):\\/\\/.)?(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}):(\\d{1,5})\"",
",",
"v",
")",
":",
"proxies",
".",
"append",
"(",
"{",
"\"http\"",
":",
"v",
",",
"\"https\"",
":",
"v",
"}",
")",
"return",
"proxies",
"elif",
"re",
".",
"match",
"(",
"r\"((http|socks5):\\/\\/.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}:(\\d{1,5})\"",
",",
"v",
")",
":",
"proxies",
".",
"append",
"(",
"{",
"\"http\"",
":",
"v",
",",
"\"https\"",
":",
"v",
"}",
")",
"return",
"proxies",
"elif",
"is_proxy_list",
"(",
"v",
",",
"proxies",
")",
":",
"return",
"proxies",
"else",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"Proxy should follow IP:PORT or DOMAIN:PORT format\"",
")"
] |
Match IP:PORT or DOMAIN:PORT in a losse manner
|
[
"Match",
"IP",
":",
"PORT",
"or",
"DOMAIN",
":",
"PORT",
"in",
"a",
"losse",
"manner"
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L162-L177
|
train
|
s0md3v/Photon
|
plugins/dnsdumpster.py
|
dnsdumpster
|
def dnsdumpster(domain, output_dir):
"""Query dnsdumpster.com."""
response = requests.Session().get('https://dnsdumpster.com/').text
csrf_token = re.search(
r"name='csrfmiddlewaretoken' value='(.*?)'", response).group(1)
cookies = {'csrftoken': csrf_token}
headers = {'Referer': 'https://dnsdumpster.com/'}
data = {'csrfmiddlewaretoken': csrf_token, 'targetip': domain}
response = requests.Session().post(
'https://dnsdumpster.com/', cookies=cookies, data=data, headers=headers)
image = requests.get('https://dnsdumpster.com/static/map/%s.png' % domain)
if image.status_code == 200:
with open('%s/%s.png' % (output_dir, domain), 'wb') as f:
f.write(image.content)
|
python
|
def dnsdumpster(domain, output_dir):
"""Query dnsdumpster.com."""
response = requests.Session().get('https://dnsdumpster.com/').text
csrf_token = re.search(
r"name='csrfmiddlewaretoken' value='(.*?)'", response).group(1)
cookies = {'csrftoken': csrf_token}
headers = {'Referer': 'https://dnsdumpster.com/'}
data = {'csrfmiddlewaretoken': csrf_token, 'targetip': domain}
response = requests.Session().post(
'https://dnsdumpster.com/', cookies=cookies, data=data, headers=headers)
image = requests.get('https://dnsdumpster.com/static/map/%s.png' % domain)
if image.status_code == 200:
with open('%s/%s.png' % (output_dir, domain), 'wb') as f:
f.write(image.content)
|
[
"def",
"dnsdumpster",
"(",
"domain",
",",
"output_dir",
")",
":",
"response",
"=",
"requests",
".",
"Session",
"(",
")",
".",
"get",
"(",
"'https://dnsdumpster.com/'",
")",
".",
"text",
"csrf_token",
"=",
"re",
".",
"search",
"(",
"r\"name='csrfmiddlewaretoken' value='(.*?)'\"",
",",
"response",
")",
".",
"group",
"(",
"1",
")",
"cookies",
"=",
"{",
"'csrftoken'",
":",
"csrf_token",
"}",
"headers",
"=",
"{",
"'Referer'",
":",
"'https://dnsdumpster.com/'",
"}",
"data",
"=",
"{",
"'csrfmiddlewaretoken'",
":",
"csrf_token",
",",
"'targetip'",
":",
"domain",
"}",
"response",
"=",
"requests",
".",
"Session",
"(",
")",
".",
"post",
"(",
"'https://dnsdumpster.com/'",
",",
"cookies",
"=",
"cookies",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")",
"image",
"=",
"requests",
".",
"get",
"(",
"'https://dnsdumpster.com/static/map/%s.png'",
"%",
"domain",
")",
"if",
"image",
".",
"status_code",
"==",
"200",
":",
"with",
"open",
"(",
"'%s/%s.png'",
"%",
"(",
"output_dir",
",",
"domain",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"image",
".",
"content",
")"
] |
Query dnsdumpster.com.
|
[
"Query",
"dnsdumpster",
".",
"com",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/dnsdumpster.py#L7-L22
|
train
|
s0md3v/Photon
|
core/prompt.py
|
prompt
|
def prompt(default=None):
"""Present the user a prompt."""
editor = 'nano'
with tempfile.NamedTemporaryFile(mode='r+') as tmpfile:
if default:
tmpfile.write(default)
tmpfile.flush()
child_pid = os.fork()
is_child = child_pid == 0
if is_child:
os.execvp(editor, [editor, tmpfile.name])
else:
os.waitpid(child_pid, 0)
tmpfile.seek(0)
return tmpfile.read().strip()
|
python
|
def prompt(default=None):
"""Present the user a prompt."""
editor = 'nano'
with tempfile.NamedTemporaryFile(mode='r+') as tmpfile:
if default:
tmpfile.write(default)
tmpfile.flush()
child_pid = os.fork()
is_child = child_pid == 0
if is_child:
os.execvp(editor, [editor, tmpfile.name])
else:
os.waitpid(child_pid, 0)
tmpfile.seek(0)
return tmpfile.read().strip()
|
[
"def",
"prompt",
"(",
"default",
"=",
"None",
")",
":",
"editor",
"=",
"'nano'",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'r+'",
")",
"as",
"tmpfile",
":",
"if",
"default",
":",
"tmpfile",
".",
"write",
"(",
"default",
")",
"tmpfile",
".",
"flush",
"(",
")",
"child_pid",
"=",
"os",
".",
"fork",
"(",
")",
"is_child",
"=",
"child_pid",
"==",
"0",
"if",
"is_child",
":",
"os",
".",
"execvp",
"(",
"editor",
",",
"[",
"editor",
",",
"tmpfile",
".",
"name",
"]",
")",
"else",
":",
"os",
".",
"waitpid",
"(",
"child_pid",
",",
"0",
")",
"tmpfile",
".",
"seek",
"(",
"0",
")",
"return",
"tmpfile",
".",
"read",
"(",
")",
".",
"strip",
"(",
")"
] |
Present the user a prompt.
|
[
"Present",
"the",
"user",
"a",
"prompt",
"."
] |
6a29f2c9782ea9b3dc090db1774a259033600e39
|
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/prompt.py#L6-L22
|
train
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAApplication/QATradeRealtime.py
|
QA_RealTrade.start_market
|
def start_market(self):
"""
start the market thread and register backtest broker thread
QAMarket 继承QATrader, QATrader 中有 trade_engine属性 , trade_engine类型是QA_Engine从 QA_Thread继承
"""
# 启动 trade_engine 线程
self.market.start()
# 注册 backtest_broker ,并且启动和它关联线程QAThread 存放在 kernels 词典中, { 'broker_name': QAThread }
#self.market.register(self.broker_name, self.broker)
self.market.connect(self.broker_name)
|
python
|
def start_market(self):
"""
start the market thread and register backtest broker thread
QAMarket 继承QATrader, QATrader 中有 trade_engine属性 , trade_engine类型是QA_Engine从 QA_Thread继承
"""
# 启动 trade_engine 线程
self.market.start()
# 注册 backtest_broker ,并且启动和它关联线程QAThread 存放在 kernels 词典中, { 'broker_name': QAThread }
#self.market.register(self.broker_name, self.broker)
self.market.connect(self.broker_name)
|
[
"def",
"start_market",
"(",
"self",
")",
":",
"# 启动 trade_engine 线程",
"self",
".",
"market",
".",
"start",
"(",
")",
"# 注册 backtest_broker ,并且启动和它关联线程QAThread 存放在 kernels 词典中, { 'broker_name': QAThread }",
"#self.market.register(self.broker_name, self.broker)",
"self",
".",
"market",
".",
"connect",
"(",
"self",
".",
"broker_name",
")"
] |
start the market thread and register backtest broker thread
QAMarket 继承QATrader, QATrader 中有 trade_engine属性 , trade_engine类型是QA_Engine从 QA_Thread继承
|
[
"start",
"the",
"market",
"thread",
"and",
"register",
"backtest",
"broker",
"thread",
"QAMarket",
"继承QATrader,",
"QATrader",
"中有",
"trade_engine属性",
",",
"trade_engine类型是QA_Engine从",
"QA_Thread继承"
] |
bb1fe424e4108b62a1f712b81a05cf829297a5c0
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAApplication/QATradeRealtime.py#L72-L82
|
train
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAApplication/QATradeRealtime.py
|
QA_RealTrade.run
|
def run(self):
"""generator driven data flow
"""
# 如果出现了日期的改变 才会进行结算的事件
_date = None
while QA_util_if_tradetime(self.now):
for data in self.ingest_data: # 对于在ingest_data中的数据
# <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>
date = data.date[0]
if self.market_type is MARKET_TYPE.STOCK_CN: # 如果是股票市场
if _date != date: # 如果新的date
# 前一天的交易日已经过去
# 往 broker 和 account 发送 settle 事件
try:
self.market.trade_engine.join()
# time.sleep(2)
self.market._settle(self.broker_name)
except Exception as e:
raise e
# 基金 指数 期货
elif self.market_type in [MARKET_TYPE.FUND_CN, MARKET_TYPE.INDEX_CN, MARKET_TYPE.FUTURE_CN]:
self.market._settle(self.broker_name)
# print(data)
self.broker.run(
QA_Event(event_type=ENGINE_EVENT.UPCOMING_DATA, market_data=data))
# 生成 UPCOMING_DATA 事件放到 队列中去执行
self.market.upcoming_data(self.broker_name, data)
self.market.trade_engine.join()
_date = date
|
python
|
def run(self):
"""generator driven data flow
"""
# 如果出现了日期的改变 才会进行结算的事件
_date = None
while QA_util_if_tradetime(self.now):
for data in self.ingest_data: # 对于在ingest_data中的数据
# <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>
date = data.date[0]
if self.market_type is MARKET_TYPE.STOCK_CN: # 如果是股票市场
if _date != date: # 如果新的date
# 前一天的交易日已经过去
# 往 broker 和 account 发送 settle 事件
try:
self.market.trade_engine.join()
# time.sleep(2)
self.market._settle(self.broker_name)
except Exception as e:
raise e
# 基金 指数 期货
elif self.market_type in [MARKET_TYPE.FUND_CN, MARKET_TYPE.INDEX_CN, MARKET_TYPE.FUTURE_CN]:
self.market._settle(self.broker_name)
# print(data)
self.broker.run(
QA_Event(event_type=ENGINE_EVENT.UPCOMING_DATA, market_data=data))
# 生成 UPCOMING_DATA 事件放到 队列中去执行
self.market.upcoming_data(self.broker_name, data)
self.market.trade_engine.join()
_date = date
|
[
"def",
"run",
"(",
"self",
")",
":",
"# 如果出现了日期的改变 才会进行结算的事件",
"_date",
"=",
"None",
"while",
"QA_util_if_tradetime",
"(",
"self",
".",
"now",
")",
":",
"for",
"data",
"in",
"self",
".",
"ingest_data",
":",
"# 对于在ingest_data中的数据",
"# <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>",
"date",
"=",
"data",
".",
"date",
"[",
"0",
"]",
"if",
"self",
".",
"market_type",
"is",
"MARKET_TYPE",
".",
"STOCK_CN",
":",
"# 如果是股票市场",
"if",
"_date",
"!=",
"date",
":",
"# 如果新的date",
"# 前一天的交易日已经过去",
"# 往 broker 和 account 发送 settle 事件",
"try",
":",
"self",
".",
"market",
".",
"trade_engine",
".",
"join",
"(",
")",
"# time.sleep(2)",
"self",
".",
"market",
".",
"_settle",
"(",
"self",
".",
"broker_name",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"# 基金 指数 期货",
"elif",
"self",
".",
"market_type",
"in",
"[",
"MARKET_TYPE",
".",
"FUND_CN",
",",
"MARKET_TYPE",
".",
"INDEX_CN",
",",
"MARKET_TYPE",
".",
"FUTURE_CN",
"]",
":",
"self",
".",
"market",
".",
"_settle",
"(",
"self",
".",
"broker_name",
")",
"# print(data)",
"self",
".",
"broker",
".",
"run",
"(",
"QA_Event",
"(",
"event_type",
"=",
"ENGINE_EVENT",
".",
"UPCOMING_DATA",
",",
"market_data",
"=",
"data",
")",
")",
"# 生成 UPCOMING_DATA 事件放到 队列中去执行",
"self",
".",
"market",
".",
"upcoming_data",
"(",
"self",
".",
"broker_name",
",",
"data",
")",
"self",
".",
"market",
".",
"trade_engine",
".",
"join",
"(",
")",
"_date",
"=",
"date"
] |
generator driven data flow
|
[
"generator",
"driven",
"data",
"flow"
] |
bb1fe424e4108b62a1f712b81a05cf829297a5c0
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAApplication/QATradeRealtime.py#L84-L117
|
train
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAARP/QAAccount.py
|
QA_Account.message
|
def message(self):
'the standard message which can be transfer'
return {
'source':
'account',
'frequence':
self.frequence,
'account_cookie':
self.account_cookie,
'portfolio_cookie':
self.portfolio_cookie,
'user_cookie':
self.user_cookie,
'broker':
self.broker,
'market_type':
self.market_type,
'strategy_name':
self.strategy_name,
'current_time':
str(self._currenttime),
'allow_sellopen':
self.allow_sellopen,
'allow_margin':
self.allow_margin,
'allow_t0':
self.allow_t0,
'margin_level':
self.margin_level,
'init_assets':
self.init_assets,
'init_cash':
self.init_cash,
'init_hold':
self.init_hold.to_dict(),
'commission_coeff':
self.commission_coeff,
'tax_coeff':
self.tax_coeff,
'cash':
self.cash,
'history':
self.history,
'trade_index':
self.time_index_max,
'running_time':
str(datetime.datetime.now())
if self.running_time is None else str(self.running_time),
'quantaxis_version':
self.quantaxis_version,
'running_environment':
self.running_environment,
'start_date':
self.start_date,
'end_date':
self.end_date,
'frozen':
self.frozen,
'finished_id':
self.finishedOrderid
}
|
python
|
def message(self):
'the standard message which can be transfer'
return {
'source':
'account',
'frequence':
self.frequence,
'account_cookie':
self.account_cookie,
'portfolio_cookie':
self.portfolio_cookie,
'user_cookie':
self.user_cookie,
'broker':
self.broker,
'market_type':
self.market_type,
'strategy_name':
self.strategy_name,
'current_time':
str(self._currenttime),
'allow_sellopen':
self.allow_sellopen,
'allow_margin':
self.allow_margin,
'allow_t0':
self.allow_t0,
'margin_level':
self.margin_level,
'init_assets':
self.init_assets,
'init_cash':
self.init_cash,
'init_hold':
self.init_hold.to_dict(),
'commission_coeff':
self.commission_coeff,
'tax_coeff':
self.tax_coeff,
'cash':
self.cash,
'history':
self.history,
'trade_index':
self.time_index_max,
'running_time':
str(datetime.datetime.now())
if self.running_time is None else str(self.running_time),
'quantaxis_version':
self.quantaxis_version,
'running_environment':
self.running_environment,
'start_date':
self.start_date,
'end_date':
self.end_date,
'frozen':
self.frozen,
'finished_id':
self.finishedOrderid
}
|
[
"def",
"message",
"(",
"self",
")",
":",
"return",
"{",
"'source'",
":",
"'account'",
",",
"'frequence'",
":",
"self",
".",
"frequence",
",",
"'account_cookie'",
":",
"self",
".",
"account_cookie",
",",
"'portfolio_cookie'",
":",
"self",
".",
"portfolio_cookie",
",",
"'user_cookie'",
":",
"self",
".",
"user_cookie",
",",
"'broker'",
":",
"self",
".",
"broker",
",",
"'market_type'",
":",
"self",
".",
"market_type",
",",
"'strategy_name'",
":",
"self",
".",
"strategy_name",
",",
"'current_time'",
":",
"str",
"(",
"self",
".",
"_currenttime",
")",
",",
"'allow_sellopen'",
":",
"self",
".",
"allow_sellopen",
",",
"'allow_margin'",
":",
"self",
".",
"allow_margin",
",",
"'allow_t0'",
":",
"self",
".",
"allow_t0",
",",
"'margin_level'",
":",
"self",
".",
"margin_level",
",",
"'init_assets'",
":",
"self",
".",
"init_assets",
",",
"'init_cash'",
":",
"self",
".",
"init_cash",
",",
"'init_hold'",
":",
"self",
".",
"init_hold",
".",
"to_dict",
"(",
")",
",",
"'commission_coeff'",
":",
"self",
".",
"commission_coeff",
",",
"'tax_coeff'",
":",
"self",
".",
"tax_coeff",
",",
"'cash'",
":",
"self",
".",
"cash",
",",
"'history'",
":",
"self",
".",
"history",
",",
"'trade_index'",
":",
"self",
".",
"time_index_max",
",",
"'running_time'",
":",
"str",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
"if",
"self",
".",
"running_time",
"is",
"None",
"else",
"str",
"(",
"self",
".",
"running_time",
")",
",",
"'quantaxis_version'",
":",
"self",
".",
"quantaxis_version",
",",
"'running_environment'",
":",
"self",
".",
"running_environment",
",",
"'start_date'",
":",
"self",
".",
"start_date",
",",
"'end_date'",
":",
"self",
".",
"end_date",
",",
"'frozen'",
":",
"self",
".",
"frozen",
",",
"'finished_id'",
":",
"self",
".",
"finishedOrderid",
"}"
] |
the standard message which can be transfer
|
[
"the",
"standard",
"message",
"which",
"can",
"be",
"transfer"
] |
bb1fe424e4108b62a1f712b81a05cf829297a5c0
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L429-L489
|
train
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAARP/QAAccount.py
|
QA_Account.init_hold_with_account
|
def init_hold_with_account(self):
"""带account_cookie的初始化持仓
Returns:
[type] -- [description]
"""
return self.init_hold.reset_index().assign(
account_cookie=self.account_cookie
).set_index(['code',
'account_cookie'])
|
python
|
def init_hold_with_account(self):
"""带account_cookie的初始化持仓
Returns:
[type] -- [description]
"""
return self.init_hold.reset_index().assign(
account_cookie=self.account_cookie
).set_index(['code',
'account_cookie'])
|
[
"def",
"init_hold_with_account",
"(",
"self",
")",
":",
"return",
"self",
".",
"init_hold",
".",
"reset_index",
"(",
")",
".",
"assign",
"(",
"account_cookie",
"=",
"self",
".",
"account_cookie",
")",
".",
"set_index",
"(",
"[",
"'code'",
",",
"'account_cookie'",
"]",
")"
] |
带account_cookie的初始化持仓
Returns:
[type] -- [description]
|
[
"带account_cookie的初始化持仓"
] |
bb1fe424e4108b62a1f712b81a05cf829297a5c0
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L508-L518
|
train
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAARP/QAAccount.py
|
QA_Account.start_date
|
def start_date(self):
"""账户的起始交易日期(只在回测中使用)
Raises:
RuntimeWarning -- [description]
Returns:
[type] -- [description]
"""
if self.start_==None:
if len(self.time_index_max) > 0:
return str(min(self.time_index_max))[0:10]
else:
print(
RuntimeWarning(
'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
)
)
else:
return self.start_
|
python
|
def start_date(self):
"""账户的起始交易日期(只在回测中使用)
Raises:
RuntimeWarning -- [description]
Returns:
[type] -- [description]
"""
if self.start_==None:
if len(self.time_index_max) > 0:
return str(min(self.time_index_max))[0:10]
else:
print(
RuntimeWarning(
'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
)
)
else:
return self.start_
|
[
"def",
"start_date",
"(",
"self",
")",
":",
"if",
"self",
".",
"start_",
"==",
"None",
":",
"if",
"len",
"(",
"self",
".",
"time_index_max",
")",
">",
"0",
":",
"return",
"str",
"(",
"min",
"(",
"self",
".",
"time_index_max",
")",
")",
"[",
"0",
":",
"10",
"]",
"else",
":",
"print",
"(",
"RuntimeWarning",
"(",
"'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'",
")",
")",
"else",
":",
"return",
"self",
".",
"start_"
] |
账户的起始交易日期(只在回测中使用)
Raises:
RuntimeWarning -- [description]
Returns:
[type] -- [description]
|
[
"账户的起始交易日期",
"(",
"只在回测中使用",
")"
] |
bb1fe424e4108b62a1f712b81a05cf829297a5c0
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L558-L577
|
train
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAARP/QAAccount.py
|
QA_Account.end_date
|
def end_date(self):
"""账户的交易结束日期(只在回测中使用)
Raises:
RuntimeWarning -- [description]
Returns:
[type] -- [description]
"""
if self.start_==None:
if len(self.time_index_max) > 0:
return str(max(self.time_index_max))[0:10]
else:
print(
RuntimeWarning(
'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
)
)
else:
return self.end_
|
python
|
def end_date(self):
"""账户的交易结束日期(只在回测中使用)
Raises:
RuntimeWarning -- [description]
Returns:
[type] -- [description]
"""
if self.start_==None:
if len(self.time_index_max) > 0:
return str(max(self.time_index_max))[0:10]
else:
print(
RuntimeWarning(
'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
)
)
else:
return self.end_
|
[
"def",
"end_date",
"(",
"self",
")",
":",
"if",
"self",
".",
"start_",
"==",
"None",
":",
"if",
"len",
"(",
"self",
".",
"time_index_max",
")",
">",
"0",
":",
"return",
"str",
"(",
"max",
"(",
"self",
".",
"time_index_max",
")",
")",
"[",
"0",
":",
"10",
"]",
"else",
":",
"print",
"(",
"RuntimeWarning",
"(",
"'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'",
")",
")",
"else",
":",
"return",
"self",
".",
"end_"
] |
账户的交易结束日期(只在回测中使用)
Raises:
RuntimeWarning -- [description]
Returns:
[type] -- [description]
|
[
"账户的交易结束日期",
"(",
"只在回测中使用",
")"
] |
bb1fe424e4108b62a1f712b81a05cf829297a5c0
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L580-L599
|
train
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAARP/QAAccount.py
|
QA_Account.history_table_min
|
def history_table_min(self):
'区间交易历史的table'
if len(self.history_min) > 0:
lens = len(self.history_min[0])
else:
lens = len(self._history_headers)
return pd.DataFrame(
data=self.history_min,
columns=self._history_headers[:lens]
).sort_index()
|
python
|
def history_table_min(self):
'区间交易历史的table'
if len(self.history_min) > 0:
lens = len(self.history_min[0])
else:
lens = len(self._history_headers)
return pd.DataFrame(
data=self.history_min,
columns=self._history_headers[:lens]
).sort_index()
|
[
"def",
"history_table_min",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"history_min",
")",
">",
"0",
":",
"lens",
"=",
"len",
"(",
"self",
".",
"history_min",
"[",
"0",
"]",
")",
"else",
":",
"lens",
"=",
"len",
"(",
"self",
".",
"_history_headers",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"self",
".",
"history_min",
",",
"columns",
"=",
"self",
".",
"_history_headers",
"[",
":",
"lens",
"]",
")",
".",
"sort_index",
"(",
")"
] |
区间交易历史的table
|
[
"区间交易历史的table"
] |
bb1fe424e4108b62a1f712b81a05cf829297a5c0
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L639-L649
|
train
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAARP/QAAccount.py
|
QA_Account.history_table
|
def history_table(self):
'交易历史的table'
if len(self.history) > 0:
lens = len(self.history[0])
else:
lens = len(self._history_headers)
return pd.DataFrame(
data=self.history,
columns=self._history_headers[:lens]
).sort_index()
|
python
|
def history_table(self):
'交易历史的table'
if len(self.history) > 0:
lens = len(self.history[0])
else:
lens = len(self._history_headers)
return pd.DataFrame(
data=self.history,
columns=self._history_headers[:lens]
).sort_index()
|
[
"def",
"history_table",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"history",
")",
">",
"0",
":",
"lens",
"=",
"len",
"(",
"self",
".",
"history",
"[",
"0",
"]",
")",
"else",
":",
"lens",
"=",
"len",
"(",
"self",
".",
"_history_headers",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"self",
".",
"history",
",",
"columns",
"=",
"self",
".",
"_history_headers",
"[",
":",
"lens",
"]",
")",
".",
"sort_index",
"(",
")"
] |
交易历史的table
|
[
"交易历史的table"
] |
bb1fe424e4108b62a1f712b81a05cf829297a5c0
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L670-L680
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.