def build_dataset(instruction_dicts,
dataset_from_file_fn,
shuffle_files=False,
parallel_reads=64):
"""Constructs a `tf.data.Dataset` from TFRecord files.
Args:
instruction_dicts: `list` of {'filepath':, 'mask':, 'mask_offset':}
containing the information about which files and which examples to use.
The boolean mask will be repeated and zipped with the examples from
filepath.
dataset_from_file_fn: function returning a `tf.data.Dataset` given a
filename.
shuffle_files: `bool`, Whether to shuffle the input filenames.
parallel_reads: `int`, how many files to read in parallel.
Returns:
`tf.data.Dataset`
"""
# First case: All examples are taken (No value skipped)
if _no_examples_skipped(instruction_dicts):
# Only use the filenames as instruction
instruction_ds = tf.data.Dataset.from_tensor_slices([
d["filepath"] for d in instruction_dicts
])
build_ds_from_instruction = dataset_from_file_fn
# Second case: Use the instructions to read the examples
else:
instruction_ds = _build_instruction_ds(instruction_dicts)
build_ds_from_instruction = functools.partial(
_build_ds_from_instruction,
ds_from_file_fn=dataset_from_file_fn,
)
# If shuffle is True, we shuffle the instructions/shards
if shuffle_files:
instruction_ds = instruction_ds.shuffle(len(instruction_dicts))
# Use interleave to parallel read files and decode records
ds = instruction_ds.interleave(
build_ds_from_instruction,
cycle_length=parallel_reads,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds
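# Usage sketch (hypothetical shard path; assumes `tf.data.TFRecordDataset`
# plays the role of `dataset_from_file_fn`). The mask keeps the first 25 of
# every 100 examples of the shard:
import tensorflow as tf

instruction_dicts = [{
    "filepath": "/tmp/mnist-train-00000-of-00001.tfrecord",  # hypothetical
    "mask": [True] * 25 + [False] * 75,
    "mask_offset": 0,
}]
ds = build_dataset(
    instruction_dicts,
    dataset_from_file_fn=tf.data.TFRecordDataset,
    shuffle_files=True,
    parallel_reads=16)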
def _build_instruction_ds(instructions):
"""Create a dataset containing an individual instruction for each shard.
Each instruction is a dict:
```
{
"filepath": tf.Tensor(shape=(), dtype=tf.string),
"mask_offset": tf.Tensor(shape=(), dtype=tf.int64),
"mask": tf.Tensor(shape=(100,), dtype=tf.bool),
}
```
Args:
instructions: `list[dict]`, the list of instruction dict
Returns:
instruction_ds: The dataset containing the instructions. The dataset size is
the number of shards.
"""
# Transpose the list[dict] into dict[list]
tensor_inputs = {
# mask_offset needs to be converted to int64 explicitly
k: np.array(vals, dtype=np.int64) if k == "mask_offset" else list(vals)
for k, vals in utils.zip_dict(*instructions)
}
return tf.data.Dataset.from_tensor_slices(tensor_inputs)
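# A minimal sketch of the list[dict] -> dict[list] transpose performed above,
# assuming `utils.zip_dict` yields (key, values_tuple) pairs across all input
# dicts (hypothetical instructions; real masks have length 100):
instructions = [
    {"filepath": "a.tfrecord", "mask": [True, False], "mask_offset": 0},
    {"filepath": "b.tfrecord", "mask": [True, True], "mask_offset": 3},
]
transposed = {k: [d[k] for d in instructions] for k in instructions[0]}
# transposed == {
#     "filepath": ["a.tfrecord", "b.tfrecord"],
#     "mask": [[True, False], [True, True]],
#     "mask_offset": [0, 3],  # converted to np.int64 by the function above
# }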
def _build_mask_ds(mask, mask_offset):
"""Build the mask dataset to indicate which element to skip.
Args:
mask: `tf.Tensor`, binary mask to apply to all following elements. This
mask should have a length 100.
mask_offset: `tf.Tensor`, Integer specifying from how much the mask
should be shifted for the first element.
Returns:
mask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip
and True for examples to keep.
"""
mask_ds = tf.data.Dataset.from_tensor_slices(mask)
mask_ds = mask_ds.repeat()
mask_ds = mask_ds.skip(mask_offset)
return mask_ds
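# Behaviour sketch: the mask is cycled with `repeat()` and the first
# `mask_offset` flags are dropped with `skip()`. With mask [True, False, True]
# and offset 1 the stream is False, True, True, False, True, ...
import tensorflow as tf

mask_ds = _build_mask_ds(
    mask=tf.constant([True, False, True]),
    mask_offset=tf.constant(1, dtype=tf.int64))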
def _build_ds_from_instruction(instruction, ds_from_file_fn):
"""Map an instruction to a real dataset for one particular shard.
Args:
instruction: A `dict` of `tf.Tensor` containing the instruction to load
the particular shard (filename, mask,...)
ds_from_file_fn: `fct`, function which returns the dataset associated to
the filename
Returns:
dataset: `tf.data.Dataset`, The shard loaded from the instruction
"""
# Create the example and mask ds for this particular shard
examples_ds = ds_from_file_fn(instruction["filepath"])
mask_ds = _build_mask_ds(
mask_offset=instruction["mask_offset"],
mask=instruction["mask"],
)
# Zip the mask and real examples
ds = tf.data.Dataset.zip((examples_ds, mask_ds))
# Filter according to the mask (only keep True)
ds = ds.filter(lambda example, mask: mask)
# Only keep the examples
ds = ds.map(lambda example, mask: example)
return ds
def as_numpy(dataset, graph=None):
"""Converts a `tf.data.Dataset` to an iterable of NumPy arrays.
`as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
graph: `tf.Graph`, optional, explicitly set the graph to use.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
"""
nested_ds = dataset
del dataset
# Flatten
flat_ds = tf.nest.flatten(nested_ds)
flat_np = []
# Type check for Tensors and Datasets
for ds_el in flat_ds:
types = [type(el) for el in flat_ds]
types = tf.nest.pack_sequence_as(nested_ds, types)
if not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el)):
raise ValueError("Arguments to as_numpy must be tf.Tensors or "
"tf.data.Datasets. Got: %s" % types)
if tf.executing_eagerly():
# Eager mode
for ds_el in flat_ds:
if isinstance(ds_el, tf.Tensor):
np_el = ds_el.numpy()
elif tf_compat.is_dataset(ds_el):
np_el = _eager_dataset_iterator(ds_el)
else:
assert False
flat_np.append(np_el)
else:
# Graph mode
# First create iterators for datasets
with utils.maybe_with_graph(graph, create_if_none=False):
ds_iters = [
tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next()
for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
]
ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
# Then create numpy arrays for tensors
with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor
# Calling sess.run once so that randomness is shared.
np_arrays = sess.run([tensor for tensor in flat_ds
if not tf_compat.is_dataset(tensor)])
# Merge the dataset iterators and np arrays
iter_ds = iter(ds_iters)
iter_array = iter(np_arrays)
flat_np = [
next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
for ds_el in flat_ds
]
# Nest
return tf.nest.pack_sequence_as(nested_ds, flat_np)
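# Usage sketch (eager mode): datasets become generators of NumPy values.
import tensorflow as tf

ds = tf.data.Dataset.range(3)
for np_el in as_numpy(ds):
    print(np_el)  # numpy int64 scalars: 0, 1, 2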
def _load_data(filepath):
"""Loads the images and latent values into Numpy arrays."""
with h5py.File(filepath, "r") as h5dataset:
image_array = np.array(h5dataset["images"])
# The 'label' data set in the hdf5 file actually contains the float values
# and not the class labels.
values_array = np.array(h5dataset["labels"])
return image_array, values_array
def _discretize(a):
"""Discretizes array values to class labels."""
arr = np.asarray(a)
index = np.argsort(arr)
inverse_index = np.zeros(arr.size, dtype=np.intp)
inverse_index[index] = np.arange(arr.size, dtype=np.intp)
arr = arr[index]
obs = np.r_[True, arr[1:] != arr[:-1]]
return obs.cumsum()[inverse_index] - 1
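# Example: float values become dense, order-preserving integer labels.
import numpy as np

print(_discretize(np.array([0.5, -1.0, 0.5, 2.0])))  # -> [1 0 1 2]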
def _generate_examples(self, filepath):
"""Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file will be slow with a single file. Instead, we first load everything
# into memory before yielding the samples.
image_array, values_array = _load_data(filepath)
# We need to calculate the class labels from the float values in the file.
labels_array = np.zeros_like(values_array, dtype=np.int64)
for i in range(values_array.shape[1]):
labels_array[:, i] = _discretize(values_array[:, i]) # pylint: disable=unsupported-assignment-operation
for image, labels, values in moves.zip(image_array, labels_array,
values_array):
yield {
"image": image,
"label_floor_hue": labels[0],
"label_wall_hue": labels[1],
"label_object_hue": labels[2],
"label_scale": labels[3],
"label_shape": labels[4],
"label_orientation": labels[5],
"value_floor_hue": values[0],
"value_wall_hue": values[1],
"value_object_hue": values[2],
"value_scale": values[3],
"value_shape": values[4],
"value_orientation": values[5],
}
def _parse_and_clean_wikicode(raw_content):
"""Strips formatting and unwanted sections from raw page content."""
wikicode = tfds.core.lazy_imports.mwparserfromhell.parse(raw_content)
# Filters for references, tables, and file/image links.
re_rm_wikilink = re.compile(
"^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE)
def rm_wikilink(obj):
return bool(re_rm_wikilink.match(six.text_type(obj.title)))
def rm_tag(obj):
return six.text_type(obj.tag) in {"ref", "table"}
def rm_template(obj):
return obj.name.lower() in {
"reflist", "notelist", "notelist-ua", "notelist-lr", "notelist-ur",
"notelist-lg"}
def try_remove_obj(obj, section):
try:
section.remove(obj)
except ValueError:
# For unknown reasons, objects are sometimes not found.
pass
section_text = []
# Filter individual sections to clean.
for section in wikicode.get_sections(
flat=True, include_lead=True, include_headings=True):
for obj in section.ifilter_wikilinks(matches=rm_wikilink, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_templates(matches=rm_template, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_tags(matches=rm_tag, recursive=True):
try_remove_obj(obj, section)
section_text.append(section.strip_code().strip())
return "\n\n".join(section_text)
def _build_pcollection(self, pipeline, filepaths, language):
"""Build PCollection of examples in the raw (text) form."""
beam = tfds.core.lazy_imports.apache_beam
def _extract_content(filepath):
"""Extracts article content from a single WikiMedia XML file."""
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
for _, elem in etree.iterparse(f, events=("end",)):
if not elem.tag.endswith("page"):
continue
namespace = elem.tag[:-4]
title = elem.find("./{0}title".format(namespace)).text
ns = elem.find("./{0}ns".format(namespace)).text
# Filter pages that are not in the "main" namespace.
if ns != "0":
continue
raw_content = elem.find(
"./{0}revision/{0}text".format(namespace)).text
elem.clear()
# Filter redirects.
if raw_content is None or raw_content.lower().startswith("#redirect"):
beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
continue
beam.metrics.Metrics.counter(language, "extracted-examples").inc()
yield (title, raw_content)
def _clean_content(inputs):
"""Cleans raw wikicode to extract text."""
title, raw_content = inputs
try:
text = _parse_and_clean_wikicode(raw_content)
except (
tfds.core.lazy_imports.mwparserfromhell.parser.ParserError) as e:
beam.metrics.Metrics.counter(language, "parser-error").inc()
logging.error("mwparserfromhell ParseError: %s", e)
return
beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
yield {
"title": title,
"text": text
}
return (
pipeline
| beam.Create(filepaths)
| beam.FlatMap(_extract_content)
| beam.FlatMap(_clean_content)
)
def download_and_prepare(builder):
"""Generate data for a given dataset."""
print("download_and_prepare for dataset {}...".format(builder.info.full_name))
dl_config = download_config()
if isinstance(builder, tfds.core.BeamBasedBuilder):
beam = tfds.core.lazy_imports.apache_beam
# TODO(b/129149715): Restore compute stats. Currently skipped because not
# supported by Beam.
dl_config.compute_stats = tfds.download.ComputeStatsMode.SKIP
dl_config.beam_options = beam.options.pipeline_options.PipelineOptions()
builder.download_and_prepare(
download_dir=FLAGS.download_dir,
download_config=dl_config,
)
termcolor.cprint(str(builder.info.as_proto), attrs=["bold"])
if FLAGS.debug:
dataset = builder.as_dataset(split=tfds.Split.TRAIN)
pdb.set_trace()
del dataset
def encode_example(self, bbox):
"""See base class for details."""
# Validate the coordinates
for coordinate in bbox:
if not isinstance(coordinate, float):
raise ValueError(
'BBox coordinates should be float. Got {}.'.format(bbox))
if not 0.0 <= coordinate <= 1.0:
raise ValueError(
'BBox coordinates should be between 0 and 1. Got {}.'.format(bbox))
if bbox.xmax < bbox.xmin or bbox.ymax < bbox.ymin:
raise ValueError(
'BBox coordinates should have min <= max. Got {}.'.format(bbox))
return super(BBoxFeature, self).encode_example(
[bbox.ymin, bbox.xmin, bbox.ymax, bbox.xmax]
)
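# Usage sketch: `tfds.features.BBox` is a (ymin, xmin, ymax, xmax) namedtuple
# of floats normalized to [0, 1]:
import tensorflow_datasets as tfds

feature = tfds.features.BBoxFeature()
encoded = feature.encode_example(
    tfds.features.BBox(ymin=0.1, xmin=0.2, ymax=0.8, xmax=0.9))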
def _load_data(path, labels_number=1):
"""Yields (labels, np_image) tuples."""
with tf.io.gfile.GFile(path, "rb") as f:
data = f.read()
offset = 0
max_offset = len(data) - 1
while offset < max_offset:
labels = np.frombuffer(data, dtype=np.uint8, count=labels_number,
offset=offset).reshape((labels_number,))
# 1 byte per label, 1024 * 3 = 3072 bytes for the image.
offset += labels_number
img = (np.frombuffer(data, dtype=np.uint8, count=3072, offset=offset)
.reshape((3, _CIFAR_IMAGE_SIZE, _CIFAR_IMAGE_SIZE))
.transpose((1, 2, 0))
)
offset += 3072
yield labels, img
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
cifar_path = dl_manager.download_and_extract(self._cifar_info.url)
cifar_info = self._cifar_info
cifar_path = os.path.join(cifar_path, cifar_info.prefix)
# Load the label names
for label_key, label_file in zip(cifar_info.label_keys,
cifar_info.label_files):
labels_path = os.path.join(cifar_path, label_file)
with tf.io.gfile.GFile(labels_path) as label_f:
label_names = [name for name in label_f.read().split("\n") if name]
self.info.features[label_key].names = label_names
# Define the splits
def gen_filenames(filenames):
for f in filenames:
yield os.path.join(cifar_path, f)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=10,
gen_kwargs={"filepaths": gen_filenames(cifar_info.train_files)}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs={"filepaths": gen_filenames(cifar_info.test_files)}),
]
def _generate_examples(self, filepaths):
"""Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features.
"""
label_keys = self._cifar_info.label_keys
for path in filepaths:
for labels, np_image in _load_data(path, len(label_keys)):
row = dict(zip(label_keys, labels))
row["image"] = np_image
yield row
def disallow_positional_args(wrapped=None, allowed=None):
"""Requires function to be called using keyword arguments."""
# See
# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments
# for decorator pattern.
if wrapped is None:
return functools.partial(disallow_positional_args, allowed=allowed)
@wrapt.decorator
def disallow_positional_args_dec(fn, instance, args, kwargs):
ismethod = instance is not None
_check_no_positional(fn, args, ismethod, allowed=allowed)
_check_required(fn, kwargs)
return fn(*args, **kwargs)
return disallow_positional_args_dec(wrapped)
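# Usage sketch (hypothetical function): keyword calls pass through,
# positional calls are rejected by `_check_no_positional`.
@disallow_positional_args
def make_builder(name=None, version=None):
    return name, version

make_builder(name="mnist", version="1.0.0")  # OK
# make_builder("mnist", "1.0.0")  # raises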
def _required_args(fn):
"""Returns arguments of fn with default=REQUIRED_ARG."""
spec = getargspec(fn)
if not spec.defaults:
return []
arg_names = spec.args[-len(spec.defaults):]
return [name for name, val in zip(arg_names, spec.defaults)
if val is REQUIRED_ARG]
def download_gcs_file(path, out_fname=None, prefix_filter=None):
"""Download a file from GCS, optionally to a file."""
url = posixpath.join(GCS_BUCKET, path)
if prefix_filter:
url += "?prefix=%s" % prefix_filter
stream = bool(out_fname)
resp = requests.get(url, stream=stream)
if not resp.ok:
raise ValueError("GCS bucket inaccessible")
if out_fname:
with tf.io.gfile.GFile(out_fname, "wb") as f:
for chunk in resp.iter_content(1024):
f.write(chunk)
else:
return resp.content
def gcs_files(prefix_filter=None):
"""List all files in GCS bucket."""
top_level_xml_str = download_gcs_file("", prefix_filter=prefix_filter)
xml_root = ElementTree.fromstring(top_level_xml_str)
filenames = [el[0].text for el in xml_root if el.tag.endswith("Contents")]
return filenames
def gcs_dataset_info_files(dataset_dir):
"""Return paths to GCS files in the given dataset directory."""
prefix = posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir, "")
# Filter for this dataset
filenames = [el for el in gcs_files(prefix_filter=prefix)
if el.startswith(prefix) and len(el) > len(prefix)]
return filenames
def is_dataset_on_gcs(dataset_name):
"""If the dataset is available on the GCS bucket gs://tfds-data/datasets."""
dir_name = posixpath.join(GCS_DATASETS_DIR, dataset_name)
return len(gcs_files(prefix_filter=dir_name)) > 2
def _run_kaggle_command(command_args, competition_name):
"""Run kaggle command with subprocess."""
try:
output = sp.check_output(command_args)
return tf.compat.as_text(output)
except sp.CalledProcessError as err:
output = err.output
_log_command_output(output, error=True)
if output.startswith(b"404"):
logging.error(_NOT_FOUND_ERR_MSG, competition_name)
raise
logging.error(_ERR_MSG, competition_name)
raise
def competition_files(self):
"""List of competition files."""
command = [
"kaggle",
"datasets" if "/" in self._competition_name else "competitions",
"files",
"-v",
self._competition_name,
]
output = _run_kaggle_command(command, self._competition_name)
return sorted([
line.split(",")[0] for line in output.split("\n")[1:] if line
])
def competition_urls(self):
"""Returns 'kaggle://' urls."""
return [
KaggleFile(self._competition_name, fname).to_url()
for fname in self.competition_files # pylint: disable=not-an-iterable
]
def download_file(self, fname, output_dir):
"""Downloads competition file to output_dir."""
if fname not in self.competition_files: # pylint: disable=unsupported-membership-test
raise ValueError("%s is not one of the competition's "
"files: %s" % (fname, self.competition_files))
command = [
"kaggle",
"competitions",
"download",
"--file",
fname,
"--path",
output_dir,
"-c",
self._competition_name,
]
_run_kaggle_command(command, self._competition_name)
return os.path.join(output_dir, fname)
def _generate_examples(self, images_dir_path):
"""Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label.
"""
parent_dir = tf.io.gfile.listdir(images_dir_path)[0]
walk_dir = os.path.join(images_dir_path, parent_dir)
dirs = tf.io.gfile.listdir(walk_dir)
for d in dirs:
if tf.io.gfile.isdir(os.path.join(walk_dir, d)):
for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)):
for image_file in fname:
if image_file.endswith(".jpg"):
image_path = os.path.join(full_path, image_file)
yield {
"image": image_path,
"label": d.lower(),
}
def _checksum_paths():
"""Returns dict {'dataset_name': 'path/to/checksums/file'}."""
dataset2path = {}
for dir_path in _CHECKSUM_DIRS:
for fname in _list_dir(dir_path):
if not fname.endswith(_CHECKSUM_SUFFIX):
continue
fpath = os.path.join(dir_path, fname)
dataset_name = fname[:-len(_CHECKSUM_SUFFIX)]
dataset2path[dataset_name] = fpath
return dataset2path
def _get_path(dataset_name):
"""Returns path to where checksums are stored for a given dataset."""
path = _checksum_paths().get(dataset_name, None)
if path:
return path
msg = ('No checksums file could be found for dataset %s. Please create one in '
'one of: %s') % (dataset_name, ', '.join(_CHECKSUM_DIRS))
raise AssertionError(msg)
def _get_sizes_checksums(checksums_path):
"""Returns {URL: (size, checksum)}s stored within file."""
checksums = {}
for line in _read_file(checksums_path).split('\n'):
if not line:
continue
# URL might have spaces inside, but size and checksum will not.
url, size, checksum = line.rsplit(' ', 2)
checksums[url] = (int(size), checksum)
return checksums
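# The checksums file stores one '<url> <size> <checksum>' record per line;
# `rsplit(' ', 2)` keeps URLs that contain spaces intact. A sketch with
# hypothetical content:
line = 'http://example.com/my file.zip 1024 deadbeef'
url, size, checksum = line.rsplit(' ', 2)
assert url == 'http://example.com/my file.zip'
assert (int(size), checksum) == (1024, 'deadbeef')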
def get_all_sizes_checksums():
"""Returns dict associating URL to (size, sha256)."""
sizes_checksums = {}
for path in _checksum_paths().values():
data = _get_sizes_checksums(path)
for url, size_checksum in data.items():
if (url in sizes_checksums and
sizes_checksums[url] != size_checksum):
raise AssertionError(
'URL %s is registered with 2+ distinct size/checksum tuples.' % url)
sizes_checksums.update(data)
return sizes_checksums
def store_checksums(dataset_name, sizes_checksums):
"""Store given checksums and sizes for specific dataset.
Content of file is never discarded, only updated. This is to ensure that if
process is killed right after first download finishes, checksums registered
during previous runs aren't lost.
It is the responsibility of the caller not to call function multiple times in
parallel for a given dataset.
Only original file content is updated. This means the entire set of new sizes
and checksums must be given at every call.
Args:
dataset_name: string.
sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
"""
path = _get_path(dataset_name)
original_data = _get_sizes_checksums(path)
new_data = original_data.copy()
new_data.update(sizes_checksums)
if original_data == new_data:
return
with tf.io.gfile.GFile(path, 'w') as f:
for url, (size, checksum) in sorted(new_data.items()):
f.write('%s %s %s\n' % (url, size, checksum))
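# Usage sketch (hypothetical dataset name, URL and checksum). The complete
# mapping must be passed on every call, since entries are merged into the
# existing file rather than replacing it:
store_checksums(
    'my_dataset',
    {'http://example.com/data.zip': (1024, 'deadbeef' * 8)},
)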
def _guess_extract_method(fname):
"""Guess extraction method, given file name (or path)."""
for method, extensions in _EXTRACTION_METHOD_TO_EXTS:
for ext in extensions:
if fname.endswith(ext):
return method
return ExtractMethod.NO_EXTRACT
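# `_EXTRACTION_METHOD_TO_EXTS` is iterated as (method, extensions) pairs.
# A plausible sketch of its shape (the real table lives elsewhere in this
# module; the entries here are illustrative assumptions):
_EXTRACTION_METHOD_TO_EXTS = [
    (ExtractMethod.TAR_GZ, ['.tar.gz', '.tgz']),
    (ExtractMethod.TAR, ['.tar']),
    (ExtractMethod.GZIP, ['.gz']),
    (ExtractMethod.ZIP, ['.zip']),
]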
def _sanitize_url(url, max_length):
"""Sanitize and shorten url to fit in max_length.
Function is stable: same input MUST ALWAYS give same result, across changes
in code as well. Different URLs might give same result.
As much as possible, the extension should be kept.
Heuristics are applied to only keep useful info from url.
1- Drop generic [sub]domains.
'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
'storage.googleapis.com/foo/...' -> 'foo/...'
'drive.google.com/bar/...' -> 'bar/...'
'github.com/baz/...' -> 'baz/...'
2- Remove leading '0's from url components:
'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'
3- Truncate each component of url until total size fits or each component is
left with 4 chars (or total size is <= limit):
'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
(here truncate components to 4 chars per component max)
-> 'Move_64x6_png/trai-4-of-10.tfrecords'
4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'
Args:
url: string, url to sanitize and shorten.
max_length: int, max length of result.
Returns:
(string, string): sanitized and shortened url, file extension.
"""
url = urllib.parse.urlparse(url)
netloc = url.netloc
for prefix in _NETLOC_COMMON_PREFIXES:
if netloc.startswith(prefix):
netloc = netloc[len(prefix):]
for suffix in _NETLOC_COMMON_SUFFIXES:
if netloc.endswith(suffix):
netloc = netloc[:-len(suffix)]
url = '%s%s%s%s' % (netloc, url.path, url.params, url.query)
# Get the extension:
for ext in _KNOWN_EXTENSIONS:
if url.endswith(ext):
extension = ext
url = url[:-len(extension)]
break
else:
url, extension = os.path.splitext(url)
max_length -= len(extension)
# Replace non authorized chars (including '/') by '_':
url = re.sub(r'[^a-zA-Z0-9\.\-_]+', '_', url)
# Remove parts with no info:
for common_part in _URL_COMMON_PARTS:
url = url.replace(common_part, '_')
url = url.strip('_')
# Remove leading zeros in groups of numbers:
url = re.sub('(?<![0-9])0+(?=[0-9])', '', url)
# Decrease max size of URL components:
c_size = max(len(c) for c in re.split(r'[\.\-_]', url))
while c_size > 4 and len(url) > max_length:
c_size -= 1
url = re.sub(r'[^\.\-_]{4,}', lambda match: match.group(0)[:c_size], url)
return url[:max_length], extension
def get_dl_fname(url, checksum):
"""Returns name of file for (url, checksum).
The max length of linux and windows filenames is 255 chars.
Windows however expects short paths (260 chars), so we limit the file name
to an arbitrary 90 chars.
Naming pattern: '${url}${checksum}'.
- url: url sanitized and shortened to 46 chars.
- checksum: base64url encoded sha256: 44 chars (removing trailing '=').
Args:
url: `str`, url of the file.
checksum: `str` (hex), the sha256 hexdigest of file or url.
Returns:
string of 90 chars max.
"""
checksum = base64.urlsafe_b64encode(_decode_hex(checksum))
checksum = tf.compat.as_text(checksum)[:-1]
name, extension = _sanitize_url(url, max_length=46)
return '%s%s%s' % (name, checksum, extension)
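# Sketch of the resulting filename (hypothetical URL and checksum): up to 46
# chars of sanitized URL, then 43 chars of base64url-encoded sha256 (44 minus
# the trailing '='), then the original extension:
fname = get_dl_fname('http://example.com/data.zip', 'ab' * 32)
# e.g. 'example.com_data' + '<43-char-checksum>' + '.zip'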
def get_dl_dirname(url):
"""Returns name of temp dir for given url."""
checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest()
return get_dl_fname(url, checksum)
def _read_info(info_path):
"""Returns info dict or None."""
if not tf.io.gfile.exists(info_path):
return None
with tf.io.gfile.GFile(info_path) as info_f:
return json.load(info_f)
def write_info_file(resource, path, dataset_name, original_fname):
"""Write the INFO file next to local file.
Although the method is synchronized, there is still a risk two processes
running at the same time overlap here. Risk accepted, since potentially lost
data (`dataset_name`) is only for human consumption.
Args:
resource: resource for which to write the INFO file.
path: path of downloaded file.
dataset_name: name of the dataset for which the file was downloaded.
original_fname: name of file as downloaded.
"""
info_path = _get_info_path(path)
info = _read_info(info_path) or {}
urls = set(info.get('urls', []) + [resource.url])
dataset_names = info.get('dataset_names', [])
if dataset_name:
dataset_names.append(dataset_name)
if 'original_fname' in info and info['original_fname'] != original_fname:
raise AssertionError(
'`original_fname` "%s" stored in %s does NOT match "%s".' % (
info['original_fname'], info_path, original_fname))
info = dict(urls=list(urls), dataset_names=list(set(dataset_names)),
original_fname=original_fname)
with py_utils.atomic_write(info_path, 'w') as info_f:
json.dump(info, info_f, sort_keys=True)
def get_extract_method(path):
"""Returns `ExtractMethod` to use on resource at path. Cannot be None."""
info_path = _get_info_path(path)
info = _read_info(info_path)
fname = info.get('original_fname', path) if info else path
return _guess_extract_method(fname)
def exists_locally(cls, path):
"""Returns whether the resource exists locally, at `resource.path`."""
# If INFO file doesn't exist, consider resource does NOT exist, as it would
# prevent guessing the `extract_method`.
return (tf.io.gfile.exists(path) and
tf.io.gfile.exists(_get_info_path(path)))
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
root_url = "http://images.cocodataset.org/"
urls = {
# Train/validation set
"train_images": "zips/train2014.zip",
"val_images": "zips/val2014.zip",
"trainval_annotations": "annotations/annotations_trainval2014.zip",
# Testing set (no annotations) (2014)
"test_images": "zips/test2014.zip",
"test_annotations": "annotations/image_info_test2014.zip",
# Testing set (no annotations) (2015)
"test2015_images": "zips/test2015.zip",
"test2015_annotations": "annotations/image_info_test2015.zip",
}
extracted_paths = dl_manager.download_and_extract({
key: root_url + url for key, url in urls.items()
})
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["train_images"],
annotation_dir=extracted_paths["trainval_annotations"],
split_type="train2014",
)),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["val_images"],
annotation_dir=extracted_paths["trainval_annotations"],
split_type="val2014",
)),
# Warning: Testing split only contains the images without any annotation
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["test_images"],
annotation_dir=extracted_paths["test_annotations"],
split_type="test2014",
has_annotation=False,
)),
tfds.core.SplitGenerator(
name="test2015",
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["test2015_images"],
annotation_dir=extracted_paths["test2015_annotations"],
split_type="test2015",
has_annotation=False,
)),
]
def _generate_examples(
self, image_dir, annotation_dir, split_type, has_annotation=True):
"""Generate examples as dicts.
Args:
image_dir: `str`, directory containing the images
annotation_dir: `str`, directory containing the annotation files
split_type: `str`, <split_name><year> (ex: train2014)
has_annotation: `bool`, when False (for the testing set), the annotations
are not recorded
Yields:
Generator yielding the next samples
"""
if has_annotation:
instance_filename = "instances_{}.json"
else:
instance_filename = "image_info_{}.json"
# Load the label names and images
instance_path = os.path.join(
annotation_dir,
"annotations",
instance_filename.format(split_type),
)
coco_annotation = CocoAnnotation(instance_path)
# Each category is a dict:
# {
# 'id': 51, # From 1-91, some entries missing
# 'name': 'bowl',
# 'supercategory': 'kitchen',
# }
categories = coco_annotation.categories
# Each image is a dict:
# {
# 'id': 262145,
# 'file_name': 'COCO_train2014_000000262145.jpg'
# 'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',
# 'coco_url': 'http://images.cocodataset.org/train2014/xyz.jpg',
# 'license': 2,
# 'date_captured': '2013-11-20 02:07:55',
# 'height': 427,
# 'width': 640,
# }
images = coco_annotation.images
# TODO(b/121375022): ClassLabel names should also contain 'id' and
# 'supercategory' (in addition to 'name')
# Warning: As Coco only uses 80 out of the 91 labels, the c['id'] values and
# the dataset's label ids won't match.
self.info.features["objects"]["label"].names = [
c["name"] for c in categories
]
# TODO(b/121375022): Conversion should be done by ClassLabel
categories_id2name = {c["id"]: c["name"] for c in categories}
# Iterate over all images
annotation_skipped = 0
for image_info in sorted(images, key=lambda x: x["id"]):
if has_annotation:
# Each instance annotation is a dict:
# {
# 'iscrowd': 0,
# 'bbox': [116.95, 305.86, 285.3, 266.03],
# 'image_id': 480023,
# 'segmentation': [[312.29, 562.89, 402.25, ...]],
# 'category_id': 58,
# 'area': 54652.9556,
# 'id': 86,
# }
instances = coco_annotation.get_annotations(img_id=image_info["id"])
else:
instances = [] # No annotations
if not instances:
annotation_skipped += 1
def build_bbox(x, y, width, height):
# pylint: disable=cell-var-from-loop
# build_bbox is only used within the loop so it is ok to use image_info
return tfds.features.BBox(
ymin=y / image_info["height"],
xmin=x / image_info["width"],
ymax=(y + height) / image_info["height"],
xmax=(x + width) / image_info["width"],
)
# pylint: enable=cell-var-from-loop
yield {
"image": os.path.join(image_dir, split_type, image_info["file_name"]),
"image/filename": image_info["file_name"],
"objects": [{
"bbox": build_bbox(*instance_info["bbox"]),
"label": categories_id2name[instance_info["category_id"]],
"is_crowd": bool(instance_info["iscrowd"]),
} for instance_info in instances],
}
logging.info(
"%d/%d images do not contain any annotations",
annotation_skipped,
len(images),
)
def str2ints(self, str_value):
"""Conversion string => encoded list[int]."""
if not self._encoder:
raise ValueError(
"Text.str2ints is not available because encoder hasn't been defined.")
return self._encoder.encode(str_value)
def ints2str(self, int_values):
"""Conversion list[int] => decoded string."""
if not self._encoder:
raise ValueError(
"Text.ints2str is not available because encoder hasn't been defined.")
return self._encoder.decode(int_values)
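# Usage sketch, assuming a `tfds.features.Text` feature configured with an
# encoder (here a byte-level one):
import tensorflow_datasets as tfds

text_feature = tfds.features.Text(encoder=tfds.features.text.ByteTextEncoder())
ids = text_feature.str2ints('hello')
assert text_feature.ints2str(ids) == 'hello'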
def maybe_build_from_corpus(self, corpus_generator, **kwargs):
"""Calls SubwordTextEncoder.build_from_corpus if encoder_cls is such."""
if self._encoder_cls is not text_lib.SubwordTextEncoder:
return
if self.encoder:
return
vocab_size = self._encoder_config.vocab_size
self.encoder = text_lib.SubwordTextEncoder.build_from_corpus(
corpus_generator=corpus_generator,
target_vocab_size=vocab_size,
**kwargs)
def sharded_filenames(filename_prefix, num_shards):
"""Sharded filenames given prefix and number of shards."""
shard_suffix = "%05d-of-%05d"
return [
"%s-%s" % (filename_prefix, shard_suffix % (i, num_shards))
for i in range(num_shards)
]
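# Example: two shards for a given prefix.
assert sharded_filenames('mnist-train.tfrecord', 2) == [
    'mnist-train.tfrecord-00000-of-00002',
    'mnist-train.tfrecord-00001-of-00002',
]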
def _walk_omniglot_dir(directory):
"""Walk an Omniglot directory and yield examples."""
directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0])
alphabets = sorted(tf.io.gfile.listdir(directory))
for alphabet in alphabets:
alphabet_dir = os.path.join(directory, alphabet)
characters = sorted(tf.io.gfile.listdir(alphabet_dir))
for character in characters:
character_id = int(character[len("character"):]) - 1
character_dir = os.path.join(alphabet_dir, character)
images = tf.io.gfile.listdir(character_dir)
for image in images:
label, _ = image.split("_")
label = int(label) - 1
image_path = os.path.join(character_dir, image)
yield alphabet, character_id, label, image_path
def _get_names(dirs):
"""Get alphabet and label names, union across all dirs."""
alphabets = set()
label_names = {}
for d in dirs:
for example in _walk_omniglot_dir(d):
alphabet, alphabet_char_id, label, _ = example
alphabets.add(alphabet)
label_name = "%s_%d" % (alphabet, alphabet_char_id)
if label in label_names:
assert label_names[label] == label_name
else:
label_names[label] = label_name
label_names = [label_names[k] for k in sorted(label_names)]
return alphabets, label_names
def size_str(size_in_bytes):
"""Returns a human readable size string.
If size_in_bytes is None, then returns "?? GiB".
For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string.
"""
if not size_in_bytes:
return "?? GiB"
size_in_bytes = float(size_in_bytes)
for (name, size_bytes) in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return "{:.2f} {}".format(value, name)
return "{} {}".format(int(size_in_bytes), "bytes")
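# `_NAME_LIST` pairs unit names with their size in bytes, largest first.
# A plausible sketch (the real table lives elsewhere in this module):
_NAME_LIST = [
    ('TiB', 1024 ** 4),
    ('GiB', 1024 ** 3),
    ('MiB', 1024 ** 2),
    ('KiB', 1024),
]
# size_str(1.5 * 1024 ** 3) == '1.50 GiB'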
def tqdm(self):
"""Add a progress bar for the current download."""
async_tqdm = utils.async_tqdm
with async_tqdm(total=0, desc='Dl Completed...', unit=' url') as pbar_url:
with async_tqdm(total=0, desc='Dl Size...', unit=' MiB') as pbar_dl_size:
self._pbar_url = pbar_url
self._pbar_dl_size = pbar_dl_size
yield
def download(self, url, destination_path):
"""Download url to given path.
Returns Promise -> sha256 of downloaded file.
Args:
url: address of resource to download.
destination_path: `str`, path to directory where to download the resource.
Returns:
Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
"""
self._pbar_url.update_total(1)
future = self._executor.submit(self._sync_download, url, destination_path)
return promise.Promise.resolve(future)
def _sync_kaggle_download(self, kaggle_url, destination_path):
"""Download with Kaggle API."""
kaggle_file = kaggle.KaggleFile.from_url(kaggle_url)
downloader = self.kaggle_downloader(kaggle_file.competition)
filepath = downloader.download_file(kaggle_file.filename, destination_path)
dl_size = tf.io.gfile.stat(filepath).length
checksum = self._checksumer()
with tf.io.gfile.GFile(filepath, 'rb') as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), dl_size
def _get_drive_url(self, url, session):
"""Returns url, possibly with confirmation token."""
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError(
'Failed to get url %s. HTTP code: %d.' % (url, response.status_code))
for k, v in response.cookies.items():
if k.startswith('download_warning'):
return url + '&confirm=' + v # v is the confirm token
# No token found, let's try with original URL:
return url
def _sync_download(self, url, destination_path):
"""Synchronous version of `download` method."""
proxies = {
'http': os.environ.get('TFDS_HTTP_PROXY', None),
'https': os.environ.get('TFDS_HTTPS_PROXY', None),
'ftp': os.environ.get('TFDS_FTP_PROXY', None)
}
if kaggle.KaggleFile.is_kaggle_url(url):
if proxies['http']:
os.environ['KAGGLE_PROXY'] = proxies['http']
return self._sync_kaggle_download(url, destination_path)
try:
# If url is on a filesystem that gfile understands, use copy. Otherwise,
# use requests.
if not url.startswith('http'):
return self._sync_file_copy(url, destination_path)
except tf.errors.UnimplementedError:
pass
session = requests.Session()
session.proxies = proxies
if _DRIVE_URL.match(url):
url = self._get_drive_url(url, session)
use_urllib = url.startswith('ftp')
if use_urllib:
if proxies['ftp']:
proxy = urllib.request.ProxyHandler({'ftp': proxies['ftp']})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener) # pylint: disable=too-many-function-args
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
else:
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError('Failed to get url %s. HTTP code: %d.' %
(url, response.status_code))
fname = _get_filename(response)
path = os.path.join(destination_path, fname)
size = 0
size_mb = 0
unit_mb = units.MiB
self._pbar_dl_size.update_total(
int(response.headers.get('Content-length', 0)) // unit_mb)
with tf.io.gfile.GFile(path, 'wb') as file_:
checksum = self._checksumer()
if use_urllib:
iterator = iter(lambda: response.read(io.DEFAULT_BUFFER_SIZE), b'')
else:
iterator = response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)
for block in iterator:
size += len(block)
# Update the progress bar
size_mb += len(block)
if size_mb > unit_mb:
self._pbar_dl_size.update(size_mb // unit_mb)
size_mb %= unit_mb
checksum.update(block)
file_.write(block)
self._pbar_url.update(1)
return checksum.hexdigest(), size
def _resize_image_if_necessary(image_fobj, target_pixels=None):
"""Resize an image to have (roughly) the given number of target pixels.
Args:
image_fobj: File object containing the original image.
target_pixels: If given, number of pixels that the image must have.
Returns:
A file object.
"""
if target_pixels is None:
return image_fobj
cv2 = tfds.core.lazy_imports.cv2
# Decode image using OpenCV2.
image = cv2.imdecode(
np.fromstring(image_fobj.read(), dtype=np.uint8), flags=3)
# Get image height and width.
height, width, _ = image.shape
actual_pixels = height * width
if actual_pixels > target_pixels:
factor = np.sqrt(target_pixels / actual_pixels)
image = cv2.resize(image, dsize=None, fx=factor, fy=factor)
# Encode the image with quality=72 and store it in a BytesIO object.
_, buff = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), 72])
return io.BytesIO(buff.tostring())
def _generate_examples(self, images_dir_path, csv_path=None, csv_usage=None):
"""Yields Example instances from given CSV.
Args:
images_dir_path: path to dir in which images are stored.
csv_path: optional, path to csv file with two columns: name of image and
label. If not provided, just scan image directory, don't set labels.
csv_usage: optional, subset of examples from the csv file to use based on
the "Usage" column from the csv.
"""
if csv_path:
with tf.io.gfile.GFile(csv_path) as csv_f:
reader = csv.DictReader(csv_f)
data = [(row["image"], int(row["level"]))
for row in reader
if csv_usage is None or row["Usage"] == csv_usage]
else:
data = [(fname[:-5], -1)
for fname in tf.io.gfile.listdir(images_dir_path)
if fname.endswith(".jpeg")]
for name, label in data:
yield {
"name": name,
"image": _resize_image_if_necessary(
tf.io.gfile.GFile("%s/%s.jpeg" % (images_dir_path, name),
mode="rb"),
target_pixels=self.builder_config.target_pixels),
"label": label,
}
def _slice_split_info_to_instruction_dicts(self, list_sliced_split_info):
"""Return the list of files and reading mask of the files to read."""
instruction_dicts = []
for sliced_split_info in list_sliced_split_info:
mask = splits_lib.slice_to_percent_mask(sliced_split_info.slice_value)
# Compute filenames from the given split
filepaths = list(sorted(self._build_split_filenames(
split_info_list=[sliced_split_info.split_info],
)))
# Compute the offsets
if sliced_split_info.split_info.num_examples:
shard_id2num_examples = splits_lib.get_shard_id2num_examples(
sliced_split_info.split_info.num_shards,
sliced_split_info.split_info.num_examples,
)
mask_offsets = splits_lib.compute_mask_offsets(shard_id2num_examples)
else:
logging.warning(
"Statistics not present in the dataset. TFDS is not able to load "
"the total number of examples, so using the subsplit API may not "
"provide precise subsplits."
)
mask_offsets = [0] * len(filepaths)
for filepath, mask_offset in zip(filepaths, mask_offsets):
instruction_dicts.append({
"filepath": filepath,
"mask": mask,
"mask_offset": mask_offset,
})
return instruction_dicts
def _build_split_filenames(self, split_info_list):
"""Construct the split filenames associated with the split info.
The filenames correspond to the pre-processed dataset files present in
the root directory of the dataset.
Args:
split_info_list: (list[SplitInfo]) List of splits from which to generate the
filenames
Returns:
filenames: (list[str]) The list of filename paths corresponding to the
split info object
"""
filenames = []
for split_info in split_info_list:
filenames.extend(naming.filepaths_for_dataset_split(
dataset_name=self.name,
split=split_info.name,
num_shards=split_info.num_shards,
data_dir=self._data_dir,
filetype_suffix=self._file_format_adapter.filetype_suffix,
))
return filenames
def _generate_examples(self, data_path):
"""Generate MovingMnist sequences.
Args:
data_path (str): Path to the data file
Yields:
20 x 64 x 64 x 1 uint8 numpy arrays
"""
with tf.io.gfile.GFile(data_path, "rb") as fp:
images = np.load(fp)
images = np.transpose(images, (1, 0, 2, 3))
images = np.expand_dims(images, axis=-1)
for sequence in images:
yield dict(image_sequence=sequence)
def _parse_single_video(self, example_proto):
"""Parses single video from the input tfrecords.
Args:
example_proto: tfExample proto with a single video.
Returns:
Tensor with the decoded video frames.
"""
context_features = {
"game_duration_loops": tf.io.FixedLenFeature([1], tf.int64),
"game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32),
"n_steps": tf.io.FixedLenFeature([1], tf.int64),
"screen_size": tf.io.FixedLenFeature([2], tf.int64),
}
sequence_features = {
"rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string),
}
_, seq_feat = tf.io.parse_single_sequence_example(
example_proto,
context_features=context_features,
sequence_features=sequence_features)
video_frames = tf.map_fn(
tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8)
return video_frames
def _generate_examples(self, filepath):
"""Generates examples for the dSprites data set.
Args:
filepath: path to the dSprites hdf5 file.
Yields:
Dictionaries with images, latent classes, and latent values.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file is >100x slower and the data set is small (26.7MB). Hence, we first
# load everything into memory before yielding the samples.
image_array, class_array, values_array = _load_data(filepath)
for image, classes, values in moves.zip(image_array, class_array,
values_array):
yield dict(
image=np.expand_dims(image, -1),
label_shape=classes[1],
label_scale=classes[2],
label_orientation=classes[3],
label_x_position=classes[4],
label_y_position=classes[5],
value_shape=values[1],
value_scale=values[2],
value_orientation=values[3],
value_x_position=values[4],
value_y_position=values[5])
def _split_generators(self, dl_manager):
"""Returns splits."""
# Download images and annotations that come in separate archives.
# Note, that the extension of archives is .tar.gz even though the actual
# archives format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({
"images": tfds.download.Resource(
url=os.path.join(_BASE_URL, "images.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR),
"annotations": tfds.download.Resource(
url=os.path.join(_BASE_URL, "annotations.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR)
})
images_path_dir = os.path.join(dl_paths["images"], "images")
annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations")
# Setup train and test splits
train_split = tfds.core.SplitGenerator(
name="train",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"trainval.txt"),
},
)
test_split = tfds.core.SplitGenerator(
name="test",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"test.txt")
},
)
return [train_split, test_split]
def _load_objects(csv_paths, csv_positions, prefix):
"""Returns objects listed within given CSV files."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_paths, csv_positions, prefix)
objects = collections.defaultdict(list)
for i, labels_path in enumerate(csv_paths):
with tf.io.gfile.GFile(labels_path) as csv_f:
if csv_positions[i] > 0:
csv_f.seek(csv_positions[i])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for image_id, source, label, confidence in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[i] = csv_f.tell()
image_id = int(image_id, 16)
current_obj = _Object(label, int(float(confidence) * 10), source)
objects[image_id].append(current_obj)
return dict(objects)
def _load_bboxes(csv_path, csv_positions, prefix):
"""Returns bounding boxes listed within given CSV file."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_path, csv_positions, prefix)
boxes = collections.defaultdict(list)
with tf.io.gfile.GFile(csv_path) as csv_f:
if csv_positions[0] > 0:
csv_f.seek(csv_positions[0])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for (image_id, source, label, confidence, xmin, xmax, ymin, ymax,
is_occluded, is_truncated, is_group_of, is_depiction, is_inside,
) in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[0] = csv_f.tell()
image_id = int(image_id, 16)
del confidence # always 1 in bounding boxes.
current_row = _Bbox(
label, source, tfds.features.BBox(
float(ymin), float(xmin), float(ymax), float(xmax)),
int(is_occluded), int(is_truncated),
int(is_group_of), int(is_depiction), int(is_inside))
boxes[image_id].append(current_row)
return dict(boxes)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
paths = dl_manager.download_and_extract(_URLS)
# Load labels from CSVs:
def load(names):
csv_positions = [0] * len(names)
return functools.partial(_load_objects, [paths[name] for name in names],
csv_positions)
train_objects = load(['train_human_labels', 'train_machine_labels'])
test_objects = load(['test_human_labels', 'test_machine_labels'])
validation_objects = load(['validation_human_labels',
'validation_machine_labels'])
def load_boxes(name):
csv_positions = [0]
return functools.partial(_load_bboxes, paths[name], csv_positions)
train_bbox = load_boxes('train-annotations-bbox')
test_bbox = load_boxes('test-annotations-bbox')
validation_bbox = load_boxes('validation-annotations-bbox')
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=512,
gen_kwargs=dict(archive_paths=paths['train_images'],
objects_getter=train_objects,
bboxes_getter=train_bbox,
prefixes='0123456789abcdef'),
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=36,
gen_kwargs=dict(archive_paths=[paths['test_images']],
objects_getter=test_objects,
bboxes_getter=test_bbox),
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=12,
gen_kwargs=dict(archive_paths=[paths['validation_images']],
objects_getter=validation_objects,
bboxes_getter=validation_bbox),
),
]
def _generate_examples(self, archive_paths, objects_getter, bboxes_getter,
prefixes=None):
"""Yields examples."""
trainable_classes = set(
self.info.features['objects_trainable']['label'].names)
for i, archive_path in enumerate(archive_paths):
prefix = prefixes[i] if prefixes else None
objects = objects_getter(prefix)
bboxes = bboxes_getter(prefix)
logging.info('Opening archive %s ...', archive_path)
archive = tfds.download.iter_archive(
archive_path, tfds.download.ExtractMethod.TAR_STREAM)
for fpath, fobj in archive:
fname = os.path.basename(fpath)
image_id = int(os.path.splitext(fname)[0], 16)
image_objects = [obj._asdict() for obj in objects.get(image_id, [])]
image_bboxes = [bbox._asdict() for bbox in bboxes.get(image_id, [])]
image_objects_trainable = [
obj for obj in image_objects if obj['label'] in trainable_classes
]
yield {
'image': _resize_image_if_necessary(
fobj, target_pixels=self.builder_config.target_pixels),
'image/filename': fname,
'objects': image_objects,
'objects_trainable': image_objects_trainable,
'bobjects': image_bboxes,
} |
Generate IMDB examples.
def _generate_examples(self, archive, directory):
"""Generate IMDB examples."""
reg = re.compile(os.path.join("^%s" % directory, "(?P<label>neg|pos)", ""))
for path, imdb_f in archive:
res = reg.match(path)
if not res:
continue
text = imdb_f.read().strip()
yield {
"text": text,
"label": res.groupdict()["label"],
} |
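As a quick illustration of the path filter above (the directory value and file names are hypothetical; labeled reviews live under `neg/` and `pos/` subdirectories):

import os
import re

directory = 'aclImdb/train'  # hypothetical extracted directory
reg = re.compile(os.path.join('^%s' % directory, '(?P<label>neg|pos)', ''))

print(bool(reg.match('aclImdb/train/pos/0_9.txt')))    # True  -> label 'pos'
print(bool(reg.match('aclImdb/train/unsup/0_0.txt')))  # False -> skipped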
Get hashes of urls in file.
def _get_url_hashes(path):
  """Get hashes of urls in file."""
  urls = _read_text_file(path)
  def url_hash(u):
    h = hashlib.sha1()
    try:
      h.update(u.encode('utf-8'))
    except UnicodeError:
      logging.error('Cannot hash url: %s', u)
    return h.hexdigest()
  return {url_hash(u): True for u in urls} |
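The resulting hex digests double as story filenames: `_find_files` below keeps a `.story` file when its basename appears in this dict. A minimal sketch (the URL is hypothetical):

import hashlib

url = 'http://www.cnn.com/2015/01/01/example/index.html'  # hypothetical
digest = hashlib.sha1(url.encode('utf-8')).hexdigest()
story_name = digest + '.story'  # the expected filename in the stories directory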
Find files corresponding to urls.
def _find_files(dl_paths, publisher, url_dict):
"""Find files corresponding to urls."""
if publisher == 'cnn':
top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
elif publisher == 'dm':
top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
else:
logging.fatal('Unsupported publisher: %s', publisher)
files = tf.io.gfile.listdir(top_dir)
ret_files = []
for p in files:
basename = os.path.basename(p)
if basename[0:basename.find('.story')] in url_dict:
ret_files.append(os.path.join(top_dir, p))
return ret_files |
Get filenames for a particular split.
def _subset_filenames(dl_paths, split):
"""Get filenames for a particular split."""
assert isinstance(dl_paths, dict), dl_paths
# Get filenames for a split.
if split == tfds.Split.TRAIN:
urls = _get_url_hashes(dl_paths['train_urls'])
elif split == tfds.Split.VALIDATION:
urls = _get_url_hashes(dl_paths['val_urls'])
elif split == tfds.Split.TEST:
urls = _get_url_hashes(dl_paths['test_urls'])
else:
logging.fatal('Unsupported split: %s', split)
cnn = _find_files(dl_paths, 'cnn', urls)
dm = _find_files(dl_paths, 'dm', urls)
return cnn + dm |
Get abstract (highlights) and article from a story file path.
def _get_art_abs(story_file):
"""Get abstract (highlights) and article from a story file path."""
# Based on https://github.com/abisee/cnn-dailymail/blob/master/
# make_datafiles.py
lines = _read_text_file(story_file)
# Lowercase everything
lines = [line.lower() for line in lines]
# Put periods on the ends of lines that are missing them
# (this is a problem in the dataset because many image captions don't end in
# periods; consequently they end up in the body of the article as run-on
# sentences)
def fix_missing_period(line):
"""Adds a period to a line that is missing a period."""
if '@highlight' in line: return line
if not line: return line
if line[-1] in END_TOKENS: return line
return line + ' .'
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
article_lines = []
highlights = []
next_is_highlight = False
for line in lines:
if not line:
continue # empty line
elif line.startswith('@highlight'):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
# Make article into a single string
article = ' '.join(article_lines)
# Make abstract into a single string, putting <s> and </s> tags around
# the sentences.
abstract = ' '.join(['%s %s %s' % (SENTENCE_START, sent,
SENTENCE_END) for sent in highlights])
return article, abstract |
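A worked example of the transformation, assuming `SENTENCE_START`/`SENTENCE_END` are the `<s>`/`</s>` tags mentioned in the comment (the period check is simplified here to a bare '.' instead of the full `END_TOKENS` set):

SENTENCE_START, SENTENCE_END = '<s>', '</s>'  # assumed tag values
lines = [l.lower() for l in ['The cat sat on the mat', '@highlight', 'Cat sits']]
lines = [l if not l or '@highlight' in l or l[-1] == '.' else l + ' .' for l in lines]
article_lines, highlights, next_is_highlight = [], [], False
for line in lines:
    if line.startswith('@highlight'):
        next_is_highlight = True
    elif next_is_highlight:
        highlights.append(line)
    else:
        article_lines.append(line)
print(' '.join(article_lines))  # 'the cat sat on the mat .'
print(' '.join('%s %s %s' % (SENTENCE_START, s, SENTENCE_END) for s in highlights))
# '<s> cat sits . </s>'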
Export the results.
def exporter(directory, method, datasets):
    """Export the results."""
    if method.lower() == 'json':
        # Convert the datasets dict to a JSON-formatted string
        json_string = json.dumps(datasets, indent=4)
        with open('{}/exported.json'.format(directory), 'w+') as savefile:
            savefile.write(json_string)
    if method.lower() == 'csv':
        with open('{}/exported.csv'.format(directory), 'w+') as csvfile:
            csv_writer = csv.writer(
                csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            for key, values in datasets.items():
                if values is None:
                    csv_writer.writerow([key])
                else:
                    csv_writer.writerow([key] + values) |
Query archive.org.
def time_machine(host, mode):
    """Query archive.org."""
    now = datetime.datetime.now()
    # The CDX API expects zero-padded yyyymmdd timestamps
    to = '%04d%02d%02d' % (now.year, now.month, now.day)
    if now.month > 6:
        fro = '%04d%02d%02d' % (now.year, now.month - 6, now.day)
    else:
        fro = '%04d%02d%02d' % (now.year - 1, now.month + 6, now.day)
    url = "http://web.archive.org/cdx/search?url=%s&matchType=%s&collapse=urlkey&fl=original&filter=mimetype:text/html&filter=statuscode:200&output=json&from=%s&to=%s" % (host, mode, fro, to)
    response = get(url).text
    parsed = json.loads(response)[1:]
    urls = []
    for item in parsed:
        urls.append(item[0])
    return urls |
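With `output=json&fl=original`, the CDX endpoint returns a JSON array whose first row is the field-name header, which is why the `[1:]` slice above drops it. A hypothetical response:

import json

response = '[["original"], ["http://example.com/"], ["http://example.com/about"]]'
parsed = json.loads(response)[1:]  # drop the ["original"] header row
urls = [item[0] for item in parsed]
print(urls)  # ['http://example.com/', 'http://example.com/about']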
Extract links from robots.txt and sitemap.xml.
def zap(input_url, archive, domain, host, internal, robots, proxies):
"""Extract links from robots.txt and sitemap.xml."""
if archive:
print('%s Fetching URLs from archive.org' % run)
        archived_urls = time_machine(host, 'host')
        print('%s Retrieved %i URLs from archive.org' % (
            good, len(archived_urls)))
for url in archived_urls:
verb('Internal page', url)
internal.add(url)
# Makes request to robots.txt
response = requests.get(input_url + '/robots.txt',
proxies=random.choice(proxies)).text
# Making sure robots.txt isn't some fancy 404 page
if '<body' not in response:
        # Extract the paths from Allow and Disallow directives
        matches = re.findall(r'Allow: (.*)|Disallow: (.*)', response)
if matches:
# Iterating over the matches, match is a tuple here
for match in matches:
# One item in match will always be empty so will combine both
# items
match = ''.join(match)
# If the URL doesn't use a wildcard
if '*' not in match:
url = input_url + match
# Add the URL to internal list for crawling
internal.add(url)
# Add the URL to robots list
robots.add(url)
print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
# Makes request to sitemap.xml
response = requests.get(input_url + '/sitemap.xml',
proxies=random.choice(proxies)).text
    # Making sure sitemap.xml isn't some fancy 404 page
if '<body' not in response:
matches = xml_parser(response)
if matches: # if there are any matches
print('%s URLs retrieved from sitemap.xml: %s' % (
good, len(matches)))
for match in matches:
verb('Internal page', match)
# Cleaning up the URL and adding it to the internal list for
# crawling
internal.add(match) |
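A quick illustration of the Allow/Disallow extraction above (sample robots.txt content is hypothetical):

import re

robots_txt = 'User-agent: *\nDisallow: /admin\nAllow: /public\n'  # hypothetical
matches = re.findall(r'Allow: (.*)|Disallow: (.*)', robots_txt)
# One slot of each tuple is always empty, hence the ''.join trick above.
paths = [''.join(match) for match in matches]
print(paths)  # ['/admin', '/public']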
Handle the requests and return the response body.
def requester(
url,
main_url=None,
delay=0,
cook=None,
headers=None,
timeout=10,
host=None,
proxies=[None],
user_agents=[None],
failed=None,
processed=None
):
"""Handle the requests and return the response body."""
cook = cook or set()
headers = headers or set()
user_agents = user_agents or ['Photon']
failed = failed or set()
processed = processed or set()
# Mark the URL as crawled
processed.add(url)
# Pause/sleep the program for specified time
time.sleep(delay)
def make_request(url):
"""Default request"""
final_headers = headers or {
'Host': host,
# Selecting a random user-agent
'User-Agent': random.choice(user_agents),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip',
'DNT': '1',
'Connection': 'close',
}
try:
response = SESSION.get(
url,
cookies=cook,
headers=final_headers,
verify=False,
timeout=timeout,
stream=True,
proxies=random.choice(proxies)
)
except TooManyRedirects:
return 'dummy'
        content_type = response.headers.get('content-type', '')
        if 'text/html' in content_type or 'text/plain' in content_type:
            if response.status_code != 404:
                return response.text
else:
response.close()
failed.add(url)
return 'dummy'
else:
response.close()
return 'dummy'
return make_request(url) |
Extract intel from the response body.
def intel_extractor(url, response):
    """Extract intel from the response body."""
    # Strip script blocks, then any remaining tags, before matching
    res = re.sub(r'(?s)<(script).*?</\1>', '', response)
    res = re.sub(r'<[^<]+?>', '', res)
    for rintel in rintels:
        matches = rintel[0].findall(res)
        if matches:
            for match in matches:
                verb('Intel', match)
                bad_intel.add((match, rintel[1], url)) |
Extract js files from the response body
def js_extractor(response):
"""Extract js files from the response body"""
# Extract .js files
matches = rscript.findall(response)
for match in matches:
match = match[2].replace('\'', '').replace('"', '')
verb('JS file', match)
bad_scripts.add(match) |
Extract details from the response body.
def extractor(url):
"""Extract details from the response body."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
if clone:
mirror(url, response)
matches = rhref.findall(response)
for link in matches:
# Remove everything after a "#" to deal with in-page anchors
link = link[1].replace('\'', '').replace('"', '').split('#')[0]
# Checks if the URLs should be crawled
if is_link(link, processed, files):
if link[:4] == 'http':
if link.startswith(main_url):
verb('Internal page', link)
internal.add(link)
else:
verb('External page', link)
external.add(link)
elif link[:2] == '//':
if link.split('/')[2].startswith(host):
verb('Internal page', link)
internal.add(schema + '://' + link)
else:
verb('External page', link)
external.add(link)
elif link[:1] == '/':
verb('Internal page', link)
internal.add(remove_file(url) + link)
else:
verb('Internal page', link)
usable_url = remove_file(url)
                if usable_url.endswith('/'):
                    internal.add(usable_url + link)
                else:
                    internal.add(usable_url + '/' + link)
if not only_urls:
intel_extractor(url, response)
js_extractor(response)
if args.regex and not supress_regex:
regxy(args.regex, response, supress_regex, custom)
if api:
matches = rentropy.findall(response)
for match in matches:
if entropy(match) >= 4:
verb('Key', match)
keys.add(url + ': ' + match) |
Extract endpoints from JavaScript code.
def jscanner(url):
"""Extract endpoints from JavaScript code."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
# Extract URLs/endpoints
matches = rendpoint.findall(response)
# Iterate over the matches, match is a tuple
for match in matches:
# Combining the items because one of them is always empty
match = match[0] + match[1]
# Making sure it's not some JavaScript code
if not re.search(r'[}{><"\']', match) and not match == '/':
verb('JS endpoint', match)
endpoints.add(match) |
Update the current installation.
git clones the latest version and merges it with the current directory.
def updater():
"""Update the current installation.
git clones the latest version and merges it with the current directory.
"""
print('%s Checking for updates' % run)
# Changes must be separated by ;
changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''
latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com')
# Just a hack to see if a new version is available
if changes not in latest_commit:
changelog = re.search(r"changes = '''(.*?)'''", latest_commit)
# Splitting the changes to form a list
changelog = changelog.group(1).split(';')
print('%s A new version of Photon is available.' % good)
print('%s Changes:' % info)
for change in changelog: # print changes
print('%s>%s %s' % (green, end, change))
        current_path = os.getcwd().split('/')  # path components of the cwd
folder = current_path[-1] # current directory name
path = '/'.join(current_path) # current directory path
choice = input('%s Would you like to update? [Y/n] ' % que).lower()
if choice != 'n':
print('%s Updating Photon' % run)
os.system('git clone --quiet https://github.com/s0md3v/Photon %s'
% (folder))
os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'
% (path, folder, path, path, folder))
print('%s Update successful!' % good)
else:
print('%s Photon is up to date!' % good) |
Find subdomains according to the TLD.
def find_subdomains(domain):
"""Find subdomains according to the TLD."""
result = set()
response = get('https://findsubdomains.com/subdomains-of/' + domain).text
matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response)
for match in matches:
result.add(match.replace(' ', '').replace('\n', ''))
return list(result) |
Process the URLs, using a threadpool to execute a function over them.
def flash(function, links, thread_count):
    """Process the URLs, using a threadpool to execute a function over them."""
# Convert links (set) to list
links = list(links)
threadpool = concurrent.futures.ThreadPoolExecutor(
max_workers=thread_count)
futures = (threadpool.submit(function, link) for link in links)
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
if i + 1 == len(links) or (i + 1) % thread_count == 0:
print('%s Progress: %i/%i' % (info, i + 1, len(links)),
end='\r')
print('') |
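A minimal usage sketch for `flash`, here driving the built-in `print` over two hypothetical links with 2 worker threads; in the crawler it is called with functions like `extractor` or `jscanner`:

flash(print, {'https://example.com/a', 'https://example.com/b'}, 2)
# Each link is handled by a worker; progress is reported every 2 completions.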
Extract a string based on regex pattern supplied by user.
def regxy(pattern, response, supress_regex, custom):
"""Extract a string based on regex pattern supplied by user."""
try:
matches = re.findall(r'%s' % pattern, response)
for match in matches:
verb('Custom regex', match)
custom.add(match)
    except re.error:
        supress_regex = True |
Determine whether or not a link should be crawled
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled
def is_link(url, processed, files):
"""
Determine whether or not a link should be crawled
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled
"""
if url not in processed:
is_file = url.endswith(BAD_TYPES)
if is_file:
files.add(url)
return False
return True
return False |
Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex
def remove_regex(urls, regex):
"""
Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex
"""
if not regex:
return urls
# To avoid iterating over the characters of a string
if not isinstance(urls, (list, set, tuple)):
urls = [urls]
try:
non_matching_urls = [url for url in urls if not re.search(regex, url)]
except TypeError:
return []
return non_matching_urls |
Write the results.
def writer(datasets, dataset_names, output_dir):
"""Write the results."""
for dataset, dataset_name in zip(datasets, dataset_names):
if dataset:
filepath = output_dir + '/' + dataset_name + '.txt'
with open(filepath, 'w+') as out_file:
joined = '\n'.join(dataset)
                out_file.write(joined)
out_file.write('\n') |
Return the passed time.
def timer(diff, processed):
"""Return the passed time."""
# Changes seconds into minutes and seconds
minutes, seconds = divmod(diff, 60)
try:
# Finds average time taken by requests
time_per_request = diff / float(len(processed))
except ZeroDivisionError:
time_per_request = 0
return minutes, seconds, time_per_request |
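For example (values hypothetical):

minutes, seconds, per_request = timer(125.0, ['url1', 'url2'])
print(minutes, seconds, per_request)  # 2.0 5.0 62.5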
Calculate the entropy of a string.
def entropy(string):
    """Calculate the entropy of a string."""
    data = string.encode('utf-8')
    entropy = 0
    for byte in range(256):
        # bytes.count accepts an int byte value in Python 3
        result = float(data.count(byte)) / len(data)
        if result != 0:
            entropy = entropy - result * math.log(result, 2)
    return entropy |
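A quick sanity check, tied to the `entropy(match) >= 4` threshold used for key detection in `extractor` above:

print(entropy('aaaa'))              # 0.0 -- a single repeated byte carries no information
print(entropy('abcdefghijklmnop'))  # 4.0 -- 16 distinct bytes, log2(16) bits each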
This function extracts valid headers from interactive input.
def extract_headers(headers):
"""This function extracts valid headers from interactive input."""
sorted_headers = {}
matches = re.findall(r'(.*):\s(.*)', headers)
for match in matches:
header = match[0]
value = match[1]
try:
if value[-1] == ',':
value = value[:-1]
sorted_headers[header] = value
except IndexError:
pass
return sorted_headers |
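For example, pasted input like the following yields a clean dict, with the trailing comma stripped (sample values hypothetical):

raw = 'Host: example.com\nUser-Agent: Photon,'
print(extract_headers(raw))
# {'Host': 'example.com', 'User-Agent': 'Photon'}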
Extract the top level domain from a URL.
def top_level(url, fix_protocol=True):
    """Extract the top level domain from a URL."""
ext = tld.get_tld(url, fix_protocol=fix_protocol)
toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split(
ext)[0] + ext
return toplevel |
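Usage sketch (assumes the `tld` package is installed; note that the `[-2:]` slice handles simple single-part suffixes like `.com`, not multi-part ones like `.co.uk`):

print(top_level('https://blog.example.com/post'))  # 'example.com'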
Match IP:PORT or DOMAIN:PORT in a loose manner.
def proxy_type(v):
    """Match IP:PORT or DOMAIN:PORT in a loose manner."""
proxies = []
if re.match(r"((http|socks5):\/\/.)?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})", v):
proxies.append({"http": v,
"https": v})
return proxies
elif re.match(r"((http|socks5):\/\/.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}:(\d{1,5})", v):
proxies.append({"http": v,
"https": v})
return proxies
elif is_proxy_list(v, proxies):
return proxies
else:
raise argparse.ArgumentTypeError(
"Proxy should follow IP:PORT or DOMAIN:PORT format") |
Query dnsdumpster.com.
def dnsdumpster(domain, output_dir):
"""Query dnsdumpster.com."""
response = requests.Session().get('https://dnsdumpster.com/').text
csrf_token = re.search(
r"name='csrfmiddlewaretoken' value='(.*?)'", response).group(1)
cookies = {'csrftoken': csrf_token}
headers = {'Referer': 'https://dnsdumpster.com/'}
data = {'csrfmiddlewaretoken': csrf_token, 'targetip': domain}
response = requests.Session().post(
'https://dnsdumpster.com/', cookies=cookies, data=data, headers=headers)
image = requests.get('https://dnsdumpster.com/static/map/%s.png' % domain)
if image.status_code == 200:
with open('%s/%s.png' % (output_dir, domain), 'wb') as f:
f.write(image.content) |
Present the user a prompt.
def prompt(default=None):
"""Present the user a prompt."""
editor = 'nano'
with tempfile.NamedTemporaryFile(mode='r+') as tmpfile:
if default:
tmpfile.write(default)
tmpfile.flush()
child_pid = os.fork()
is_child = child_pid == 0
if is_child:
os.execvp(editor, [editor, tmpfile.name])
else:
os.waitpid(child_pid, 0)
tmpfile.seek(0)
return tmpfile.read().strip() |
Start the market thread and register the backtest broker thread.
QAMarket inherits from QATrader; QATrader has a trade_engine attribute of type QA_Engine, which inherits from QA_Thread.
def start_market(self):
    """
    Start the market thread and register the backtest broker thread.
    QAMarket inherits from QATrader; QATrader has a trade_engine attribute
    of type QA_Engine, which inherits from QA_Thread.
    """
    # Start the trade_engine thread
    self.market.start()
    # Register the backtest broker and start its associated QA_Thread,
    # stored in the kernels dict as {'broker_name': QAThread}
    #self.market.register(self.broker_name, self.broker)
    self.market.connect(self.broker_name) |
Generator-driven data flow.
def run(self):
    """Generator-driven data flow.
    """
    # A settlement event is only triggered when the date changes
    _date = None
    while QA_util_if_tradetime(self.now):
        for data in self.ingest_data:  # for each batch in ingest_data
            # <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>
            date = data.date[0]
            if self.market_type is MARKET_TYPE.STOCK_CN:  # stock market
                if _date != date:  # a new date has arrived
                    # The previous trading day is over:
                    # send settle events to the broker and the account
                    try:
                        self.market.trade_engine.join()
                        # time.sleep(2)
                        self.market._settle(self.broker_name)
                    except Exception as e:
                        raise e
            # funds, indices, futures
            elif self.market_type in [MARKET_TYPE.FUND_CN, MARKET_TYPE.INDEX_CN, MARKET_TYPE.FUTURE_CN]:
                self.market._settle(self.broker_name)
            # print(data)
            self.broker.run(
                QA_Event(event_type=ENGINE_EVENT.UPCOMING_DATA, market_data=data))
            # Generate an UPCOMING_DATA event and put it on the execution queue
            self.market.upcoming_data(self.broker_name, data)
            self.market.trade_engine.join()
            _date = date |
The standard message which can be transferred.
def message(self):
    'The standard message which can be transferred.'
    return {
        'source': 'account',
        'frequence': self.frequence,
        'account_cookie': self.account_cookie,
        'portfolio_cookie': self.portfolio_cookie,
        'user_cookie': self.user_cookie,
        'broker': self.broker,
        'market_type': self.market_type,
        'strategy_name': self.strategy_name,
        'current_time': str(self._currenttime),
        'allow_sellopen': self.allow_sellopen,
        'allow_margin': self.allow_margin,
        'allow_t0': self.allow_t0,
        'margin_level': self.margin_level,
        'init_assets': self.init_assets,
        'init_cash': self.init_cash,
        'init_hold': self.init_hold.to_dict(),
        'commission_coeff': self.commission_coeff,
        'tax_coeff': self.tax_coeff,
        'cash': self.cash,
        'history': self.history,
        'trade_index': self.time_index_max,
        'running_time': str(datetime.datetime.now())
        if self.running_time is None else str(self.running_time),
        'quantaxis_version': self.quantaxis_version,
        'running_environment': self.running_environment,
        'start_date': self.start_date,
        'end_date': self.end_date,
        'frozen': self.frozen,
        'finished_id': self.finishedOrderid
    } |
Initial holdings with the account_cookie attached.
Returns:
    pd.DataFrame -- init_hold re-indexed by ['code', 'account_cookie']
def init_hold_with_account(self):
    """Initial holdings with the account_cookie attached.
    Returns:
        pd.DataFrame -- init_hold re-indexed by ['code', 'account_cookie']
    """
    return self.init_hold.reset_index().assign(
        account_cookie=self.account_cookie
    ).set_index(['code', 'account_cookie']) |
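A minimal sketch of what this produces (the shape of `init_hold` here is an assumption: a Series of share amounts indexed by code):

import pandas as pd

init_hold = pd.Series({'000001': 100, '600000': 200}, name='amount')
init_hold.index.name = 'code'
df = init_hold.reset_index().assign(
    account_cookie='Acc_demo'  # hypothetical cookie
).set_index(['code', 'account_cookie'])
print(df)
#                        amount
# code   account_cookie
# 000001 Acc_demo           100
# 600000 Acc_demo           200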
The account's first trading date (used in backtests only).
Raises:
    RuntimeWarning -- printed (not raised) when the account has no trades
Returns:
    str -- the first trading date, as 'YYYY-MM-DD'
def start_date(self):
    """The account's first trading date (used in backtests only).
    Raises:
        RuntimeWarning -- printed (not raised) when the account has no trades
    Returns:
        str -- the first trading date, as 'YYYY-MM-DD'
    """
    if self.start_ is None:
        if len(self.time_index_max) > 0:
            return str(min(self.time_index_max))[0:10]
        else:
            print(
                RuntimeWarning(
                    'QAACCOUNT: THIS ACCOUNT DOES NOT HAVE ANY TRADE'
                )
            )
    else:
        return self.start_ |
The account's last trading date (used in backtests only).
Raises:
    RuntimeWarning -- printed (not raised) when the account has no trades
Returns:
    str -- the last trading date, as 'YYYY-MM-DD'
def end_date(self):
    """The account's last trading date (used in backtests only).
    Raises:
        RuntimeWarning -- printed (not raised) when the account has no trades
    Returns:
        str -- the last trading date, as 'YYYY-MM-DD'
    """
    if self.end_ is None:
        if len(self.time_index_max) > 0:
            return str(max(self.time_index_max))[0:10]
        else:
            print(
                RuntimeWarning(
                    'QAACCOUNT: THIS ACCOUNT DOES NOT HAVE ANY TRADE'
                )
            )
    else:
        return self.end_ |
Table of the trade history for the interval.
def history_table_min(self):
    'Table of the trade history for the interval.'
    if len(self.history_min) > 0:
        lens = len(self.history_min[0])
    else:
        lens = len(self._history_headers)
    return pd.DataFrame(
        data=self.history_min,
        columns=self._history_headers[:lens]
    ).sort_index() |
Table of the trade history.
def history_table(self):
    'Table of the trade history.'
    if len(self.history) > 0:
        lens = len(self.history[0])
    else:
        lens = len(self._history_headers)
    return pd.DataFrame(
        data=self.history,
        columns=self._history_headers[:lens]
    ).sort_index() |
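The `[:lens]` slice keeps the header list aligned with rows recorded by older versions that logged fewer fields. A small sketch of the same trick (header names are hypothetical):

import pandas as pd

_history_headers = ['datetime', 'code', 'price', 'amount', 'order_id']
history = [['2019-01-02 09:31:00', '000001', 9.50, 100]]  # only 4 fields recorded
lens = len(history[0])
df = pd.DataFrame(data=history, columns=_history_headers[:lens])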