File size: 2,377 Bytes
24ee802
 
 
 
 
 
 
 
 
 
 
6117d42
24ee802
 
 
 
 
 
6117d42
 
 
24ee802
 
 
6117d42
24ee802
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6117d42
 
 
 
 
24ee802
 
 
6117d42
24ee802
1d55733
24ee802
 
 
6117d42
24ee802
 
 
 
 
6117d42
 
 
 
 
 
 
24ee802
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
# llg/preprocess.py

import argparse, asyncio, os
from itertools import islice

from aiostream import stream
import h5py as h5
import numpy as np
import pandas as pd
from tqdm.asyncio import trange

from .loader import datafile_path, gen_samples, samples_count
from .crawler import run

script_dir = os.path.dirname(__file__)
outfile_path = os.path.join(script_dir, "../data/lld-processed.h5")


async def gen_processor(
    batch_size: int, limit: int, datafile_path: str = datafile_path
):
    """Asynchronously yield ``(images, description, name)`` tuples.

    Streams samples from the HDF5 archive in batches, crawls a description
    for each sample's inferred homepage, and yields one tuple per sample.

    Args:
        batch_size: Number of samples fetched/crawled per step.
        limit: Upper bound on total samples processed.
        datafile_path: Path to the downloaded archive (defaults to the
            module-level ``datafile_path`` from ``.loader``).

    Yields:
        Tuples of (images, description, name) where ``name`` is the decoded
        sample name and ``description`` is the crawler's result for
        ``http://<name>.com``.
    """
    count = min(limit, samples_count)
    batch_size = min(limit, batch_size)

    samples = gen_samples(datafile_path=datafile_path)
    steps = count // batch_size

    for _ in trange(steps):
        # BUG FIX: `samples` is a generator, so islice continues from the
        # generator's current position. The original absolute-offset slice
        # islice(samples, step * batch_size, (step + 1) * batch_size)
        # skipped `step * batch_size` already-consumed items on every
        # iteration, silently dropping most of the dataset. Taking the next
        # `batch_size` items is the correct streaming slice.
        batch = list(islice(samples, batch_size))
        if not batch:
            # Generator exhausted early (e.g. limit > available samples).
            break

        urls = [f"http://{sample['meta_data/names'].decode()}.com" for sample in batch]
        descriptions = await run(urls, batch_size)

        for sample, description in zip(batch, descriptions):
            # BUG FIX: the original had a trailing comma, making `name` a
            # 1-tuple; downstream this put tuples into the "name" column.
            name = sample["meta_data/names"].decode()
            images = sample["data"]

            yield (images, description, name)


async def preprocess(
    batch_size: int = 100,
    limit: int = samples_count + 1,
    datafile_path: str = datafile_path,
):
    """Crawl a description for every sample and store results in HDF5.

    Consumes ``gen_processor`` in chunks and appends each chunk to the
    ``"data"`` key of ``outfile_path`` as a pandas HDF5 table.

    Args:
        batch_size: Crawl batch size forwarded to ``gen_processor``.
        limit: Upper bound on total samples processed.
        datafile_path: Path to the downloaded archive.
    """
    columns = ["images", "description", "name"]

    processor = gen_processor(batch_size, limit, datafile_path=datafile_path)

    chunk_size = 1000
    async with stream.chunks(processor, chunk_size).stream() as chunks:
        async for chunk in chunks:
            df_chunk = pd.DataFrame(chunk, columns=columns)
            # BUG FIX: mode="a" alone re-creates the "data" key on every
            # to_hdf call, so only the final chunk survived. append=True with
            # format="table" accumulates rows across chunks (data_columns=
            # is only honored by the table format in the first place).
            df_chunk.to_hdf(
                outfile_path,
                key="data",
                mode="a",
                format="table",
                append=True,
                data_columns=columns,
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--datafile_path",
        help="Path to downloaded archive",
        type=str,
        default=datafile_path,
    )

    parser.add_argument(
        "--limit",
        help="Limit to total records processed",
        type=int,
        default=samples_count + 1,
    )

    parser.add_argument(
        "--batch_size",
        help="Batch size",
        type=int,
        nargs="?",
        const=10_000,
        default=10_000,
    )

    args = parser.parse_args()

    # BUG FIX: --datafile_path was parsed but never forwarded, so the CLI
    # flag was silently ignored and the default archive was always used.
    asyncio.run(
        preprocess(
            batch_size=args.batch_size,
            limit=args.limit,
            datafile_path=args.datafile_path,
        )
    )