Diwank Singh Tomer committed on
Commit
24ee802
·
1 Parent(s): a4b90f7

feat: Add data crawler and loader

Browse files

Signed-off-by: Diwank Singh Tomer <diwank.singh@gmail.com>

Files changed (7) hide show
  1. .gitattributes +38 -0
  2. lld/__init__.py +2 -0
  3. lld/crawler.py +71 -0
  4. lld/loader.py +48 -0
  5. lld/preprocess.py +81 -0
  6. poetry.lock +0 -0
  7. pyproject.toml +32 -0
.gitattributes ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.hdf5 filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.wasm filter=lfs diff=lfs merge=lfs -text
25
+ *.xz filter=lfs diff=lfs merge=lfs -text
26
+ *.zip filter=lfs diff=lfs merge=lfs -text
27
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
28
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
29
+ # Audio files - uncompressed
30
+ *.pcm filter=lfs diff=lfs merge=lfs -text
31
+ *.sam filter=lfs diff=lfs merge=lfs -text
32
+ *.raw filter=lfs diff=lfs merge=lfs -text
33
+ # Audio files - compressed
34
+ *.aac filter=lfs diff=lfs merge=lfs -text
35
+ *.flac filter=lfs diff=lfs merge=lfs -text
36
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
37
+ *.ogg filter=lfs diff=lfs merge=lfs -text
38
+ *.wav filter=lfs diff=lfs merge=lfs -text
lld/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ #!/usr/bin/env python3
2
+ # lld/__init__.py
lld/crawler.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lld/crawler.py
2
+
3
+ import asyncio
4
+ from urllib.parse import urlparse
5
+
6
+ import aiohttp
7
+ from extraction import Extractor
8
+ from tqdm.asyncio import tqdm, tqdm_asyncio
9
+
10
+
11
def get_domain(url: str):
    """Return the network location (host[:port]) component of *url*."""
    return urlparse(url).netloc
14
+
15
+
16
async def gather_with_concurrency(n: int, tasks):
    """Schedule *tasks* so that at most *n* run concurrently.

    Returns an iterator of futures (from ``asyncio.as_completed``) that the
    caller awaits one by one, in completion order.
    """
    semaphore = asyncio.Semaphore(n)

    async def sem_task(task):
        # Hold the semaphore for the lifetime of the wrapped awaitable,
        # which is what actually caps the in-flight task count.
        async with semaphore:
            return await task

    # BUG FIX: the original returned as_completed(tasks) directly, so
    # sem_task was never applied and the concurrency limit had no effect.
    return asyncio.as_completed([sem_task(task) for task in tasks])
25
+
26
+
27
async def scrape_description(
    url: str, session: aiohttp.ClientSession, timeout: int = 15
) -> str:
    """Scrape description of the given url from <head> and <meta> tags.

    Returns the page titles, description, and domain joined by newlines.
    Falls back to just the domain name when the fetch or extraction fails.
    """
    extractor = Extractor()

    domain = get_domain(url)

    try:
        async with session.get(url, timeout=timeout) as response:
            html = await response.text()
            data = extractor.extract(html, source_url=url)

        collected = [
            *data.titles,
            data.description,
            domain,
        ]

        # filter(bool, ...) drops None/empty entries before joining.
        results = "\n".join(filter(bool, collected))
        return results
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # asyncio.CancelledError (breaking task cancellation) and
        # KeyboardInterrupt. Best-effort fallback: return the domain.
        return domain
51
+
52
+
53
async def crawl(urls: list[str], batch_size: int = 100):
    """Async generator yielding one scraped description per url in *urls*."""
    # Unlimited connector-level connections; concurrency is capped by
    # gather_with_concurrency. DNS results are cached for 5 minutes.
    connector = aiohttp.TCPConnector(limit=None, ttl_dns_cache=300)

    async with aiohttp.ClientSession(connector=connector) as session:
        coros = [scrape_description(url, session) for url in urls]
        pending = await gather_with_concurrency(batch_size, coros)

        for fut in pending:
            yield await fut
64
+
65
+
66
async def run(urls: list[str], batch_size: int = 100):
    """Crawl *urls* and collect all descriptions, showing a progress bar."""
    progress = tqdm_asyncio(crawl(urls, batch_size), total=len(urls))
    return [description async for description in progress]
lld/loader.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lld/loader.py
2
+
3
+ import os
4
+
5
+ import h5py
6
+ import numpy as np
7
+ import PIL.Image as Image
8
+
9
# Resolve the dataset path relative to this file so the module works
# regardless of the current working directory.
script_dir = os.path.dirname(__file__)
datafile_path = os.path.join(script_dir, "../data/LLD-logo.hdf5")

# Open the file once at import time just to record the sample count;
# the handle is closed again immediately ("throwaway").
with h5py.File(datafile_path, "r") as throwaway:
    samples_count: int = len(throwaway["data"])
14
+
15
+
16
def gen_samples(labels: list[str] | None = None):
    """Yield one sample dict per record in the LLD-logo HDF5 file.

    Parameters
    ----------
    labels:
        HDF5 dataset paths to include in each yielded dict.
        Defaults to ``["data", "meta_data/names"]``.
    """
    # BUG FIX: the default was a mutable list literal, which is shared
    # across all calls to the function; use the None-sentinel idiom.
    if labels is None:
        labels = ["data", "meta_data/names"]

    # open hdf5 file
    with h5py.File(datafile_path, "r") as hdf5_file:
        count = len(hdf5_file["data"])

        for i in range(count):
            result = {}

            if "data" in labels:
                # Images are stored zero-padded; crop back to the true
                # height/width recorded in the "shapes" dataset.
                shape = hdf5_file["shapes"][i]
                images = hdf5_file["data"][i][:, : shape[1], : shape[2]]

                result["data"] = images.astype(np.uint8)

            for label in (lbl for lbl in labels if lbl != "data"):
                result[label] = hdf5_file[label][i]

            yield result
38
+
39
+
40
if __name__ == "__main__":
    # Smoke test: pull the first sample and display part of its image
    # data alongside the logo's name.
    sample = next(gen_samples())
    name = sample["meta_data/names"]
    images = sample["data"]

    print(name)

    # NOTE(review): index 2 selects the third plane along the first axis
    # of the cropped image array — presumably one image variant; confirm
    # against the LLD-logo HDF5 layout.
    image_pil = Image.fromarray(images[2])
    image_pil.show()
lld/preprocess.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lld/preprocess.py
2
+
3
+ import argparse, asyncio, os
4
+ from itertools import islice
5
+
6
+ from aiostream import stream
7
+ import h5py as h5
8
+ import numpy as np
9
+ import pandas as pd
10
+ from tqdm.asyncio import trange
11
+
12
+ from .loader import gen_samples, samples_count
13
+ from .crawler import run
14
+
15
+ script_dir = os.path.dirname(__file__)
16
+ outfile_path = os.path.join(script_dir, "../data/lld-processed.h5")
17
+
18
+
19
async def gen_processor(batch_size: int, limit: int):
    """Yield (images, description, name) tuples for up to *limit* samples.

    For each batch of logos, builds candidate homepage URLs from the logo
    names and fetches site descriptions via the crawler.
    """
    count = min(limit, samples_count)
    batch_size = min(limit, batch_size)

    samples = gen_samples()
    # Ceiling division so a final partial batch is not silently dropped
    # (the original floor division skipped the remainder).
    steps = -(-count // batch_size)

    for _ in trange(steps):
        # BUG FIX: `samples` is a generator that advances as it is
        # consumed, so islice must always start at position 0. The
        # original passed step*batch_size as the start offset, which
        # skipped an ever-growing chunk of the dataset each iteration.
        batch = list(islice(samples, batch_size))
        if not batch:
            break

        urls = [f"http://{sample['meta_data/names'].decode()}.com" for sample in batch]
        descriptions = await run(urls, batch_size)

        for sample, description in zip(batch, descriptions):
            # BUG FIX: the original had a stray trailing comma that made
            # `name` a 1-tuple instead of a string.
            name = sample["meta_data/names"].decode()
            images = sample["data"]

            data = (
                images,
                description,
                name,
            )

            yield data
43
+
44
+
45
async def preprocess(batch_size: int = 100, limit: int = samples_count + 1):
    """Stream processed samples into an HDF5 file in small DataFrame chunks."""

    columns = ["images", "description", "name"]

    processor = gen_processor(batch_size, limit)

    # Buffer 10 processed records per DataFrame write.
    chunk_size = 10
    async with stream.chunks(processor, chunk_size).stream() as chunks:
        async for chunk in chunks:
            df_chunk = pd.DataFrame(chunk, columns=columns)
            # NOTE(review): mode="a" appends at the *file* level only; in
            # pandas' default fixed format, each to_hdf call with the same
            # key replaces the "data" node, so earlier chunks appear to be
            # overwritten. Confirm whether format="table"/append=True was
            # intended (though table format may reject the ndarray column).
            df_chunk.to_hdf(
                outfile_path, "data", data_columns=columns, mode="a"
            )
58
+
59
+
60
if __name__ == "__main__":
    # Command-line entry point: run preprocessing with optional
    # --limit and --batch_size overrides.
    arg_parser = argparse.ArgumentParser()

    arg_parser.add_argument(
        "--limit",
        help="Limit to total records processed",
        type=int,
        default=samples_count + 1,
    )
    arg_parser.add_argument(
        "--batch_size",
        help="Batch size",
        type=int,
        nargs="?",
        const=10_000,
        default=10_000,
    )

    parsed = arg_parser.parse_args()

    asyncio.run(preprocess(batch_size=parsed.batch_size, limit=parsed.limit))
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool.poetry]
2
+ name = "lld"
3
+ version = "0.1.0"
4
+ description = "LLD (Large Logo Dataset)"
5
+ authors = ["Diwank Singh Tomer <diwank.singh@gmail.com>"]
6
+ license = "MIT"
7
+
8
+ [tool.poetry.dependencies]
9
+ python = "^3.10"
10
+ numpy = "^1.23.1"
11
+ h5py = "^3.7.0"
12
+ datasets = "^2.3.2"
13
+ Pillow = "^9.2.0"
14
+ requests = "^2.28.1"
15
+ bs4 = "^0.0.1"
16
+ html5lib = "^1.1"
17
+ extraction = "^0.3"
18
+ aiohttp = {extras = ["speedups"], version = "^3.8.1"}
19
+ tqdm = "^4.64.0"
20
+ pandas = "^1.4.3"
21
+ tables = "^3.7.0"
22
+ pyarrow = "^8.0.0"
23
+ modin = {extras = ["dask"], version = "^0.15.2"}
24
+ aiostream = "^0.4.4"
25
+
26
+ [tool.poetry.dev-dependencies]
27
+ ipython = "^8.4.0"
28
+ black = "^22.6.0"
29
+
30
+ [build-system]
31
+ requires = ["poetry-core>=1.0.0"]
32
+ build-backend = "poetry.core.masonry.api"