add scraper source
- requirements.txt +3 -0
- scrape.py +101 -0
requirements.txt ADDED
@@ -0,0 +1,3 @@
+trafilatura
+datasets
+zstandard
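Note that the file carries no version pins, so a fresh `pip install -r requirements.txt` pulls the latest trafilatura, datasets, and zstandard releases.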
scrape.py ADDED
@@ -0,0 +1,101 @@
+from trafilatura import fetch_url, extract, extract_metadata
+from datasets import load_dataset, Features, Value, Sequence
+from typing import Dict, List, Any
+from trafilatura.settings import DEFAULT_CONFIG
+from copy import deepcopy
+
+my_config = deepcopy(DEFAULT_CONFIG)
+my_config["DEFAULT"]["DOWNLOAD_TIMEOUT"] = "3"
+my_config["DEFAULT"]["SLEEP_TIME"] = "0"
+
+
+def is_target(batch: Dict[str, List]) -> List[bool]:
+    result = []
+    for tpe, dead, deleted, url in zip(
+        batch["type"], batch["dead"], batch["deleted"], batch["url"]
+    ):
+        if (
+            tpe == "story"
+            and dead is None
+            and deleted is None
+            and url is not None
+            and len(url) > 0
+        ):
+            result.append(True)
+        else:
+            result.append(False)
+    return result
+
+
+def fetch_one(doc: Dict[str, Any]) -> Dict[str, Any]:
+    downloaded = fetch_url(doc["url"], config=my_config)
+    result = {
+        "id": doc["id"],
+        "title": None,
+        "author": None,
+        "markdown": None,
+        "downloaded": False,
+        "meta_extracted": False,
+        "parsed": False,
+        "description": None,
+        "filedate": None,
+        "date": None,
+        "image": None,
+        "pagetype": None,
+        "hostname": None,
+        "sitename": None,
+        "categories": None,
+        "tags": None,
+    }
+    if downloaded:
+        result["downloaded"] = True
+        try:
+            raw_meta = extract_metadata(downloaded)
+            if raw_meta:
+                result["meta_extracted"] = True
+                meta = raw_meta.as_dict()
+                result["title"] = meta.get("title", None)
+                result["author"] = meta.get("author", None)
+                result["description"] = meta.get("description", None)
+                result["filedate"] = meta.get("filedate", None)
+                result["date"] = meta.get("date", None)
+                result["image"] = meta.get("image", None)
+                result["pagetype"] = meta.get("pagetype", None)
+                result["hostname"] = meta.get("hostname", None)
+                result["sitename"] = meta.get("sitename", None)
+            md = extract(downloaded, output_format="markdown", with_metadata=False)
+            if md:
+                result["parsed"] = True
+                result["markdown"] = md
+        except Exception:
+            print("failed to extract metadata")
+    return result
+
+
+if __name__ == "__main__":
+    ds = load_dataset("nixiesearch/hackernews-comments", split="train", num_proc=16)
+    ds = ds.filter(is_target, num_proc=32, batched=True, desc="selecting stories")
+    ds = ds.select_columns(["id", "url"]).shuffle()
+    schema = Features(
+        {
+            "id": Value("int64"),
+            "url": Value("string"),
+            "title": Value("string"),
+            "author": Value("string"),
+            "markdown": Value("string"),
+            "downloaded": Value("bool"),
+            "meta_extracted": Value("bool"),
+            "parsed": Value("bool"),
+            "description": Value("string"),
+            "filedate": Value("string"),
+            "date": Value("string"),
+            "image": Value("string"),
+            "pagetype": Value("string"),
+            "hostname": Value("string"),
+            "sitename": Value("string"),
+            "categories": Sequence(Value("string")),
+            "tags": Sequence(Value("string")),
+        }
+    )
+    ds = ds.map(fetch_one, num_proc=128, desc="downloading", features=schema)
+    ds.save_to_disk("/tmp/hnstories")
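Because the pipeline sits under the `if __name__ == "__main__":` guard, `fetch_one` can be imported and smoke-tested on a single URL before committing to the 128-process run. A minimal sketch, assuming scrape.py is importable from the working directory (the URL is an arbitrary example):

    from scrape import fetch_one

    # Any live page works here; example.com is just a stand-in.
    row = fetch_one({"id": 0, "url": "https://example.com"})
    print(row["downloaded"], row["meta_extracted"], row["parsed"])
    print((row["title"] or "")[:80])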
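Once the map finishes, the Arrow output can be read back with the matching datasets loader. A sketch, assuming the `/tmp/hnstories` path from the script; the `downloaded`/`meta_extracted`/`parsed` flags make it easy to measure how much of the crawl survived each stage:

    from datasets import load_from_disk

    ds = load_from_disk("/tmp/hnstories")
    # Keep only rows where markdown extraction fully succeeded.
    parsed = ds.filter(lambda row: row["parsed"])
    print(f"{len(parsed)} of {len(ds)} stories yielded markdown")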
|