Walter
committed on
Commit
·
3b897ff
1
Parent(s):
864c3ff
remove dataset.py and update README
Browse files- README.md +11 -11
- dataset.py +0 -50
README.md
CHANGED
|
@@ -42,20 +42,20 @@ pretty_name: Hong Kong content-based (Cantonese & Traditional Chinese) text corp
|
|
| 42 |
This dataset contains eight cleaned source-specific corpora of Hong Kong Cantonese and Traditional Chinese text, crawled from public websites and platforms.
|
| 43 |
Each file stores plain UTF-8 text, where **each record occupies one line**, and **blank lines serve as separators**.
|
| 44 |
This dataset is also available at Zenodo: https://doi.org/10.5281/zenodo.16882352
|
| 45 |
-
We only change file extension from .corpus to .csv here for HuggingFace's dataset viewer function
|
| 46 |
|
| 47 |
### Files
|
| 48 |
|
| 49 |
-
| Filename
|
| 50 |
-
|
| 51 |
-
| `appledaily_article_dedup.csv`
|
| 52 |
-
| `hkcitizenmedia_article_dedup.csv`
|
| 53 |
-
| `hkcnews_article_dedup.csv`
|
| 54 |
-
| `inmedia_article_dedup.csv`
|
| 55 |
-
| `lihkg_posts_dedup_demoji_128.csv`
|
| 56 |
-
| `openrice_review_dedup_demoji.csv`
|
| 57 |
-
| `thestandnews_article_dedup.csv`
|
| 58 |
-
| `wiki_hk.csv`
|
| 59 |
|
| 60 |
## Intended Uses
|
| 61 |
|
|
|
|
| 42 |
This dataset contains eight cleaned source-specific corpora of Hong Kong Cantonese and Traditional Chinese text, crawled from public websites and platforms.
|
| 43 |
Each file stores plain UTF-8 text, where **each record occupies one line**, and **blank lines serve as separators**.
|
| 44 |
This dataset is also available at Zenodo: https://doi.org/10.5281/zenodo.16882352
|
| 45 |
+
We only changed the file extension from .corpus to .csv and added a header row here for HuggingFace's dataset viewer function
|
| 46 |
|
| 47 |
### Files
|
| 48 |
|
| 49 |
+
| Filename | Description | SHA256 hash (without header row, same content as corpus file@Zenodo) |
|
| 50 |
+
|----------------------------------------|------------------------------------|----------------------------------------------------------------------|
|
| 51 |
+
| `appledaily_article_dedup.csv` | Apple Daily news articles | `0b6ad22b7a73230fd0e44af904c0cff1773cc871417f6ad3af11a783564bca15` |
|
| 52 |
+
| `hkcitizenmedia_article_dedup.csv` | HK Citizen Media articles | `373a5b369d5e402e58760861e2bff2e618e7ee7fd4494988a39be1156f9dba84` |
|
| 53 |
+
| `hkcnews_article_dedup.csv` | Hong Kong Citizen News | `67f909cfaf7d7a67df1dc79448f24622ec525a31b73b2e3293fbaad147470e69` |
|
| 54 |
+
| `inmedia_article_dedup.csv` | InMedia.hk articles | `135251f3e8ba7587018b7b2814b9ea5bbebf69c98adca73d3ea4c9f9b5571957` |
|
| 55 |
+
| `lihkg_posts_dedup_demoji_128.csv` | Lihkg forum posts (emoji removed) | `8e5e6de9c219aeccdaf13e9162b00cce2eeb7f595023a7bf19d4b5660395a3ee` |
|
| 56 |
+
| `openrice_review_dedup_demoji.csv` | OpenRice user reviews | `dd5835a7effe49bb96a31e0c0eab43dea17ef23b3e1d9cefdc186aca276897ce` |
|
| 57 |
+
| `thestandnews_article_dedup.csv` | Stand News articles | `847ef0f5809481caf4bf21100e4b34207513d724dafb9f42b162ef49079e7dba` |
|
| 58 |
+
| `wiki_hk.csv` | Wikipedia (zh-hk) | `bd33008802797b33df8484cf1113be6a0b38547fe13515f5c4edbf4ccad270db` |
|
| 59 |
|
| 60 |
## Intended Uses
|
| 61 |
|
dataset.py
DELETED
|
@@ -1,50 +0,0 @@
|
|
| 1 |
-
import datasets

# One-line summary surfaced through datasets.DatasetInfo(description=...).
_DESCRIPTION = "Cleaned Hong Kong Cantonese / Traditional Chinese corpora from multiple sources."
# Canonical dataset page on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/SolarisCipher/hk_content_corpus"
# Maps split name -> corpus filename inside the dataset repository.
# NOTE(review): the README table in this same commit lists these files with a
# .csv extension ("changed from .corpus to .csv for the dataset viewer");
# confirm the .txt names below are not stale.
_CORPUS_FILES = {
    "appledaily_article": "appledaily_article_dedup.txt",
    "hkcitizenmedia_article": "hkcitizenmedia_article_dedup.txt",
    "hkcnews_article": "hkcnews_article_dedup.txt",
    "inmedia_article": "inmedia_article_dedup.txt",
    "lihkg_posts": "lihkg_posts_dedup_demoji_128.txt",
    "openrice_review": "openrice_review_dedup_demoji.txt",
    "thestandnews_article": "thestandnews_article_dedup.txt",
    "wiki_hk": "wiki_hk.txt",
}
|
| 15 |
-
|
| 16 |
-
class HKContentCorpus(datasets.GeneratorBasedBuilder):
    """Loading script for the Hong Kong content corpus.

    Each corpus file is plain UTF-8 text in which a record is a run of
    consecutive non-blank lines; blank lines separate records. One split is
    produced per source corpus listed in ``_CORPUS_FILES``.
    """

    def _info(self):
        """Return dataset metadata: a single string feature ``text`` per record."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation="",
        )

    def _split_generators(self, dl_manager):
        """Create one SplitGenerator per source corpus.

        Fix: the original ignored ``dl_manager`` and put raw repo-relative
        filenames into ``gen_kwargs``, so ``open()`` in ``_generate_examples``
        only worked when running from a local checkout with the files beside
        the script. Resolving paths through ``dl_manager.download`` lets the
        datasets library fetch/cache the files from the Hub as intended.
        """
        # dl_manager.download accepts a dict and returns a dict with the same
        # keys mapped to local (cached) file paths.
        resolved = dl_manager.download(_CORPUS_FILES)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": resolved[split_name]},
            )
            for split_name in _CORPUS_FILES
        ]

    def _generate_examples(self, filepath):
        """Yield ``(example_id, {"text": ...})`` pairs from one corpus file.

        Records are blank-line-separated; lines within a record are re-joined
        with newlines after stripping surrounding whitespace.
        """
        with open(filepath, encoding="utf-8") as f:
            buffer = []
            example_id = 0
            for raw_line in f:
                line = raw_line.strip()
                if line:
                    buffer.append(line)
                elif buffer:
                    # Blank line closes the current record (consecutive blank
                    # lines emit nothing).
                    yield example_id, {"text": "\n".join(buffer)}
                    example_id += 1
                    buffer = []
            # Flush a trailing record that has no terminating blank line.
            if buffer:
                yield example_id, {"text": "\n".join(buffer)}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|