lihao57 commited on
Commit
9ab8dbe
·
1 Parent(s): 9d9dea1

update README.md

Browse files
Files changed (4) hide show
  1. README.md +64 -7
  2. dataset.py +0 -90
  3. test/test.pkl +0 -3
  4. train/train.pkl +0 -3
README.md CHANGED
@@ -11,22 +11,79 @@ size_categories: 1K<n<10K
11
 
12
  # Wireframe Dataset
13
 
14
- This is the **Wireframe dataset** hosted on Hugging Face Hub.
15
 
16
- ## Dataset Summary
17
 
18
  Wireframe dataset with image annotations including line segments.
19
- The dataset is stored as pickle files (`train.pkl`, `test.pkl`) and images.
20
- A custom `dataset.py` is provided for loading.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  ## Usage
23
 
 
 
24
  ```python
25
  from datasets import load_dataset
26
 
27
- # Load the dataset from Hugging Face Hub
28
  ds = load_dataset("lh9171338/Wireframe")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
- # Access samples
 
 
 
 
 
31
  print(ds)
32
- print(ds["train"][0])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  # Wireframe Dataset
13
 
14
+ This is the [**Wireframe dataset**](https://github.com/huangkuns/wireframe) hosted on Hugging Face Hub.
15
 
16
+ ## Summary
17
 
18
  Wireframe dataset with image annotations including line segments.
19
+ The dataset is stored as jsonl files (`train/metadata.jsonl`, `test/metadata.jsonl`) and images.
20
+
21
+ ## Download
22
+
23
+ - Download with huggingface-hub
24
+
25
+ ```shell
26
+ python3 -m pip install huggingface-hub
27
+ huggingface-cli download --repo-type dataset lh9171338/Wireframe --local-dir ./
28
+ ```
29
+
30
+ - Download with Git
31
+
32
+ ```shell
33
+ git lfs install
34
+ git clone https://huggingface.co/datasets/lh9171338/Wireframe
35
+ ```
36
 
37
  ## Usage
38
 
39
+ - Load the dataset from Hugging Face Hub
40
+
41
  ```python
42
  from datasets import load_dataset
43
 
 
44
  ds = load_dataset("lh9171338/Wireframe")
45
+ print(ds)
46
+ # DatasetDict({
47
+ # train: Dataset({
48
+ # features: ['image', 'image_file', 'image_size', 'lines'],
49
+ # num_rows: 5000
50
+ # })
51
+ # test: Dataset({
52
+ # features: ['image', 'image_file', 'image_size', 'lines'],
53
+ # num_rows: 462
54
+ # })
55
+ # })
56
+ print(ds["train"][0].keys())
57
+ # dict_keys(['image', 'image_file', 'image_size', 'lines'])
58
+ ```
59
 
60
+ - Load the dataset from local
61
+
62
+ ```python
63
+ from datasets import load_dataset
64
+
65
+ ds = load_dataset("imagefolder", data_dir=".")
66
  print(ds)
67
+ # DatasetDict({
68
+ # train: Dataset({
69
+ # features: ['image', 'image_file', 'image_size', 'lines'],
70
+ # num_rows: 5000
71
+ # })
72
+ # test: Dataset({
73
+ # features: ['image', 'image_file', 'image_size', 'lines'],
74
+ # num_rows: 462
75
+ # })
76
+ # })
77
+ print(ds["train"][0].keys())
78
+ # dict_keys(['image', 'image_file', 'image_size', 'lines'])
79
+ ```
80
+
81
+ - Load the dataset with jsonl files
82
+ ```python
83
+ import jsonlines
84
+
85
+ with jsonlines.open("train/metadata.jsonl") as reader:
86
+ infos = list(reader)
87
+ print(infos[0].keys())
88
+ # dict_keys(['file_name', 'image_file', 'image_size', 'lines'])
89
+ ```
dataset.py DELETED
@@ -1,90 +0,0 @@
1
- # -*- encoding: utf-8 -*-
2
-
3
- """
4
- @File : dataset.py
5
- @Time : 2025/08/31 23:00:00
6
- @Author : lh9171338
7
- @Version : 1.0
8
- @Contact : 2909171338@qq.com
9
- """
10
-
11
- import os
12
- import tqdm
13
- import pickle
14
- import jsonlines
15
- from datasets import Dataset, Features, Sequence, Value
16
-
17
-
18
def _load_annotations(split):
    """Load pickled annotations for *split* and normalize each record.

    Reads ``{split}/{split}.pkl`` and converts every annotation to a plain
    dict with JSON/Arrow-serializable values, adding a ``file_name`` field
    (the key the Hugging Face ``imagefolder`` loader expects).

    Args:
        split (str): split name, e.g. ``"train"`` or ``"test"``

    Returns:
        list[dict]: normalized annotation records
    """
    # NOTE(review): unpickling is only safe because these are local,
    # repo-owned dataset files, not untrusted input.
    with open(f"{split}/{split}.pkl", "rb") as f:
        annotations = pickle.load(f)

    new_annotations = []
    for ann in tqdm.tqdm(annotations):
        new_annotations.append(
            dict(
                # Duplicate of image_file kept under the name imagefolder needs.
                file_name=ann["image_file"],
                image_file=os.path.basename(ann["image_file"]),
                image_size=ann["image_size"],
                # ndarray -> nested Python lists so the record serializes.
                lines=ann["lines"].tolist(),
            )
        )
    return new_annotations


def convert_pkl_to_parquet(split):
    """Convert a split's pkl annotation file to a parquet metadata file.

    Args:
        split (str): split name

    Returns:
        None
    """
    new_annotations = _load_annotations(split)

    # Explicit schema; lines is a triply-nested float sequence
    # (presumably per-image line segments as point pairs — TODO confirm shape).
    features = Features(
        {
            "file_name": Value("string"),
            "image_file": Value("string"),
            "image_size": Sequence(Value("int32")),
            "lines": Sequence(Sequence(Sequence(Value("float")))),
        }
    )
    ds = Dataset.from_list(new_annotations, features=features)

    # Save as parquet alongside the split's images.
    ds.to_parquet(f"{split}/metadata.parquet")


def convert_pkl_to_jsonl(split):
    """Convert a split's pkl annotation file to a jsonl metadata file.

    Args:
        split (str): split name

    Returns:
        None
    """
    new_annotations = _load_annotations(split)

    # Save as jsonl alongside the split's images.
    with jsonlines.open(f"{split}/metadata.jsonl", "w") as f:
        f.write_all(new_annotations)
86
-
87
-
88
if __name__ == "__main__":
    # Regenerate metadata.jsonl for every dataset split.
    for split in ("train", "test"):
        convert_pkl_to_jsonl(split)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
test/test.pkl DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:098be958c6eb01c29ff781c3a6ea3433326aa2ddc24faa54aa5c9797d143a0df
3
- size 584354
 
 
 
 
train/train.pkl DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:56b5d251e90eb925b0ac8414eb21907ccc00cb642b7214269f518458d8bc5aa1
3
- size 6383995