Charliesgt committed on
Commit
cdc7c4e
·
verified ·
1 Parent(s): d6e1527
Files changed (1) hide show
  1. pollen_detection_loader.py +0 -169
pollen_detection_loader.py DELETED
@@ -1,169 +0,0 @@
1
-
2
- import collections
3
- import json
4
- import os
5
-
6
- import datasets
7
-
8
-
9
# Dataset provenance metadata: original Kaggle source, license, and BibTeX citation.
_HOMEPAGE = "https://www.kaggle.com/datasets/nataliakhanzhina/pollen20ldet"
_LICENSE = "CC BY 4.0"
_CITATION = """\
@misc{ pollen20ldet,
    title = { Combating data incompetence in pollen images detection and classification for pollinosis prevention },
    type = { Open Source Dataset },
    author = { Khanzhina, Natalia and Filchenkov, Andrey and Minaeva, Natalia and Novoselova, Larisa and Petukhov, Maxim and Kharisova, Irina and Pinaeva, Julia and Zamorin, Georgiy and Putin, Evgeny and Zamyatina, Elena and others},
    howpublished = { \\url{ https://www.kaggle.com/datasets/nataliakhanzhina/pollen20ldet } },
    url = { https://www.kaggle.com/datasets/nataliakhanzhina/pollen20ldet },
    journal = { Computers in biology and medicine },
    volume={140},
    pages={105064},
    publisher = { Elsevier },
    year = { 2022 },
}
"""

# NOTE(review): the author wants to look at multiple ways to load categories.
# Pollen has multiple classification levels and is usually classified via the
# Latin version of the term; the common-name labels below must stay in this
# exact order — `datasets.ClassLabel` maps each name to its list index, so
# reordering would silently relabel every annotation.
_CATEGORIES = [
    'buckwheat',
    'clover',
    'angelica',
    'angelica_garden',
    'willow',
    'hill_mustard',
    'linden',
    'meadow_pink',
    'alder',
    'birch',
    'fireweed',
    'nettle',
    'pigweed',
    'plantain',
    'sorrel',
    'grass',
    'pine',
    'maple',
    'hazel',
    'mugwort'
]

# COCO-style annotation file expected at the root of each extracted split folder.
_ANNOTATION_FILENAME = "_annotations.json"
52
-
53
-
54
class POLLENDETECTIONConfig(datasets.BuilderConfig):
    """Configuration for the pollen-detection dataset builder."""

    def __init__(self, data_urls, **kwargs):
        """Create a builder config for pollen-detection.

        Args:
            data_urls: `dict` mapping a split name to the URL of its zip archive.
            **kwargs: keyword arguments forwarded to `datasets.BuilderConfig`.
        """
        # Version is pinned here so every config of this builder shares it.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls
66
-
67
-
68
class POLLENDETECTION(datasets.GeneratorBasedBuilder):
    """pollen-detection object detection dataset.

    Each split is a zip archive containing the images plus a single COCO-style
    JSON annotation file (``_annotations.json``) at its root.
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        POLLENDETECTIONConfig(
            name="full",
            description="Full version of pollen-detection dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/Charliesgt/Pollen20LDet/resolve/main/data/train.zip",
                "valid": "https://huggingface.co/datasets/Charliesgt/Pollen20LDet/resolve/main/data/valid.zip",
                "test": "https://huggingface.co/datasets/Charliesgt/Pollen20LDet/resolve/main/data/test.zip",
            },
        ),
    ]

    def _info(self):
        """Return the dataset metadata: feature schema, homepage, citation, license."""
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                # One entry per bounding box; bbox is COCO [x, y, width, height].
                "objects": datasets.Sequence(
                    {
                        "id": datasets.Value("int64"),
                        "area": datasets.Value("int64"),
                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                        "category": datasets.ClassLabel(names=_CATEGORIES),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract each split archive, yielding one generator per split."""
        data_files = dl_manager.download_and_extract(self.config.data_urls)
        # (datasets split name, key inside self.config.data_urls)
        split_keys = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "valid"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"folder_dir": data_files[key]},
            )
            for split, key in split_keys
        ]

    def _generate_examples(self, folder_dir):
        """Yield ``(idx, example)`` pairs for every annotated image in *folder_dir*.

        Args:
            folder_dir: path to an extracted split folder containing the images
                and ``_annotations.json``.

        Yields:
            ``(int, dict)`` tuples matching the schema declared in ``_info``.
        """

        def process_annot(annot, category_id_to_category):
            # Map a raw COCO annotation dict to the feature schema, replacing
            # the numeric category_id with its class-name string.
            return {
                "id": annot["id"],
                "area": annot["area"],
                "bbox": annot["bbox"],
                "category": category_id_to_category[annot["category_id"]],
            }

        annotation_filepath = os.path.join(folder_dir, _ANNOTATION_FILENAME)
        with open(annotation_filepath, "r", encoding="utf-8") as f:
            annotations = json.load(f)
        category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
        image_id_to_annotations = collections.defaultdict(list)
        for annot in annotations["annotations"]:
            image_id_to_annotations[annot["image_id"]].append(annot)
        filename_to_image = {image["file_name"]: image for image in annotations["images"]}

        idx = 0
        # sorted() makes the example order deterministic: os.listdir returns
        # entries in arbitrary, filesystem-dependent order, which would make
        # shard contents non-reproducible across machines.
        for filename in sorted(os.listdir(folder_dir)):
            if filename not in filename_to_image:
                # Skips the annotation file itself and any stray files.
                continue
            filepath = os.path.join(folder_dir, filename)
            image = filename_to_image[filename]
            objects = [
                process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
            ]
            with open(filepath, "rb") as f:
                image_bytes = f.read()
            yield idx, {
                "image_id": image["id"],
                "image": {"path": filepath, "bytes": image_bytes},
                "width": image["width"],
                "height": image["height"],
                "objects": objects,
            }
            idx += 1
169
-