File size: 3,309 Bytes
996850f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4ca34e9
996850f
 
7d4ec06
996850f
 
 
36e11cd
996850f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2742d47
996850f
 
36e11cd
996850f
 
36e11cd
996850f
2742d47
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import csv
import json
import os
import pdb
import datasets
import pandas as pd

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
Dataset for commy test eye diabetic
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "NawinCom/CommyTesting"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

_URL = "https://huggingface.co/datasets/NawinCom/CommyTesting/resolve/main/images.zip"
# Build the id -> diagnosis lookup used by ImagesDemo._generate_examples.
# NOTE(review): this runs at import time and assumes ./Train.csv exists in
# the current working directory — consider moving it into _split_generators
# so the script can be imported without the CSV present.
train = pd.read_csv('./Train.csv')
# Map each image id (filename stem) to its diagnosis label.
dic = dict(zip(train['id_code'], train['diagnosis']))

# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class ImagesDemo(datasets.GeneratorBasedBuilder):
    """Dataset builder for diabetic-retinopathy eye images.

    Streams images from a zip archive hosted on the Hugging Face Hub and
    labels each one via the module-level ``dic`` (image id -> diagnosis)
    loaded from ``Train.csv``.
    """

    def _info(self):
        """Return dataset metadata: one image column and one string label."""
        return datasets.DatasetInfo(
            # Description shown on the dataset page.
            description=_DESCRIPTION,
            # Schema of each example.
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.Value("string"),
                }
            ),
            # No (input, target) pair is exposed for as_supervised=True.
            supervised_keys=None,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and define the single TRAIN split.

        ``iter_archive`` yields (path, file-object) pairs lazily, so the
        zip is streamed rather than fully extracted to disk.
        """
        archive_path = dl_manager.download(_URL)
        image_iters = dl_manager.iter_archive(archive_path)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={"images": image_iters},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, images):
        """Yield (index, example) pairs for each image in the archive.

        Raises KeyError if an archive member's stem is missing from the
        Train.csv lookup, surfacing label/data mismatches early.
        """
        for idx, (filepath, image) in enumerate(images):
            # Label key = basename without its extension. This is robust
            # for names containing '.jpg' mid-string, unlike the naive
            # str.replace('.jpg', '') approach.
            stem = os.path.splitext(os.path.basename(filepath))[0]
            yield idx, {
                "image": {"path": filepath, "bytes": image.read()},
                "label": dic[stem],
            }