# CommyTesting.py — dataset loading script for NawinCom/CommyTesting
# (web-page residue removed: "NawinCom's picture / Update CommyTesting.py / 7d4ec06")
import csv
import json
import os
import pdb
import datasets
import pandas as pd
# Human-readable description shown on the dataset page.
_DESCRIPTION = """\
Dataset for commy test eye diabetic
"""

# Hub repository / homepage of the dataset.
_HOMEPAGE = "NawinCom/CommyTesting"

# License string (none identified for this dataset).
_LICENSE = ""

# Zip archive containing all the training images on the Hugging Face hub.
_URL = "https://huggingface.co/datasets/NawinCom/CommyTesting/resolve/main/images.zip"

# Build the id_code -> diagnosis lookup used by _generate_examples.
# NOTE(review): this reads ./Train.csv at import time, so the CSV must be
# present in the current working directory before this module is imported.
train = pd.read_csv('./Train.csv')
lis1 = train['id_code']
lis2 = train['diagnosis']
dic = {image_id: diagnosis for image_id, diagnosis in zip(lis1, lis2)}
# Dataset builder; the class name matches the script name in CamelCase.
class ImagesDemo(datasets.GeneratorBasedBuilder):
    """Diabetic-retinopathy eye-image dataset builder.

    Downloads a zip archive of images and pairs each image with the
    diagnosis label looked up in the module-level ``dic`` mapping
    (built from ``Train.csv`` at import time).
    """

    def _info(self):
        """Return the dataset metadata: description, features, homepage."""
        return datasets.DatasetInfo(
            # Description shown on the datasets page.
            description=_DESCRIPTION,
            # Schema of each example.
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    # NOTE(review): the diagnosis values from Train.csv are
                    # stored under a string feature — presumably the library
                    # casts them; confirm consumers expect string labels.
                    "label": datasets.Value("string"),
                }
            ),
            # No (input, target) tuple is declared for as_supervised mode.
            supervised_keys=None,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and define the single TRAIN split.

        ``dl_manager.iter_archive`` streams (path, file-object) pairs from
        the downloaded archive without extracting it to disk first.
        """
        data_dir = dl_manager.download(_URL)
        image_iters = dl_manager.iter_archive(data_dir)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "images": image_iters
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, images):
        """Yield ``(key, example)`` pairs, one per image in the archive.

        Args:
            images: iterable of (filepath, file-object) pairs as produced
                by ``dl_manager.iter_archive``.

        Raises:
            KeyError: if an archive member's basename (minus ``.jpg``)
                is not present in the ``dic`` lookup from Train.csv.
        """
        # enumerate replaces the hand-maintained counter; the leftover
        # debug print of every filepath has been removed.
        for idx, (filepath, image) in enumerate(images):
            # Archive member paths use '/'; the CSV key is the basename
            # with the '.jpg' extension stripped.
            check = filepath.split('/')[-1].replace('.jpg', '')
            yield idx, {
                "image": {"path": filepath, "bytes": image.read()},
                "label": dic[check],
            }