# unsplash-image-text.py
# Dataset loading script for the Hugging Face dataset
# jamescalam/unsplash-image-text (author: jamescalam, commit 4b27237).
import os
import urllib.request
import requests
import datasets
import json
import pandas as pd
from PIL import Image
# BibTeX citation returned in the DatasetInfo.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {Unsplash Lite Dataset Images},
author={Unsplash},
year={2022}
}
"""
# Short human-readable description of the dataset.
_DESCRIPTION = """\
This is a dataset that streams photos data from the Unsplash 25K servers.
"""
_HOMEPAGE = "https://github.com/unsplash/datasets/"
# NOTE(review): no license string is set — confirm the intended license text.
_LICENSE = ""
# Hugging Face repo hosting the image tarballs and the url/maps JSON files.
_REPO = "https://huggingface.co/datasets/jamescalam/unsplash-image-text"
# Official Unsplash Lite metadata archive (photos.tsv000 etc.).
_URL = "https://unsplash.com/data/lite/latest"
URL_JSON = "https://huggingface.co/datasets/jamescalam/unsplash-image-text/raw/main/url.json"
# The images are sharded into 25 tarballs: images_0.tgz .. images_24.tgz.
_IMAGE_TGZ = [f"images_{i}.tgz" for i in range(25)]
# Columns exposed by the dataset; 'photo_url' comes from url.json and
# 'image' is attached at generation time, the rest come from the TSV.
_COLS = [
'photo_id',
'photo_url',
'photo_width',
'photo_height',
'photo_aspect_ratio',
'photo_description',
'ai_description',
'image'
]
class Unsplash(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Unsplash Lite (25K) photo collection.

    Downloads the Unsplash metadata archive and the 25 image tarballs
    hosted on the Hugging Face repo, joins the metadata with photo URLs,
    and yields one record per photo: TSV metadata plus a PIL image.
    """

    def _info(self):
        """Return the dataset metadata: features, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    'photo_id': datasets.Value("string"),
                    'photo_url': datasets.Value("string"),
                    'photo_width': datasets.Value("int32"),
                    'photo_height': datasets.Value("int32"),
                    'photo_aspect_ratio': datasets.Value("float32"),
                    'photo_description': datasets.Value("string"),
                    'ai_description': datasets.Value("string"),
                    'image': datasets.Image(),
                }
            ),
            supervised_keys=None,
            # use the module constant instead of repeating the URL literal
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and prepare all assets, returning a single TRAIN split.

        Builds ``unsplash.tsv`` (metadata merged with photo URLs) and a
        mapping from image-directory name to its extracted path, both of
        which are handed to ``_generate_examples`` via ``gen_kwargs``.
        """
        # download + extract the official Unsplash Lite metadata archive
        new_url = dl_manager.download_and_extract(_URL)
        # download + extract the 25 image tarballs from the HF repo
        tgz_urls = [f"{_REPO}/resolve/main/images/{file}" for file in _IMAGE_TGZ]
        archive_paths = dl_manager.download_and_extract(tgz_urls)
        # each extracted archive contains a single directory under 'subdirs';
        # map that directory's name to its full extracted path
        tar_paths = {}
        for path in archive_paths:
            image_tar = os.listdir(os.path.join(path, "subdirs"))[0]
            tar_paths[image_tar] = os.path.join(path, "subdirs", image_tar)
        # drop everything from the metadata dir except the files we need
        for file in os.listdir(new_url):
            full_path = os.path.join(new_url, file)
            if os.path.isfile(full_path) and file not in ['photos.tsv000', 'url.json']:
                os.remove(full_path)
        # photo_id -> photo_url mapping
        res = requests.get(f"{_REPO}/raw/main/url.json")
        urls = json.loads(res.text)
        # photo_id -> image-directory-name mapping
        res = requests.get(f"{_REPO}/raw/main/maps.json")
        maps = json.loads(res.text)
        # reshape the URL mapping into a dataframe for the merge below
        urls = pd.DataFrame({
            'photo_id': list(urls.keys()),
            'photo_url': list(urls.values()),
        })
        # load the Unsplash 25K metadata
        unsplash = pd.read_csv(os.path.join(new_url, "photos.tsv000"), sep='\t')
        # drop rows with no text description at all.
        # BUG FIX: pandas loads empty TSV fields as NaN, not "", so the
        # original ``!= ""`` comparison never dropped them; normalize
        # missing values to "" before comparing.
        desc = unsplash['photo_description'].fillna("")
        ai_desc = unsplash['ai_description'].fillna("")
        unsplash = unsplash[(desc != "") | (ai_desc != "")]
        # keep only the TSV columns ('photo_url' comes from the merge,
        # 'image' is attached during generation)
        cols = [col for col in _COLS if col not in ['photo_url', 'image']]
        unsplash = unsplash[cols]
        # attach photo URLs; inner join drops photos without a known URL
        unsplash = unsplash.merge(urls, on='photo_id', how='inner')
        tsv_path = os.path.join(new_url, 'unsplash.tsv')
        unsplash.to_csv(tsv_path, sep='\t', index=False)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": tsv_path,
                    "tar_paths": tar_paths,
                    "maps": maps,
                }
            ),
        ]

    def _generate_examples(self, filepath, tar_paths, maps):
        """Yield ``(key, record)`` examples from the merged TSV.

        Rows whose image is missing from the archive maps (KeyError) or
        whose image file cannot be read (OSError) are silently skipped,
        matching the original best-effort behaviour.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            cols = None
            for id_, line in enumerate(f):
                values = line.rstrip("\r\n").split("\t")
                if id_ == 0:
                    # first line is the TSV header
                    cols = values
                    continue
                # pad short rows so every column is present
                if len(values) < len(cols):
                    values.extend([''] * (len(cols) - len(values)))
                data = dict(zip(cols, values))
                try:
                    tar_path = tar_paths[maps[data['photo_id']]]
                    data['image'] = Image.open(
                        os.path.join(tar_path, f"{data['photo_id']}.jpg")
                    )
                except KeyError:
                    # photo not present in the archive maps — skip record
                    continue
                except OSError:
                    # unreadable / corrupt image file — skip record
                    continue
                yield id_, data