# semeval2022-task5 / generator.py
# (renamed from semeval2022-task5.py)
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
from pathlib import Path
import datasets
import pandas as pd
# BibTeX citation for the MAMI shared-task overview paper (SemEval-2022 Task 5).
_CITATION = """
@inproceedings{fersini2022semeval,
title={SemEval-2022 Task 5: Multimedia automatic misogyny identification},
author={Fersini, Elisabetta and
Gasparini, Francesca and
Rizzi, Giulia and
Saibene, Aurora and
Chulvi, Berta and
Rosso, Paolo and
Lees, Alyssa and
Sorensen, Jeffrey},
booktitle={Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)},
pages={533--549},
year={2022}
}
"""
# Short official description of the dataset, reused in `DatasetInfo`.
_DESCRIPTION = """These are the datasets for Multimodal Misogyny Detection (MAMI), Task 5 of SemEval-2022."""
# Official competition homepage (CodaLab).
_HOMEPAGE = "https://competitions.codalab.org/competitions/34175"
# License was not stated by the task organizers; intentionally left empty.
_LICENSE = ""
# No remote URLs: this builder reads local directories passed to its
# constructor instead of downloading anything.
_URLs = ""
# Expected column order of the tab-separated metadata files
# (training.csv / trial.csv / test_with_labels.csv).
_HEADER = ["file_name", "misogynous", "shaming", "stereotype", "objectification", "violence", "Text Transcription"]
class SemEval2022Task5(datasets.GeneratorBasedBuilder):
    """Dataset builder for Multimodal Misogyny Detection (MAMI), SemEval-2022 Task 5.

    The builder downloads nothing; it reads the official tab-separated,
    UTF-8-with-BOM metadata files from local directories:

    - ``training_dir``: must contain ``training.csv``.
    - ``test_dir``: must contain ``test_with_labels.csv``, or else the pair
      ``test.csv`` + ``test_labels.txt`` from which ``test_with_labels.csv``
      is built and cached on first use.
    - ``validation_dir`` (optional): must contain ``trial.csv``.
    """

    def __init__(self, training_dir=None, test_dir=None, validation_dir=None, **kwargs):
        """Store the split directories and validate their metadata up front.

        Raises:
            ValueError: if a required directory is missing, a metadata file
                does not have the expected columns, or the test labels do not
                match the test metadata row-for-row.  Explicit ``raise`` is
                used instead of ``assert`` so validation survives ``python -O``.
            FileNotFoundError: if ``training.csv`` (or both test metadata
                files) are absent.
        """
        super().__init__(**kwargs)
        if training_dir is None:
            raise ValueError("Training directory must be specified")
        if test_dir is None:
            raise ValueError("Test directory must be specified")
        self.training_dir = training_dir
        self.test_dir = test_dir
        self.validation_dir = validation_dir

        train_csv = self._read_tsv(Path(self.training_dir, "training.csv"))
        self._require_header(train_csv, _HEADER, "Training")
        try:
            test_csv = self._read_tsv(Path(self.test_dir, "test_with_labels.csv"))
            self._require_header(test_csv, _HEADER, "Test")
        except FileNotFoundError:
            # First run: the labels ship separately from the test metadata.
            # Merge them on the file name and cache the merged CSV so later
            # runs take the fast path above.
            test_csv = self._read_tsv(Path(self.test_dir, "test.csv"))
            self._require_header(test_csv, _HEADER[:1] + _HEADER[-1:], "Test")
            labels = pd.read_csv(
                Path(self.test_dir, "test_labels.txt"), delimiter="\t", header=None
            )
            labels.columns = _HEADER[:-1]
            test_with_labels = pd.merge(labels, test_csv, on=_HEADER[0])
            if len(test_with_labels) != len(test_csv):
                # An inner merge that drops rows means file names disagree.
                raise ValueError(
                    "test_labels.txt does not match test.csv row-for-row"
                )
            self._require_header(test_with_labels, _HEADER, "Merged test")
            test_with_labels.to_csv(
                Path(self.test_dir, "test_with_labels.csv"), index=False, sep="\t"
            )
        if self.validation_dir is not None:
            validation_csv = self._read_tsv(Path(self.validation_dir, "trial.csv"))
            self._require_header(validation_csv, _HEADER, "Validation")

    @staticmethod
    def _read_tsv(path):
        """Read one of the official tab-separated, BOM-prefixed metadata files."""
        return pd.read_csv(path, delimiter="\t", encoding="utf-8-sig")

    @staticmethod
    def _require_header(frame, expected, split_name):
        """Raise ValueError unless ``frame`` has exactly the ``expected`` columns."""
        actual = frame.columns.tolist()
        if actual != expected:
            raise ValueError(
                f"{split_name} header is not correct. Expected: {expected}, got: {actual}"
            )

    def _info(self):
        """Describe the example schema: one meme image plus its five binary labels."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_path": datasets.Value("string"),
                    "image_name": datasets.Value("string"),
                    "misogynous": datasets.Value("int32"),
                    "shaming": datasets.Value("int32"),
                    "stereotype": datasets.Value("int32"),
                    "objectification": datasets.Value("int32"),
                    "violence": datasets.Value("int32"),
                    "text": datasets.Value("string"),
                },
            ),
        )

    def _split_generators(self, dl_manager):
        """Map each split to its local images directory and metadata CSV.

        ``dl_manager`` is unused: the data is already on disk (see __init__).
        """
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images_dir": self.training_dir,
                    "metadata": Path(self.training_dir, "training.csv").resolve().as_posix(),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    # Always the merged file: __init__ guarantees it exists.
                    "images_dir": self.test_dir,
                    "metadata": Path(self.test_dir, "test_with_labels.csv").resolve().as_posix(),
                },
            ),
        ]
        if self.validation_dir is not None:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "images_dir": self.validation_dir,
                        "metadata": Path(self.validation_dir, "trial.csv").resolve().as_posix(),
                    },
                )
            )
        return splits

    def _generate_examples(self, images_dir, metadata):
        """Yield ``(row_index, example)`` pairs from one split's metadata CSV.

        Labels are cast to ``int`` explicitly to match the ``int32`` features
        declared in ``_info`` (csv.reader yields raw strings).
        """
        with open(metadata, "r", encoding="utf-8-sig") as f:
            reader = csv.reader(f, delimiter="\t")
            next(reader)  # skip the header row
            for id_, row in enumerate(reader):
                yield (
                    id_,
                    {
                        "image_path": Path(images_dir, row[0]).resolve().as_posix(),
                        "image_name": row[0],
                        "misogynous": int(row[1]),
                        "shaming": int(row[2]),
                        "stereotype": int(row[3]),
                        "objectification": int(row[4]),
                        "violence": int(row[5]),
                        "text": row[6],
                    },
                )