| """FaQUAD-NLI dataset""" |
|
|
| import datasets |
| import pandas as pd |
| import json |
|
|
| _CITATION = """ |
| """ |
|
|
| _DESCRIPTION = """ |
| """ |
|
|
| _URLS = { |
| "data": "https://raw.githubusercontent.com/liafacom/faquad/6ad978f20672bb41625b3b71fbe4a88b893d0a86/data/dataset.json", |
| "spans": "https://huggingface.co/datasets/ruanchaves/faquad-nli/raw/main/spans.csv" |
| } |
|
|
| def check_overlap(interval1, interval2): |
| """Check for overlap between two integer intervals""" |
| return not (interval1[1] < interval2[0] or interval2[1] < interval1[0]) |
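# For example (hypothetical offsets): check_overlap((0, 10), (5, 15)) is True
# because the intervals share positions 5-10, while check_overlap((0, 10), (11, 20))
# is False because the first interval ends before the second one starts.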


class Faquad(datasets.GeneratorBasedBuilder):
    """Builder for FaQUAD-NLI: question-sentence pairs derived from FaQuAD,
    labeled by whether the sentence answers the question."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "document_index": datasets.Value("int32"),
                    "document_title": datasets.Value("string"),
                    "paragraph_index": datasets.Value("int32"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "label": datasets.Value("int32")
                }),
            supervised_keys=None,
            homepage="https://github.com/liafacom/faquad",
            citation=_CITATION,
        )
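    # A sketch of one yielded record (values are illustrative only):
    #   {"document_index": 0, "document_title": "...", "paragraph_index": 2,
    #    "question": "...", "answer": "one sentence from the paragraph", "label": 1}
    # "label" is 1 when the sentence overlaps a gold answer span for the question.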


    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data": downloaded_files["data"],
                    "spans": downloaded_files["spans"],
                    "split": "train"
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data": downloaded_files["data"],
                    "spans": downloaded_files["spans"],
                    "split": "validation"
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data": downloaded_files["data"],
                    "spans": downloaded_files["spans"],
                    "split": "test"
                }
            )
        ]


    def _generate_examples(self, data, spans, split):
        with open(data, "r", encoding="utf-8") as f:
            json_data = json.load(f)

        spans = pd.read_csv(spans).to_dict("records")
        counter = 0
        for span_row in spans:
            # The spans file assigns every sentence to a split; skip rows that
            # belong to another split.
            if span_row["split"] != split:
                continue

            document = json_data["data"][span_row["document_index"]]
            paragraph = document["paragraphs"][span_row["paragraph_index"]]
            document_title = document["title"]

            # Recover the sentence text from its character offsets inside the
            # paragraph context.
            sentence = paragraph["context"][
                span_row["sentence_start_char"]:span_row["sentence_end_char"]
            ]
            sentence_interval = (span_row["sentence_start_char"], span_row["sentence_end_char"])

            for qas_row in paragraph["qas"]:
                question = qas_row["question"]
                question_spans = []
                for qas_answer in qas_row["answers"]:
                    qas_answer_start_span = qas_answer["answer_start"]
                    qas_answer_end_span = qas_answer["answer_start"] + len(qas_answer["text"])
                    question_spans.append((qas_answer_start_span, qas_answer_end_span))
                # Label the pair 1 if any gold answer span overlaps the sentence,
                # otherwise 0 (the for/else branch runs when no overlap was found).
                for question_interval in question_spans:
                    if check_overlap(sentence_interval, question_interval):
                        yield counter, {
                            "document_index": span_row["document_index"],
                            "document_title": document_title,
                            "paragraph_index": span_row["paragraph_index"],
                            "question": question,
                            "answer": sentence,
                            "label": 1
                        }
                        counter += 1
                        break
                else:
                    yield counter, {
                        "document_index": span_row["document_index"],
                        "document_title": document_title,
                        "paragraph_index": span_row["paragraph_index"],
                        "question": question,
                        "answer": sentence,
                        "label": 0
                    }
                    counter += 1
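# Example usage (a minimal sketch, assuming this script is published on the
# Hugging Face Hub under the id suggested by the spans URL above,
# "ruanchaves/faquad-nli"):
#
#     from datasets import load_dataset
#
#     faquad_nli = load_dataset("ruanchaves/faquad-nli")
#     print(faquad_nli["train"][0])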