Sourab Mangrulkar committed
Commit: 8601501
Parent(s): f52011b
updating code
Files changed: chat-t5-remove.py (+0 -81)
chat-t5-remove.py
DELETED
@@ -1,81 +0,0 @@
import os

import datasets
import pandas as pd

_DESCRIPTION = """\
This script generates a dataset for the chatbot task. The dataset is a combination of the datasets used to train BlenderBot."""

_VERSION = "1.0.0"


class ChatT5Config(datasets.BuilderConfig):
    """BuilderConfig for ChatT5."""

    def __init__(self, **kwargs):
        super(ChatT5Config, self).__init__(version=datasets.Version(_VERSION, ""), **kwargs)


class ChatT5(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        ChatT5Config(
            name="chat_t5",
            description="The dataset is a combination of the datasets used to train BlenderBot.",
        )
    ]
    BUILDER_CONFIG_CLASS = ChatT5Config
    DEFAULT_CONFIG_NAME = "chat_t5"

    def _info(self):
        features = datasets.Features({"texts": datasets.Value("string"), "labels": datasets.Value("string")})
        return datasets.DatasetInfo(description=_DESCRIPTION, features=features, supervised_keys=None)

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train_files = [
            "empathetic_dialogues_train.parquet",
            "blended_skill_talk_train.parquet",
            "wizard_of_wikipedia_train.parquet",
            "conv_ai_2_train.parquet",
        ]
        validation_files = [
            "conv_ai_2_validation.parquet",
            "empathetic_dialogues_validation.parquet",
            "blended_skill_talk_validation.parquet",
            "wizard_of_wikipedia_validation.parquet",
        ]
        test_files = [
            "blended_skill_talk_test.parquet",
            "empathetic_dialogues_test.parquet",
            "wizard_of_wikipedia_test.parquet",
        ]
        # The parquet files are expected under ./data/<split>/ relative to
        # this script, e.g. ./data/train/conv_ai_2_train.parquet.
        urls_to_download = {
            "train": [os.path.join("./data/train", name) for name in train_files],
            "validation": [os.path.join("./data/validation", name) for name in validation_files],
            "test": [os.path.join("./data/test", name) for name in test_files],
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_paths": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_paths": downloaded_files["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_paths": downloaded_files["validation"]},
            ),
        ]

    def _generate_examples(self, file_paths):
        """Yields examples."""
        # Concatenate the per-source parquet files of this split into one frame.
        data = pd.concat(pd.read_parquet(path) for path in file_paths)
        for id_, (_, row) in enumerate(data.iterrows()):
            yield id_, {"texts": row["texts"], "labels": row["labels"]}