diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 3c46468b46cadda3503e948330cf01f7f655e257..0000000000000000000000000000000000000000
--- a/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-tweet_topic_multi
-clean
\ No newline at end of file
diff --git a/README.md b/README.md
index 4feddc0aeda3437eee6dcf226c98b2952d2d0049..78a0f1aa3676ab1df840b7ff35408e62ac800d3f 100644
--- a/README.md
+++ b/README.md
@@ -1,23 +1,1440 @@
 ---
 configs:
 - config_name: default
+- config_name: en
+  data_files:
+  - split: train
+    path: en/train-*
+  - split: test
+    path: en/test-*
+  - split: validation
+    path: en/validation-*
+- config_name: en_2022
+  data_files:
+  - split: train
+    path: en_2022/train-*
+  - split: validation
+    path: en_2022/validation-*
+- config_name: en_cross_validation_0
+  data_files:
+  - split: train
+    path: en_cross_validation_0/train-*
+  - split: test
+    path: en_cross_validation_0/test-*
+  - split: validation
+    path: en_cross_validation_0/validation-*
+- config_name: en_cross_validation_1
+  data_files:
+  - split: train
+    path: en_cross_validation_1/train-*
+  - split: test
+    path: en_cross_validation_1/test-*
+  - split: validation
+    path: en_cross_validation_1/validation-*
+- config_name: en_cross_validation_2
+  data_files:
+  - split: train
+    path: en_cross_validation_2/train-*
+  - split: test
+    path: en_cross_validation_2/test-*
+  - split: validation
+    path: en_cross_validation_2/validation-*
+- config_name: en_cross_validation_3
+  data_files:
+  - split: train
+    path: en_cross_validation_3/train-*
+  - split: test
+    path: en_cross_validation_3/test-*
+  - split: validation
+    path: en_cross_validation_3/validation-*
+- config_name: en_cross_validation_4
+  data_files:
+  - split: train
+    path: en_cross_validation_4/train-*
+  - split: test
+    path: en_cross_validation_4/test-*
+  - split: validation
+    path: en_cross_validation_4/validation-*
+- config_name: es
+  data_files:
+  - split: train
+    path: es/train-*
+  - split: test
+    path: es/test-*
+  - split: validation
+    path: es/validation-*
+- config_name: es_cross_validation_0
+  data_files:
+  - split: train
+    path: es_cross_validation_0/train-*
+  - split: test
+    path: es_cross_validation_0/test-*
+  - split: validation
+    path: es_cross_validation_0/validation-*
+- config_name: es_cross_validation_1
+  data_files:
+  - split: train
+    path: es_cross_validation_1/train-*
+  - split: test
+    path: es_cross_validation_1/test-*
+  - split: validation
+    path: es_cross_validation_1/validation-*
+- config_name: es_cross_validation_2
+  data_files:
+  - split: train
+    path: es_cross_validation_2/train-*
+  - split: test
+    path: es_cross_validation_2/test-*
+  - split: validation
+    path: es_cross_validation_2/validation-*
+- config_name: es_cross_validation_3
+  data_files:
+  - split: train
+    path: es_cross_validation_3/train-*
+  - split: test
+    path: es_cross_validation_3/test-*
+  - split: validation
+    path: es_cross_validation_3/validation-*
+- config_name: es_cross_validation_4
+  data_files:
+  - split: train
+    path: es_cross_validation_4/train-*
+  - split: test
+    path: es_cross_validation_4/test-*
+  - split: validation
+    path: es_cross_validation_4/validation-*
+- config_name: gr
+  data_files:
+  - split: train
+    path: gr/train-*
+  - split: test
+    path: gr/test-*
+  - split: validation
+    path: gr/validation-*
+- config_name: gr_cross_validation_0
+  data_files:
+  - split: train
+    path: gr_cross_validation_0/train-*
+  - split: test
+    path: gr_cross_validation_0/test-*
+  - split: validation
+    path: gr_cross_validation_0/validation-*
+- config_name: gr_cross_validation_1
+  data_files:
+  - split: train
+    path: gr_cross_validation_1/train-*
+  - split: test
+    path: gr_cross_validation_1/test-*
+  - split: validation
+    path: gr_cross_validation_1/validation-*
+- config_name: gr_cross_validation_2
+  data_files:
+  - split: train
+    path: gr_cross_validation_2/train-*
+  - split: test
+    path: gr_cross_validation_2/test-*
+  - split: validation
+    path: gr_cross_validation_2/validation-*
+- config_name: gr_cross_validation_3
+  data_files:
+  - split: train
+    path: gr_cross_validation_3/train-*
+  - split: test
+    path: gr_cross_validation_3/test-*
+  - split: validation
+    path: gr_cross_validation_3/validation-*
+- config_name: gr_cross_validation_4
+  data_files:
+  - split: train
+    path: gr_cross_validation_4/train-*
+  - split: test
+    path: gr_cross_validation_4/test-*
+  - split: validation
+    path: gr_cross_validation_4/validation-*
+- config_name: ja
+  data_files:
+  - split: train
+    path: ja/train-*
+  - split: test
+    path: ja/test-*
+  - split: validation
+    path: ja/validation-*
+- config_name: ja_cross_validation_0
+  data_files:
+  - split: train
+    path: ja_cross_validation_0/train-*
+  - split: test
+    path: ja_cross_validation_0/test-*
+  - split: validation
+    path: ja_cross_validation_0/validation-*
+- config_name: ja_cross_validation_1
+  data_files:
+  - split: train
+    path: ja_cross_validation_1/train-*
+  - split: test
+    path: ja_cross_validation_1/test-*
+  - split: validation
+    path: ja_cross_validation_1/validation-*
+- config_name: ja_cross_validation_2
+  data_files:
+  - split: train
+    path: ja_cross_validation_2/train-*
+  - split: test
+    path: ja_cross_validation_2/test-*
+  - split: validation
+    path: ja_cross_validation_2/validation-*
+- config_name: ja_cross_validation_3
+  data_files:
+  - split: train
+    path: ja_cross_validation_3/train-*
+  - split: test
+    path: ja_cross_validation_3/test-*
+  - split: validation
+    path: ja_cross_validation_3/validation-*
+- config_name: ja_cross_validation_4
+  data_files:
+  - split: train
+    path: ja_cross_validation_4/train-*
+  - split: test
+    path: ja_cross_validation_4/test-*
+  - split: validation
+    path: ja_cross_validation_4/validation-*
+- config_name: mix
+  data_files:
+  - split: train
+    path: mix/train-*
+  - split: validation
+    path: mix/validation-*
+- config_name: mix_2022
+  data_files:
+  - split: train
+    path: mix_2022/train-*
+  - split: validation
+    path: mix_2022/validation-*
   data_files:
   - split: train_en
-    path: "dataset/en/en_train.jsonl"
+    path: dataset/en/en_train.jsonl
 language:
-  - en
-  - ja
-  - el
-  - es
+- en
+- ja
+- el
+- es
 license:
-  - other
+- other
 multilinguality:
-  - monolingual
+- monolingual
 size_categories:
-  - 1K<n<10K
>>")
-for i in models_filtered:
-    api.update_repo_visibility(repo_id=i, repo_type='model', private=True)
diff --git a/hf_remove_models.py b/hf_remove_models.py
deleted file mode 100644
index 0b817818849a6847188e8beacd2a9698b78acf0e..0000000000000000000000000000000000000000
--- a/hf_remove_models.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from pprint import pprint
-from huggingface_hub import ModelFilter, HfApi
-
-api = HfApi()
-filt = ModelFilter(author='cardiffnlp')
-models = api.list_models(filter=filt)
-models_filtered = [i.modelId for i in models if 'cross-validation' in i.modelId]
-pprint(sorted(models_filtered))
-input("delete all? >>>")
-for i in models_filtered:
-    api.delete_repo(repo_id=i, repo_type='model')
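The two deleted helper scripts rely on `ModelFilter`, which recent `huggingface_hub` releases have removed. A minimal sketch of the same bulk cleanup against the current API, assuming `huggingface_hub>=0.14`, where `list_models` accepts `author=` directly; the confirmation prompt and the `cross-validation` filter mirror the deleted code:

```python
from pprint import pprint
from huggingface_hub import HfApi

api = HfApi()
# list_models(author=...) replaces the removed ModelFilter-based filtering
models = [m.modelId for m in api.list_models(author="cardiffnlp")
          if "cross-validation" in m.modelId]
pprint(sorted(models))
if input("delete all? >>> ").strip().lower() == "y":
    for repo_id in models:
        api.delete_repo(repo_id=repo_id, repo_type="model")
```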
>>>") -for i in models_filtered: - api.delete_repo(repo_id=i, repo_type='model') diff --git a/ja/test-00000-of-00001.parquet b/ja/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e36359500f71cb844b10775974c36b62eaffbbc7 --- /dev/null +++ b/ja/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:919c29ede453fa2d38b0efe8d43339abe8c27eb65bf16ac8adaa40b0cbad2d6f +size 95809 diff --git a/ja/train-00000-of-00001.parquet b/ja/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b849761c309a5a956dfb9696fd7d3b3173dc8d3e --- /dev/null +++ b/ja/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3c4576f3a8af7e3c14e6712bf76ac93a878ec2afebece612fa371a09e23bcc5 +size 25915 diff --git a/ja/validation-00000-of-00001.parquet b/ja/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d252c4723bb618475e01d000c760cf8c7bab822c --- /dev/null +++ b/ja/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:635cc28ecb374ab2df694cd21bb1ea11d2baf0dba6bd12db90a49ed1a4d3fd23 +size 7292 diff --git a/ja_cross_validation_0/test-00000-of-00001.parquet b/ja_cross_validation_0/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..61b8818c4315b6c491b4da33c2aa02bfd04b6392 --- /dev/null +++ b/ja_cross_validation_0/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da3e5ee7db282b4ebba006e619ff8ce459b44190ba805c60a03fc2ff68d4622f +size 27867 diff --git a/ja_cross_validation_0/train-00000-of-00001.parquet b/ja_cross_validation_0/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b94a3fc9ffeb9ed2612ddfda34a458c05c4e4ca1 --- /dev/null +++ b/ja_cross_validation_0/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72afd52c2b2e599dd3d20308fa00ad95c9e75c928c15cea2997fe75903ad4db8 +size 87350 diff --git a/ja_cross_validation_0/validation-00000-of-00001.parquet b/ja_cross_validation_0/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3b9fe78d1c24d2d2759468d53729867cb268d9fa --- /dev/null +++ b/ja_cross_validation_0/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfbcd1b9de3de05a04f84a0c7472811f2a673b650afcd5a8b929b67aff243643 +size 14508 diff --git a/ja_cross_validation_1/test-00000-of-00001.parquet b/ja_cross_validation_1/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..043eb04a32a746f76620999adba32521507aad1c --- /dev/null +++ b/ja_cross_validation_1/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea5bcceb76dd5a3bbe513c1d200419741f86b3f3ab90b974c1e1a7f7642090b8 +size 28038 diff --git a/ja_cross_validation_1/train-00000-of-00001.parquet b/ja_cross_validation_1/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6052611c5dda944d13ac993daf2b8816a89e52f7 --- /dev/null +++ b/ja_cross_validation_1/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c99bac44f4af9fd78ff8ad8e59e3dc3379630d7c1f9f3a31ec840c0f05a3f52 +size 86698 diff --git 
a/ja_cross_validation_1/validation-00000-of-00001.parquet b/ja_cross_validation_1/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..deb8ba9994fa7093b0724599956ce5f72e24a64f --- /dev/null +++ b/ja_cross_validation_1/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90bbaa659551c4af1cee74ad3b25c62cb23231b6cd00371a2e8238af4d0b10bb +size 15080 diff --git a/ja_cross_validation_2/test-00000-of-00001.parquet b/ja_cross_validation_2/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c94b29cb502bd09b10fc429515d452235a866b02 --- /dev/null +++ b/ja_cross_validation_2/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59678cd9ce06dd0ac35cf5548f91001d9d4d175357fe065a31a53f220bc85c07 +size 26873 diff --git a/ja_cross_validation_2/train-00000-of-00001.parquet b/ja_cross_validation_2/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..57051853a1ece205f5243498a3642a62681d271e --- /dev/null +++ b/ja_cross_validation_2/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94921ebc3961c349371b20faaba2829d6fe62e7b87d72c1871fe35d15c36e85f +size 87245 diff --git a/ja_cross_validation_2/validation-00000-of-00001.parquet b/ja_cross_validation_2/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1763dc8f01b34df861c85db7c4c91f27d6f2cec9 --- /dev/null +++ b/ja_cross_validation_2/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:134aa2f21923c916bf1c081aa993c9640f917991d1b25d85c7d58b93be900c03 +size 14946 diff --git a/ja_cross_validation_3/test-00000-of-00001.parquet b/ja_cross_validation_3/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5e1dfa1627fdee0bbfac251e15ee6fda47df8896 --- /dev/null +++ b/ja_cross_validation_3/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cfc525f0851037fc2b8786359e2ddab7a4cda46835ce1b0c0a1971d980596c7 +size 29198 diff --git a/ja_cross_validation_3/train-00000-of-00001.parquet b/ja_cross_validation_3/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..81581eb0d393d60880b9f641df69bb4c6a1e7412 --- /dev/null +++ b/ja_cross_validation_3/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccc8995786a502b8b08d0976bcec06f4aa2465ae66b36f39723c39e73fe7248b +size 84724 diff --git a/ja_cross_validation_3/validation-00000-of-00001.parquet b/ja_cross_validation_3/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..86aa3c5f2fc7159eada8d380249243206424ea80 --- /dev/null +++ b/ja_cross_validation_3/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77763c0ded84861c26c633c653ec68dcdfb2d71b64f76a5dc7986f97f06f55eb +size 15488 diff --git a/ja_cross_validation_4/test-00000-of-00001.parquet b/ja_cross_validation_4/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e5a48cfae40535f932b16c605be29bcf581a4121 --- /dev/null +++ b/ja_cross_validation_4/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
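Each new `*.parquet` entry above is checked in as a Git LFS pointer: the three stored lines are the LFS spec version, the SHA-256 of the real file, and its byte size, while the data itself lives in LFS storage. A hedged sketch of reading one split without cloning, assuming the `hf://` fsspec protocol shipped with recent `huggingface_hub` (plus `pyarrow` for parquet support):

```python
import pandas as pd

# Resolves the LFS pointer transparently and streams the actual parquet file.
df = pd.read_parquet(
    "hf://datasets/cardiffnlp/tweet_topic_multilingual/ja/test-00000-of-00001.parquet"
)
print(len(df), df.columns.tolist())
```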
diff --git a/ja_cross_validation_4/train-00000-of-00001.parquet b/ja_cross_validation_4/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..089109975e7715f7988d70d088493ff7b221f05b
--- /dev/null
+++ b/ja_cross_validation_4/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6135f18f6f59dc17e6d9cc5f967afd0cf325d84eabf059e8beb740f95dfeb392
+size 86529
diff --git a/ja_cross_validation_4/validation-00000-of-00001.parquet b/ja_cross_validation_4/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..1cc3fb8447062bed7a59b3645996e2f6818581f1
--- /dev/null
+++ b/ja_cross_validation_4/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e151c15a1bacd3c46f043762f34b66bb2228451f3b8f2594b30c32ba15234ba7
+size 15418
diff --git a/mix/train-00000-of-00001.parquet b/mix/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d791bb33af16e0e151a94cd655dfa788d62301a4
--- /dev/null
+++ b/mix/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21af56bb66c24c79f888b82f08ec938b599e34e83f8c7e413194fd04335b98a1
+size 109162
diff --git a/mix/validation-00000-of-00001.parquet b/mix/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f2466ed8a98a32f7324eeeff41c8875e3edea07d
--- /dev/null
+++ b/mix/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0dd744237bf4ce2a58f8a895596b230178d05a69681b148a15b5ccaebe9985fb
+size 19362
diff --git a/mix_2022/train-00000-of-00001.parquet b/mix_2022/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..76e89bbfcddb7914bd8c10f49a664ed2389c9aa7
--- /dev/null
+++ b/mix_2022/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ea21be4b086d8eb8de8612798cf9ab69539cf2ba5c3ab1ace968a29cf763ec4
+size 1660312
diff --git a/mix_2022/validation-00000-of-00001.parquet b/mix_2022/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..8cfe4f6ea592df1ea395291e7440fbd4fea1469b
--- /dev/null
+++ b/mix_2022/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a7d8191a798f85bae4a976601d5ed7da3965c812f42689340a3a483e55f5aee
+size 194088
diff --git a/training_scripts/README.md b/training_scripts/README.md
deleted file mode 100644
index bbac5dc144535af93d6da52d38fe01bb10fab872..0000000000000000000000000000000000000000
--- a/training_scripts/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Experiment
-
-- train
\ No newline at end of file
diff --git a/training_scripts/finetune_t5.py b/training_scripts/finetune_t5.py
deleted file mode 100644
index da32209ffc4934332be0a2662ba0093bb37289a4..0000000000000000000000000000000000000000
--- a/training_scripts/finetune_t5.py
+++ /dev/null
@@ -1,348 +0,0 @@
-""" Fine-tune T5 on topic classification (multi-label multi-class classification)
-```
-python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp --low-cpu-mem-usage
-```
-"""
-import json
-import logging
-import os
-import argparse
-import gc
-from glob import glob
-from typing import List, Set
-from shutil import copyfile
-from statistics import mean
-from distutils.dir_util import copy_tree
-
-import torch
-import transformers
-from datasets import load_dataset
-from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
-from huggingface_hub import Repository
-
-
-os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # turn-off the warning message
-os.environ['WANDB_DISABLED'] = 'true'  # disable wandb
-_LR = [1e-6, 1e-5, 1e-4]
-_BATCH = 32
-_EPOCH = 5
-_CLASS_MAP = {
-    'Arts & Culture': ['Τέχνες & Πολιτισμός', 'Arte y cultura', 'アート&カルチャー'],
-    'Business & Entrepreneurs': ['Επιχειρήσεις & Επιχειρηματίες', 'Negocios y emprendedores', 'ビジネス'],
-    'Celebrity & Pop Culture': ['Διασημότητες & Ποπ κουλτούρα', 'Celebridades y cultura pop', '芸能'],
-    'Diaries & Daily Life': ['Ημερολόγια & Καθημερινή ζωή', 'Diarios y vida diaria', '日常'],
-    'Family': ['Οικογένεια', 'Familia', '家族'],
-    'Fashion & Style': ['Μόδα & Στυλ', 'Moda y estilo', 'ファッション'],
-    'Film, TV & Video': ['Ταινίες, τηλεόραση & βίντεο', 'Cine, televisión y video', '映画&ラジオ'],
-    'Fitness & Health': ['Γυμναστική & Υεία', 'Estado físico y salud', 'フィットネス&健康'],
-    'Food & Dining': ['Φαγητό & Δείπνο', 'Comida y comedor', '料理'],
-    'Learning & Educational': ['Μάθηση & Εκπαίδευση', 'Aprendizaje y educación', '教育関連'],
-    'News & Social Concern': ['Ειδήσεις & Κοινωνία', 'Noticias e interés social', '社会'],
-    'Relationships': ['Σχέσεις', 'Relaciones', '人間関係'],
-    'Science & Technology': ['Επιστήμη & Τεχνολογία', 'Ciencia y Tecnología', 'サイエンス'],
-    'Youth & Student Life': ['Νεανική & Φοιτητική ζωή', 'Juventud y Vida Estudiantil', '学校'],
-    'Music': ['Μουσική', 'Música', '音楽'],
-    'Gaming': ['Παιχνίδια', 'Juegos', 'ゲーム'],
-    'Sports': ['Αθλητισμός', 'Deportes', 'スポーツ'],
-    'Travel & Adventure': ['Ταξίδια & Περιπέτεια', 'Viajes y aventuras', '旅行'],
-    'Other Hobbies': ['Άλλα χόμπι', 'Otros pasatiempos', 'その他']
-}
-
-
-def load_model(
-        model_name: str,
-        use_auth_token: bool = False,
-        low_cpu_mem_usage: bool = False) -> transformers.PreTrainedModel:
-    """Load language model from huggingface model hub."""
-    # config & tokenizer
-    config = transformers.AutoConfig.from_pretrained(model_name, use_auth_token=use_auth_token)
-    if config.model_type == 't5':  # T5 model requires T5ForConditionalGeneration class
-        model_class = transformers.T5ForConditionalGeneration.from_pretrained
-    elif config.model_type == 'mt5':
-        model_class = transformers.MT5ForConditionalGeneration.from_pretrained
-    elif config.model_type == 'bart':
-        model_class = transformers.BartForConditionalGeneration.from_pretrained
-    elif config.model_type == 'mbart':
-        model_class = transformers.MBartForConditionalGeneration.from_pretrained
-    else:
-        raise ValueError(f'unsupported model type: {config.model_type}')
-    param = {'config': config, 'use_auth_token': use_auth_token, 'low_cpu_mem_usage': low_cpu_mem_usage}
-    return model_class(model_name, **param)
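`load_model` above branches on `config.model_type` to pick the matching conditional-generation class. For reference, `transformers.AutoModelForSeq2SeqLM` performs the same dispatch for t5/mt5/bart/mbart (and other seq2seq architectures) in one call — a sketch, not part of the deleted script:

```python
import transformers

# The Auto class resolves T5/MT5/Bart/MBart (and more) from the checkpoint
# config, making the manual if/elif dispatch above unnecessary.
model = transformers.AutoModelForSeq2SeqLM.from_pretrained(
    "google/mt5-small", low_cpu_mem_usage=True
)
```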
-
-
-def train(
-        model_name: str,
-        model_low_cpu_mem_usage: bool,
-        dataset: str,
-        dataset_name: str,
-        dataset_column_label: str,
-        dataset_column_text: str,
-        random_seed: int,
-        use_auth_token: bool):
-    """Fine-tune seq2seq model."""
-    logging.info(f'[TRAIN]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
-    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
-
-    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
-    dataset_instance = load_dataset(dataset, dataset_name, split="train", use_auth_token=use_auth_token)
-    tokenized_dataset = []
-    for d in dataset_instance:
-        model_inputs = tokenizer(d[dataset_column_text], truncation=True)
-        model_inputs['labels'] = tokenizer(text_target=d[dataset_column_label], truncation=True)['input_ids']
-        tokenized_dataset.append(model_inputs)
-
-    for n, lr_tmp in enumerate(_LR):
-        logging.info(f"[TRAIN {n}/{len(_LR)}] lr: {lr_tmp}")
-        output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}"
-        if os.path.exists(f"{output_dir_tmp}/pytorch_model.bin"):
-            continue
-        model = load_model(
-            model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
-        )
-        trainer = Seq2SeqTrainer(
-            model=model,
-            args=Seq2SeqTrainingArguments(
-                num_train_epochs=_EPOCH,
-                learning_rate=lr_tmp,
-                output_dir=output_dir_tmp,
-                save_strategy="epoch",
-                evaluation_strategy="no",
-                seed=random_seed,
-                per_device_train_batch_size=_BATCH,
-            ),
-            data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
-            train_dataset=tokenized_dataset.copy(),
-        )
-        # train
-        trainer.train()
-        del trainer
-        del model
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    for model_path in glob(f"{output_dir}/*/*"):
-        tokenizer.save_pretrained(model_path)
-
-
-def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> float:
-    scores = []
-    for g, r in zip(references, predictions):
-        tp = len(set(g).intersection(set(r)))
-        fp = len([_g for _g in g if _g not in r])
-        fn = len([_r for _r in r if _r not in g])
-        f1 = 0 if tp == 0 else 2 * tp / (2 * tp + fp + fn)
-        scores.append(f1)
-    return mean(scores)
-
-
-def unify_label(label: Set[str]):
-    new_label = []
-    for label_tmp in label:
-        label_en = [k for k, v in _CLASS_MAP.items() if label_tmp in v]
-        if label_en:
-            new_label.append(label_en[0])
-    return set(new_label)
-
-
-def get_metric(
-        prediction_file: str,
-        metric_file: str,
-        model_path: str,
-        data: List[str],
-        label: List[str]) -> float:
-    if os.path.exists(metric_file):
-        with open(metric_file) as f:
-            eval_metric = json.load(f)
-        return eval_metric['f1']
-    if not os.path.exists(prediction_file):
-        pipe = pipeline(
-            'text2text-generation',
-            model=model_path,
-            device='cuda:0' if torch.cuda.is_available() else 'cpu',
-        )
-        output = pipe(data, batch_size=_BATCH)
-        output = [i['generated_text'] for i in output]
-        with open(prediction_file, 'w') as f:
-            f.write('\n'.join(output))
-    with open(prediction_file) as f:
-        output = [unify_label(set(i.split(','))) for i in f.read().split('\n')]
-    label = [unify_label(set(i.split(','))) for i in label]
-    eval_metric = {'f1': get_f1_score(label, output)}
-    logging.info(json.dumps(eval_metric, indent=4))
-    with open(metric_file, 'w') as f:
-        json.dump(eval_metric, f)
-    return eval_metric['f1']
-
-
-def validate(
-        model_name: str,
-        dataset: str,
-        dataset_name: str,
-        dataset_column_text: str,
-        use_auth_token: bool,
-        dataset_column_label: str):
-    logging.info(f'[VALIDATE]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
-    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
-    dataset_instance = load_dataset(dataset, dataset_name, split='validation', use_auth_token=use_auth_token)
-    label = [i[dataset_column_label] for i in dataset_instance]
-    data = [i[dataset_column_text] for i in dataset_instance]
-    model_score = []
-    for model_path in glob(f"{output_dir}/*/*/pytorch_model.bin"):
-        model_path = os.path.dirname(model_path)
-        prediction_file = f"{model_path}/prediction.validate.{os.path.basename(dataset)}.{dataset_name}.txt"
-        metric_file = f"{model_path}/metric.validate.{os.path.basename(dataset)}.{dataset_name}.json"
-        metric = get_metric(
-            prediction_file=prediction_file,
-            metric_file=metric_file,
-            model_path=model_path,
-            label=label,
-            data=data
-        )
-        model_score.append([model_path, metric])
-    model_score = sorted(model_score, key=lambda x: x[1])
-    logging.info('Validation Result')
-    for k, v in model_score:
-        logging.info(f'{k}: {v}')
-    best_model = model_score[-1][0]
-    best_model_path = f'{output_dir}/best_model'
-    copy_tree(best_model, best_model_path)
-
-
-def test(
-        model_name: str,
-        dataset: str,
-        dataset_name: str,
-        dataset_column_text: str,
-        use_auth_token: bool,
-        dataset_column_label: str):
-    logging.info(f'[TEST]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
-    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
-    dataset_instance = load_dataset(dataset, dataset_name, split='test', use_auth_token=use_auth_token)
-    label = [i[dataset_column_label] for i in dataset_instance]
-    data = [i[dataset_column_text] for i in dataset_instance]
-    model_path = f'{output_dir}/best_model'
-    if not os.path.exists(model_path):
-        model_path = os.path.basename(model_name)
-
-    prediction_file = f"{model_path}/prediction.{os.path.basename(dataset)}.{dataset_name}.txt"
-    metric_file = f"{model_path}/metric.{os.path.basename(dataset)}.{dataset_name}.json"
-    metric = get_metric(
-        prediction_file=prediction_file,
-        metric_file=metric_file,
-        model_path=model_path,
-        label=label,
-        data=data
-    )
-    logging.info(f'Test Result: {metric}')
-
-
-def upload(
-        model_name: str,
-        dataset: str,
-        dataset_name: str,
-        dataset_column_text: str,
-        use_auth_token: bool,
-        model_alias: str,
-        model_organization: str):
-    assert model_alias is not None and model_organization is not None,\
-        'model_organization must be specified when model_alias is specified'
-    logging.info('uploading to huggingface')
-    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
-    args = {'use_auth_token': use_auth_token, 'organization': model_organization}
-    model_path = f'{output_dir}/best_model'
-    if not os.path.exists(model_path):
-        model_path = os.path.basename(model_name)
-    model = load_model(model_name=model_path)
-    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
-    model.push_to_hub(model_alias, **args)
-    tokenizer.push_to_hub(model_alias, **args)
-    repo = Repository(model_alias, f'{model_organization}/{model_alias}')
-    for i in glob(f'{model_path}/*'):
-        if not os.path.exists(f'{model_alias}/{os.path.basename(i)}'):
-            copyfile(i, f'{model_alias}/{os.path.basename(i)}')
-    dataset_instance = load_dataset(dataset, dataset_name, split='validation', use_auth_token=use_auth_token)
-    sample = [i[dataset_column_text] for i in dataset_instance]
-    sample = [i for i in sample if "'" not in i and '"' not in i][:3]
-    widget = '\n'.join([f"- text: '{t}'\n  example_title: example {_n + 1}" for _n, t in enumerate(sample)])
-    with open(f'{model_alias}/README.md', 'w') as f:
-        f.write(f"""
----
-widget:
-{widget}
----
-
-# {model_organization}/{model_alias}
-
-This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).
-
-### Usage
-
-```python
-from transformers import pipeline
-
-pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
-output = pipe('{sample[0]}')
-```
-    """)
-    repo.push_to_hub()
-
-
-if __name__ == '__main__':
-    # arguments
-    logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
-    parser = argparse.ArgumentParser(description='Seq2Seq LM Fine-tuning on topic classification.')
-    parser.add_argument('-m', '--model-name', default='google/mt5-small', type=str)
-    parser.add_argument('--low-cpu-mem-usage', action='store_true')
-    parser.add_argument('-d', '--dataset', default='cardiffnlp/tweet_topic_multilingual', type=str)
-    parser.add_argument('--dataset-name', default='ja', type=str)
-    parser.add_argument('--dataset-column-label', default='label_name_flatten', type=str)
-    parser.add_argument('--dataset-column-text', default='text', type=str)
-    parser.add_argument('--random-seed', default=42, type=int)
-    parser.add_argument('--use-auth-token', action='store_true')
-    parser.add_argument('--model-alias', default=None, type=str)
-    parser.add_argument('--model-organization', default=None, type=str)
-    parser.add_argument('--skip-train', action='store_true')
-    parser.add_argument('--skip-validate', action='store_true')
-    parser.add_argument('--skip-test', action='store_true')
-    parser.add_argument('--skip-upload', action='store_true')
-    opt = parser.parse_args()
-
-    if not opt.skip_train:
-        train(
-            model_name=opt.model_name,
-            model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
-            dataset=opt.dataset,
-            dataset_name=opt.dataset_name,
-            dataset_column_label=opt.dataset_column_label,
-            dataset_column_text=opt.dataset_column_text,
-            random_seed=opt.random_seed,
-            use_auth_token=opt.use_auth_token,
-        )
-    if not opt.skip_validate:
-        validate(
-            model_name=opt.model_name,
-            dataset=opt.dataset,
-            dataset_name=opt.dataset_name,
-            dataset_column_label=opt.dataset_column_label,
-            dataset_column_text=opt.dataset_column_text,
-            use_auth_token=opt.use_auth_token
-        )
-    if not opt.skip_test:
-        test(
-            model_name=opt.model_name,
-            dataset=opt.dataset,
-            dataset_name=opt.dataset_name,
-            dataset_column_label=opt.dataset_column_label,
-            dataset_column_text=opt.dataset_column_text,
-            use_auth_token=opt.use_auth_token
-        )
-    if not opt.skip_upload:
-        upload(
-            model_name=opt.model_name,
-            dataset=opt.dataset,
-            dataset_name=opt.dataset_name,
-            dataset_column_text=opt.dataset_column_text,
-            use_auth_token=opt.use_auth_token,
-            model_alias=opt.model_alias,
-            model_organization=opt.model_organization
-        )
diff --git a/training_scripts/requirements.txt b/training_scripts/requirements.txt
deleted file mode 100644
index 74a813603c4158a8a42fd52a61e2cd02fafd4f12..0000000000000000000000000000000000000000
--- a/training_scripts/requirements.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-ray
-ray[tune]
-torch
-datasets
-transformers
-huggingface_hub
\ No newline at end of file
diff --git a/training_scripts/script.sh b/training_scripts/script.sh
deleted file mode 100644
index f827ec0e2d51212170c24e6a97fcc9a03330e232..0000000000000000000000000000000000000000
--- a/training_scripts/script.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-
-# training
-## en_2022
-python finetune_t5.py --dataset-name en_2022 --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token --skip-test
-git clone https://huggingface.co/cardiffnlp/mt5-small-tweet-topic-multi-en-2022
-python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022 --dataset-name en --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022 --dataset-name ja --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022 --dataset-name gr --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022 --dataset-name es --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
-cd mt5-small-tweet-topic-multi-en-2022 && git add . && git commit -m "update" && git push origin main && cd ..
-## mix
-python finetune_t5.py --dataset-name mix --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token --skip-test
-git clone https://huggingface.co/cardiffnlp/mt5-small-tweet-topic-multi-mix
-python finetune_t5.py -m mt5-small-tweet-topic-multi-mix --dataset-name en --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m mt5-small-tweet-topic-multi-mix --dataset-name ja --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m mt5-small-tweet-topic-multi-mix --dataset-name gr --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m mt5-small-tweet-topic-multi-mix --dataset-name es --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
-cd mt5-small-tweet-topic-multi-mix && git add . && git commit -m "update" && git push origin main && cd ..
-
-## en_2022 -> mix
-python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name mix --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-mix --model-organization cardiffnlp --use-auth-token --skip-test
-git clone https://huggingface.co/cardiffnlp/mt5-small-tweet-topic-multi-en-2022-mix
-python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022-mix --dataset-name en --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-mix --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022-mix --dataset-name ja --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-mix --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022-mix --dataset-name gr --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-mix --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022-mix --dataset-name es --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-mix --model-organization cardiffnlp --use-auth-token
-cd mt5-small-tweet-topic-multi-en-2022-mix && git add . && git commit -m "update" && git push origin main && cd ..
-
-
-## single
-python finetune_t5.py --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-ja --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-gr --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-es --model-organization cardiffnlp --use-auth-token
-## en_2022 -> single
-python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-es --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-en --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-ja --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-gr --model-organization cardiffnlp --use-auth-token
-
-# cross-validation
-for N in 0 1 2 3 4
-do
-  for LA in en ja gr es
-  do
-    python finetune_t5.py --dataset-name ${LA}_cross_validation_${N} --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-${LA}-cross-validation-${N} --model-organization cardiffnlp --use-auth-token
-  done
-done
-
-
-LA=en
-for N in 0 1 2 3 4
-do
-  python finetune_t5.py -m t5-small --dataset-name ${LA}_cross_validation_${N} --low-cpu-mem-usage --model-alias t5-small-tweet-topic-multi-${LA}-cross-validation-${N} --model-organization cardiffnlp --use-auth-token
-done
-
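The deleted pipeline produced the `cardiffnlp/mt5-small-tweet-topic-multi-*` checkpoints named in the script above, which generate the `label_name_flatten` string, i.e. a comma-separated topic list. A hedged usage sketch, assuming the `...-multi-ja` checkpoint is still public (the input tweet is illustrative):

```python
from transformers import pipeline

pipe = pipeline("text2text-generation", model="cardiffnlp/mt5-small-tweet-topic-multi-ja")
# The model generates comma-separated topic labels, mirroring label_name_flatten.
print(pipe("今夜のライブ最高だった!")[0]["generated_text"])  # e.g. "音楽"
```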
diff --git a/tweet_topic_multilingual.py b/tweet_topic_multilingual.py
deleted file mode 100644
index 492d3d1d06f01d9329145aaa19879c34cf643272..0000000000000000000000000000000000000000
--- a/tweet_topic_multilingual.py
+++ /dev/null
@@ -1,173 +0,0 @@
-""" TweetTopicMultilingual Dataset """
-import json
-from typing import List
-
-import datasets
-
-logger = datasets.logging.get_logger(__name__)
-_DESCRIPTION = """[TweetTopicMultilingual](TBA)"""
-_VERSION = "0.0.91"
-_CITATION = """TBA"""
-_HOME_PAGE = "https://cardiffnlp.github.io"
-_NAME = "tweet_topic_multilingual"
-_ROOT_URL = f"https://huggingface.co/datasets/cardiffnlp/{_NAME}/resolve/main/dataset"
-_LANGUAGES = ["en", "es", "ja", "gr"]
-_CLASS_MAPPING = {
-    "en": [
-        "Arts & Culture",
-        "Business & Entrepreneurs",
-        "Celebrity & Pop Culture",
-        "Diaries & Daily Life",
-        "Family",
-        "Fashion & Style",
-        "Film, TV & Video",
-        "Fitness & Health",
-        "Food & Dining",
-        "Learning & Educational",
-        "News & Social Concern",
-        "Relationships",
-        "Science & Technology",
-        "Youth & Student Life",
-        "Music",
-        "Gaming",
-        "Sports",
-        "Travel & Adventure",
-        "Other Hobbies"
-    ],
-    "gr": [
-        "Τέχνες & Πολιτισμός",
-        "Επιχειρήσεις & Επιχειρηματίες",
-        "Διασημότητες & Ποπ κουλτούρα",
-        "Ημερολόγια & Καθημερινή ζωή",
-        "Οικογένεια",
-        "Μόδα & Στυλ",
-        "Ταινίες, τηλεόραση & βίντεο",
-        "Γυμναστική & Υεία",
-        "Φαγητό & Δείπνο",
-        "Μάθηση & Εκπαίδευση",
-        "Ειδήσεις & Κοινωνία",
-        "Σχέσεις",
-        "Επιστήμη & Τεχνολογία",
-        "Νεανική & Φοιτητική ζωή",
-        "Μουσική",
-        "Παιχνίδια",
-        "Αθλητισμός",
-        "Ταξίδια & Περιπέτεια",
-        "Άλλα χόμπι"
-    ],
-    "es": [
-        "Arte y cultura",
-        "Negocios y emprendedores",
-        "Celebridades y cultura pop",
-        "Diarios y vida diaria",
-        "Familia",
-        "Moda y estilo",
-        "Cine, televisión y video",
-        "Estado físico y salud",
-        "Comida y comedor",
-        "Aprendizaje y educación",
-        "Noticias e interés social",
-        "Relaciones",
-        "Ciencia y Tecnología",
-        "Juventud y Vida Estudiantil",
-        "Música",
-        "Juegos",
-        "Deportes",
-        "Viajes y aventuras",
-        "Otros pasatiempos"
-    ],
-    "ja": [
-        "アート&カルチャー",
-        "ビジネス",
-        "芸能",
-        "日常",
-        "家族",
-        "ファッション",
-        "映画&ラジオ",
-        "フィットネス&健康",
-        "料理",
-        "教育関連",
-        "社会",
-        "人間関係",
-        "サイエンス",
-        "学校",
-        "音楽",
-        "ゲーム",
-        "スポーツ",
-        "旅行",
-        "その他"
-    ]
-}
-
-_URL = {}
-# plain split
-for lan in _LANGUAGES:
-    _URL[lan] = {split: [f"{_ROOT_URL}/{lan}/{lan}_{split}.jsonl"] for split in ["train", "test", "validation"]}
-_URL["en_2022"] = {split: [f"{_ROOT_URL}/en_2022/{split}.jsonl"] for split in ["train", "validation"]}
-_URL["mix"] = {
-    split: [f"{_ROOT_URL}/{lan}/{lan}_{split}.jsonl" for lan in _LANGUAGES] for split in ["train", "validation"]
-}
-_URL["mix_2022"] = {
-    split: [f"{_ROOT_URL}/{lan}/{lan}_{split}.jsonl" for lan in _LANGUAGES] + [f"{_ROOT_URL}/en_2022/{split}.jsonl"]
-    for split in ["train", "validation"]
-}
-# cross validation
-for lan in _LANGUAGES:
-    _URL.update({
-        f"{lan}_cross_validation_{n}": {
-            split: [f"{_ROOT_URL}/{lan}/cross_validation/{lan}_{split}_{n}.jsonl"]
-            for split in ["train", "test", "validation"]
-        } for n in range(5)
-    })
-
-
-class Config(datasets.BuilderConfig):
-    """BuilderConfig"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig.
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(Config, self).__init__(**kwargs)
-
-
-class TweetTopicMultilingual(datasets.GeneratorBasedBuilder):
-    """Dataset."""
-
-    BUILDER_CONFIGS = [
-        Config(name=i, version=datasets.Version(_VERSION), description=_DESCRIPTION) for i in _URL.keys()
-    ]
-
-    def _split_generators(self, dl_manager):
-        downloaded_file = dl_manager.download_and_extract(_URL[self.config.name])
-        splits = _URL[self.config.name].keys()
-        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_file[i]}) for i in splits]
-
-    def _generate_examples(self, filepath: List[str]):
-        _key = 0
-        for _file in filepath:
-            logger.info("generating examples from = %s", _file)
-            with open(_file, encoding="utf-8") as f:
-                _list = [json.loads(i) for i in f.read().split("\n") if len(i) > 0]
-            for i in _list:
-                yield _key, i
-                _key += 1
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "text": datasets.Value("string"),
-                    "label_name_flatten": datasets.Value("string"),
-                    "label": datasets.Sequence(datasets.features.ClassLabel(names=_CLASS_MAPPING["en"])),
-                    "label_name": datasets.Sequence(datasets.Value("string"))
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOME_PAGE,
-            citation=_CITATION,
-        )
\ No newline at end of file