{ "cells": [ { "cell_type": "code", "execution_count": 34, "id": "c88056d1-bf3c-477e-9b36-50e7398f9058", "metadata": {}, "outputs": [], "source": [ "import pandas as pd" ] }, { "cell_type": "code", "execution_count": 35, "id": "186faae4-de4a-4df5-b914-203a4e6296b0", "metadata": {}, "outputs": [], "source": [ "df = pd.read_csv(\"step5.csv\", encoding='utf-8-sig', sep=';')" ] }, { "cell_type": "code", "execution_count": null, "id": "e672cf21-1f91-424c-b944-5d0a12ac69fd", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from sklearn.metrics import classification_report, confusion_matrix\n", "\n", "classification_label_set = {\n", " \"auto\": 0,\n", " \"Business and Industry\": 1,\n", " \"Crime and Justice\": 2,\n", " \"Disaster and Emergency News\": 3,\n", " \"Economics and Finance\": 4,\n", " \"Education\": 5,\n", " \"Entertainment and Culture\": 6,\n", " \"Environment and Climate\": 7,\n", " \"Family and Relationships\": 8,\n", " \"Fashion\": 9,\n", " \"Food and Drink\": 10,\n", " \"Health and Medicine\": 11,\n", " \"Transportation and Infrastructure\": 12,\n", " \"Mental Health and Wellness\": 13,\n", " \"Politics and Government\": 14,\n", " \"Religion\": 15,\n", " \"Sports\": 16,\n", " \"Travel and Leisure\": 17,\n", " \"Technology and Science\": 18\n", "}\n", "\n", "\n", "classification_label_dict = {idx: label for idx, label in enumerate(classification_label_set)}\n", "classification_label_dict_to_index = {v: k for k, v in classification_label_dict.items()}\n", "\n", "\n", "ner_label_set = [\"PAD\",\"O\",\n", " \"B-ORG\", \"I-ORG\", \"B-PERSON\", \"I-PERSON\", \"B-CARDINAL\", \"I-CARDINAL\",\n", " \"B-GPE\", \"I-GPE\", \"B-DATE\", \"I-DATE\", \"B-ORDINAL\", \"I-ORDINAL\",\n", " \"B-PERCENT\", \"I-PERCENT\", \"B-LOC\", \"I-LOC\", \"B-NORP\", \"I-NORP\",\n", " \"B-MONEY\", \"I-MONEY\", \"B-TIME\", \"I-TIME\", \"B-EVENT\", \"I-EVENT\",\n", " \"B-PRODUCT\", \"I-PRODUCT\", \"B-FAC\", \"I-FAC\", \"B-QUANTITY\", \"I-QUANTITY\"\n", "]\n", "\n", "\n", "ner_label_dict = {label: idx for idx, label in enumerate(ner_label_set)}\n", "\n", "ner_label_dict_reverse = {idx: label for label, idx in ner_label_dict.items()}" ] }, { "cell_type": "code", "execution_count": 37, "id": "ced20ed4-b3ca-4401-9221-82bc6e42cfd9", "metadata": {}, "outputs": [], "source": [ "from transformers import AutoTokenizer\n", "model_name = \"nlpaueb/bert-base-greek-uncased-v1\"\n", "tokenizer = AutoTokenizer.from_pretrained(model_name)\n" ] }, { "cell_type": "code", "execution_count": 38, "id": "6360e73b-ddea-4c9b-ac30-366e5f843498", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
urltitletextlabelclean_textner_resultsner_sequencewordsentitiesentities_parsedentities_numberslabel_numbers
0https://www.sport-fm.gr/article/o-roueda-sti-S...Ο πρώην της Ferrari πήγε στην SauberΤον πρώην επικεφαλής στρατηγικής και αθλητικό ...autoΟ πρώην της Ferrari πήγε στην Sauber Τον πρώην...[{'text': 'ferrari', 'type': 'ORG', 'start_pos...[{'word': 'Ο', 'entity': 'O'}, {'word': 'πρώην...['Ο', 'πρώην', 'της', 'Ferrari', 'πήγε', 'στην...['O', 'O', 'O', 'B-ORG', 'O', 'O', 'B-ORG', 'O...['O', 'O', 'O', 'B-ORG', 'O', 'O', 'B-ORG', 'O...[1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, ...0
\n", "
" ], "text/plain": [ " url \\\n", "0 https://www.sport-fm.gr/article/o-roueda-sti-S... \n", "\n", " title \\\n", "0 Ο πρώην της Ferrari πήγε στην Sauber \n", "\n", " text label \\\n", "0 Τον πρώην επικεφαλής στρατηγικής και αθλητικό ... auto \n", "\n", " clean_text \\\n", "0 Ο πρώην της Ferrari πήγε στην Sauber Τον πρώην... \n", "\n", " ner_results \\\n", "0 [{'text': 'ferrari', 'type': 'ORG', 'start_pos... \n", "\n", " ner_sequence \\\n", "0 [{'word': 'Ο', 'entity': 'O'}, {'word': 'πρώην... \n", "\n", " words \\\n", "0 ['Ο', 'πρώην', 'της', 'Ferrari', 'πήγε', 'στην... \n", "\n", " entities \\\n", "0 ['O', 'O', 'O', 'B-ORG', 'O', 'O', 'B-ORG', 'O... \n", "\n", " entities_parsed \\\n", "0 ['O', 'O', 'O', 'B-ORG', 'O', 'O', 'B-ORG', 'O... \n", "\n", " entities_numbers label_numbers \n", "0 [1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, ... 0 " ] }, "execution_count": 38, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.head(1)" ] }, { "cell_type": "code", "execution_count": 39, "id": "7eb5ce65-3cbf-4ba4-9521-7d13ba3687d0", "metadata": {}, "outputs": [], "source": [ "df['entities_numbers'] = df['entities_numbers'].apply(ast.literal_eval)\n", "df['labels'] = df['label_numbers']\n", "df['ner_labels'] = df['entities_numbers']" ] }, { "cell_type": "code", "execution_count": 40, "id": "68d22607-e2d3-4228-b35b-97842b5e64e3", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import random\n", "import torch\n", "\n", "def set_seed(seed=42):\n", " random.seed(seed)\n", " np.random.seed(seed)\n", " torch.manual_seed(seed)\n", " torch.cuda.manual_seed_all(seed)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "21ee5eb1-8832-4afe-80aa-27989e824853", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "--- Sample Inspection ---\n", "Tokens:\n", "['[CLS]', 'ανεμος', 'σαι', '##νθ', 'στο', 'μεξικο', 'κερδισμενο', '##ς', 'ο', 'νορ', '##ις', 'κυριαρχια', 'της', 'ferrari', 'κυριαρχο', '##ς', 'σαι', '##νθ', 'και', 'κυριαρχη', 'ferrari', 'στο', 'μεξικο', '.', 'ο', 'ισπανος', ',', 'ο', 'οποιος', 'ετοιμαζεται', 'για', 'το', 'νεο', 'κεφαλαιο', 'της', 'καριερας', 'του', ',', 'τερματισε', 'πρωτος', 'στο', '19', '##ο', 'grand', 'prix', 'της', 'σεζον', ',', 'σημειωνοντας', 'τη', '2', '##η', 'νικη', 'του', '(', 'επειτα', 'απο', 'την', 'αυστραλια', ')', ',', 'στη', '2', '##η', 'νικη', 'της', 'ferrari', 'επειτα', 'απο', 'εκεινη', 'του', 'λε', '##κλε', '##ρ', 'που', 'τερματισε', '3', '##ος', '.', 'κερδισμενο', '##ς', 'βγηκε', 'ο', 'νορ', '##ις', ',', 'ο', 'οποιος', 'μειωσε', 'στους', '47', 'την', 'αποσταση', 'του', 'απο', 'τον', 'πρωτοπορο', 'φερ', '##στα', '##πεν', '.', 'ο', '27', '##χρονος', 'τερματισε', 'στην', '6', '##η', 'θεση', 'λογω', 'ποινη', '##ς', '20', 'δευτερολεπτ', '##ων', '.', 'διοτι', 'σε', 'κουρσα', 'με', 'αντιπαλο', 'τον', 'νορ', '##ις', 'τον', 'ωθησε', 'εκτος', 'πιστα', '##ς', 'και', 'επειτα', 'βγηκε', 'και', 'ο', 'ιδιος', ',', 'αποκτωντας', 'πλεονεκτημα', '.', 'ο', 'σαι', '##νθ', 'ζει', 'τις', 'τελευταιες', 'του', 'στιγμες', 'ως', 'μελος', 'της', 'ferrari', 'και', 'θελει', 'να', 'την', 'αποχαιρετησ', '##ει', 'με', 'τον', 'καλυτερο', 'δυνατο', 'τροπο', '##ενα', 'λαθος', 'του', 'λε', '##κλε', '##ρ', 'πεντε', 'στροφες', 'πριν', 'το', 'φιναλε', 'του', 'στερ', '##ησε', 'τη', '2', '##η', 'θεση', 'και', 'επιπλεον', 'τρεις', 'ποντους', '.', 'παροτι', 'σχεδον', 'σε', 'ολοκληρο', 'τον', 'αγωνα', 'ειχε', 'το', 'πανω', 'χερι', ',', 'αλλα', 'το', 'σφαλμα', 'λιγο', 'πριν', 'το', 'τελος', 'του', 'κοστισε', 'ακριβα', '.', 'παραλληλα', 
',', 'στους', 'κατασκευαστες', 'η', 'διαφορα', 'ειναι', 'στους', '29', 'ποντους', ',', 'με', 'τη', 'mclaren', 'να', 'βρισκεται', 'πρωτη', 'με', '56', '##6', ',', 'ενω', 'η', 'ferrari', 'εφτασε', 'τους', '53', '##7', '.', '[SEP]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]']\n", "\n", "Transformed Word IDs:\n", "[-100, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 9, 10, 11, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 36, 37, 38, 39, 40, 41, 42, 43, 44, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 55, 56, 57, 58, 59, 60, 61, 62, 63, 63, 63, 64, 65, 66, 66, 67, 68, 68, 69, 70, 71, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 84, 84, 85, 86, 87, 87, 88, 89, 90, 90, 91, 92, 93, 93, 94, 95, 95, 96, 97, 98, 99, 100, 101, 102, 103, 103, 104, 105, 106, 107, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 138, 139, 140, 141, 141, 141, 142, 143, 144, 145, 146, 147, 148, 148, 149, 150, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 
160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 197, 198, 199, 200, 201, 202, 203, 204, 204, 205, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]\n", "\n", "Aligned NER labels:\n", "[-100, 1, 4, -100, 1, 8, 1, -100, 1, 4, -100, 1, 1, 2, 1, -100, 4, -100, 1, 1, 2, 1, 8, 1, 1, 18, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 12, 1, 24, -100, 25, 25, 1, 1, 1, 1, 1, 12, -100, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 12, -100, 1, 1, 2, 1, 1, 1, 1, 4, -100, -100, 1, 1, 12, -100, 1, 1, -100, 1, 1, 4, -100, 1, 1, 1, 1, 1, 6, 1, 1, 1, 1, 1, 1, 4, -100, -100, 1, 1, 1, -100, 1, 1, 12, -100, 1, 1, 1, -100, 22, 23, -100, 1, 1, 1, 1, 1, 1, 1, 4, -100, 1, 1, 1, 1, -100, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, -100, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, -100, 1, 1, 1, 1, 1, -100, 1, 1, 4, -100, -100, 22, 23, 1, 1, 1, 1, 1, -100, 1, 12, -100, 1, 1, 1, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6, 1, 1, 1, 1, 2, 1, 1, 12, 1, 6, -100, 1, 1, 1, 2, 1, 1, 6, -100, 1, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 
-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]\n" ] } ], "source": [ "import torch\n", "from torch.utils.data import DataLoader, Dataset\n", "import ast\n", "import pandas as pd\n", "from sklearn.model_selection import train_test_split\n", "from transformers import AutoTokenizer\n", "\n", "ner_label_dict = {label: idx for idx, label in enumerate(ner_label_set)}\n", "ner_label_dict_reverse = {v: k for k, v in ner_label_dict.items()}\n", "\n", "def chunk_list(items, chunk_size):\n", " \"\"\"Splits the list 'items' into chunks of size 'chunk_size'.\"\"\"\n", " return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]\n", "\n", "class NewsNERChunkedDataset(Dataset):\n", " def __init__(self, dataframe, tokenizer, word_chunk_size=412, target_token_length=512):\n", " self.tokenizer = tokenizer\n", " self.word_chunk_size = word_chunk_size\n", " self.target_token_length = target_token_length\n", " \n", " self.samples = []\n", " for _, row in dataframe.iterrows():\n", " words = row['words']\n", " if isinstance(words, str):\n", " words = ast.literal_eval(words)\n", " \n", " # Classification label\n", " label_class = int(row['labels'])\n", " \n", " # NER labels\n", " ner_labels_val = row['ner_labels']\n", " if isinstance(ner_labels_val, str):\n", " ner_labels_val = ast.literal_eval(ner_labels_val)\n", " \n", " # Split into chunks\n", " word_chunks = chunk_list(words, self.word_chunk_size)\n", " ner_label_chunks = chunk_list(ner_labels_val, self.word_chunk_size)\n", " \n", " for word_chunk, ner_chunk in zip(word_chunks, ner_label_chunks):\n", " encoding = self.tokenizer(\n", " word_chunk,\n", " is_split_into_words=True,\n", " truncation=True,\n", " max_length=self.target_token_length,\n", " padding='max_length',\n", " return_offsets_mapping=True\n", " )\n", " \n", " raw_word_ids = encoding.word_ids() \n", " transformed_word_ids = []\n", " for w_id in raw_word_ids:\n", " if w_id is None:\n", " transformed_word_ids.append(-100) \n", " else:\n", " transformed_word_ids.append(w_id + 1) \n", "\n", " aligned_labels = []\n", " previous_word_idx = None\n", " for w_idx in encoding.word_ids():\n", " if w_idx is None:\n", " aligned_labels.append(-100) \n", " elif w_idx != previous_word_idx:\n", " if w_idx < len(ner_chunk):\n", " aligned_labels.append(ner_chunk[w_idx])\n", " else:\n", " aligned_labels.append(ner_label_dict[\"O\"])\n", " previous_word_idx = w_idx\n", " else:\n", " # Subword of the *same* word -> ignore for NER\n", " aligned_labels.append(-100)\n", " \n", " sample = {\n", " 'input_ids': torch.tensor(encoding['input_ids']),\n", " 'attention_mask': torch.tensor(encoding['attention_mask']),\n", " 'labels_class': torch.tensor(label_class),\n", " 'labels_ner': torch.tensor(aligned_labels),\n", " 
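# word index + 1, with -100 at special/padding positions (see transformed_word_ids\n", "                    # above); presumably kept so sub-token predictions can be mapped back to words\n", "                    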
'word_ids': torch.tensor(transformed_word_ids)\n", "                }\n", "                self.samples.append(sample)\n", "\n", "    def __len__(self):\n", "        return len(self.samples)\n", "    \n", "    def __getitem__(self, idx):\n", "        return self.samples[idx]\n", "\n", "\n", "# 70/15/15 stratified split: hold out 30%, then halve it into validation and test\n", "train_df, temp_df = train_test_split(\n", "    df, \n", "    test_size=0.3, \n", "    random_state=42, \n", "    stratify=df['labels']\n", ")\n", "\n", "val_df, test_df = train_test_split(\n", "    temp_df, \n", "    test_size=0.5, \n", "    random_state=42, \n", "    stratify=temp_df['labels']\n", ")\n", "# -------------------------------\n", "# Tokenizer (already loaded above; re-loading keeps this cell self-contained)\n", "tokenizer = AutoTokenizer.from_pretrained(model_name)\n", "\n", "# Datasets\n", "train_dataset = NewsNERChunkedDataset(train_df, tokenizer)\n", "val_dataset = NewsNERChunkedDataset(val_df, tokenizer)\n", "test_dataset = NewsNERChunkedDataset(test_df, tokenizer)\n", "\n", "# Inspect one training sample to sanity-check the token/label alignment\n", "sample = train_dataset[0]\n", "ids = sample['input_ids'].tolist()\n", "tokens = tokenizer.convert_ids_to_tokens(ids)\n", "wids = sample['word_ids'].tolist()\n", "nerlabs = sample['labels_ner'].tolist()\n", "\n", "print(\"\\n--- Sample Inspection ---\")\n", "print(\"Tokens:\")\n", "print(tokens)\n", "print(\"\\nTransformed Word IDs:\")\n", "print(wids)\n", "print(\"\\nAligned NER labels:\")\n", "print(nerlabs)\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.21" } }, "nbformat": 4, "nbformat_minor": 5 }