from collections import defaultdict
import random

# Note: the splits below rely on random.shuffle without a fixed seed, so the
# output varies between runs; call random.seed(<some int>) here first if you
# need reproducible splits.


def contiene_b(frase):
    # True if the sentence contains at least one B- (entity-begin) label.
    return any(label.startswith("B-") for _, label in frase)


def procesar_training_set_balanceado(archivo_entrada, archivo_salida, archivo_ids):
    """Build a balanced training set: equal numbers of sentences with and without entities."""
    frases = defaultdict(list)

    # Parse the 5-column input: token, doc_id, two ignored columns, label.
    with open(archivo_entrada, encoding="utf-8") as f:
        for linea in f:
            if linea.strip():
                partes = linea.strip().split()
                if len(partes) == 5:
                    token, doc_id, _, _, label = partes
                    frases[doc_id].append((token, label))

    # Separate sentence ids by whether they contain at least one entity mention.
    con_b_ids = [id_ for id_, frase in frases.items() if contiene_b(frase)]
    sin_b_ids = [id_ for id_, frase in frases.items() if not contiene_b(frase)]

    # Shuffle before truncating so the discarded excess is a random subset
    # rather than whatever happened to come last in the file.
    random.shuffle(con_b_ids)
    random.shuffle(sin_b_ids)

    # Downsample the larger group so both contribute the same number of sentences.
    n = min(len(con_b_ids), len(sin_b_ids))
    selected_ids = con_b_ids[:n] + sin_b_ids[:n]
    random.shuffle(selected_ids)

    # Record which sentence ids were kept.
    with open(archivo_ids, "w") as f_ids:
        for id_ in selected_ids:
            f_ids.write(f"{id_}\n")

    # Write the selected sentences in two-column CoNLL format ("token label"),
    # with a blank line between sentences.
    with open(archivo_salida, "w", encoding="utf-8") as out:
        for id_ in selected_ids:
            for token, label in frases[id_]:
                out.write(f"{token} {label}\n")
            out.write("\n")


def procesar_dev_test_balanceado(archivo_entrada, archivo_salida_dev, archivo_salida_test,
                                 archivo_ids_dev, archivo_ids_test):
    """Split the corpus 50/50 into dev and test, stratified by entity presence."""
    frases = defaultdict(list)

    # Same 5-column parse as for the training set.
    with open(archivo_entrada, encoding="utf-8") as f:
        for linea in f:
            if linea.strip():
                partes = linea.strip().split()
                if len(partes) == 5:
                    token, doc_id, _, _, label = partes
                    frases[doc_id].append((token, label))

    con_b_ids = [id_ for id_, frase in frases.items() if contiene_b(frase)]
    sin_b_ids = [id_ for id_, frase in frases.items() if not contiene_b(frase)]

    # Split the entity-bearing sentences in half, at random.
    random.shuffle(con_b_ids)
    mitad_b = len(con_b_ids) // 2
    dev_ids_b = con_b_ids[:mitad_b]
    test_ids_b = con_b_ids[mitad_b:]

    # Do the same with the entity-free sentences.
    random.shuffle(sin_b_ids)
    mitad_sin = len(sin_b_ids) // 2
    dev_ids_sin = sin_b_ids[:mitad_sin]
    test_ids_sin = sin_b_ids[mitad_sin:]

    dev_ids = dev_ids_b + dev_ids_sin
    test_ids = test_ids_b + test_ids_sin
    random.shuffle(dev_ids)
    random.shuffle(test_ids)

    # Record the sentence ids assigned to each split.
    with open(archivo_ids_dev, "w") as f_dev, open(archivo_ids_test, "w") as f_test:
        for id_ in dev_ids:
            f_dev.write(f"{id_}\n")
        for id_ in test_ids:
            f_test.write(f"{id_}\n")

    def escribir(ids, archivo_salida):
        # Write sentences in two-column CoNLL format, blank line between sentences.
        with open(archivo_salida, "w", encoding="utf-8") as out:
            for id_ in ids:
                for token, label in frases[id_]:
                    out.write(f"{token} {label}\n")
                out.write("\n")

    escribir(dev_ids, archivo_salida_dev)
    escribir(test_ids, archivo_salida_test)
procesar_training_set_balanceado("train_spacy.txt", "train_conll.txt","train_ids.txt") |
|
|
procesar_dev_test_balanceado( |
|
|
"valid_spacy.txt", |
|
|
"dev_conll.txt", |
|
|
"test_conll.txt", |
|
|
"dev_ids.txt", |
|
|
"test_ids.txt" |
|
|
) |
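

# Optional sanity check, a minimal sketch that is not part of the original pipeline:
# count how many sentences in each generated file contain a B- tag, to confirm the
# splits really are balanced. It assumes the two-column "token label" CoNLL format
# written above, with blank lines between sentences.
def contar_balance(path):
    with open(path, encoding="utf-8") as f:
        bloques = [b for b in f.read().split("\n\n") if b.strip()]
    con_b = sum(
        any(linea.split()[-1].startswith("B-")
            for linea in bloque.splitlines() if linea.strip())
        for bloque in bloques
    )
    return con_b, len(bloques) - con_b

for nombre in ("train_conll.txt", "dev_conll.txt", "test_conll.txt"):
    print(nombre, contar_balance(nombre))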


from datasets import load_dataset, Dataset, DatasetDict


def normalizar_etiqueta(label):
    # Collapse every entity type into a single PROFESION class, keeping the
    # B-/I- prefix; "O" and anything else passes through unchanged.
    if label.startswith("B-"):
        return "B-PROFESION"
    elif label.startswith("I-"):
        return "I-PROFESION"
    return label


def cargar_y_preparar_conll(paths):
    """Load the CoNLL files, normalize the labels, and return a DatasetDict plus label maps."""

    def parse_conll_dataset(file_path):
        # Load the file line by line with the "text" loader, then group lines
        # into sentences on blank lines.
        raw = load_dataset("text", data_files=file_path)["train"]

        tokens = []
        ner_tags = []
        current_tokens = []
        current_tags = []

        for example in raw:
            line = example["text"]
            if not line.strip():
                # Blank line: close the current sentence, if any.
                if current_tokens:
                    tokens.append(current_tokens)
                    ner_tags.append(current_tags)
                    current_tokens = []
                    current_tags = []
            else:
                token, tag = line.strip().split()
                current_tokens.append(token)
                current_tags.append(normalizar_etiqueta(tag))

        # Flush a final sentence that is not followed by a blank line.
        if current_tokens:
            tokens.append(current_tokens)
            ner_tags.append(current_tags)

        return {"tokens": tokens, "ner_tags": ner_tags}

    parsed = {split: parse_conll_dataset(path) for split, path in paths.items()}

    # Build one label vocabulary that is consistent across all splits.
    all_labels = set(tag for split_data in parsed.values() for seq in split_data["ner_tags"] for tag in seq)
    label_list = sorted(all_labels)
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for label, i in label2id.items()}

    def tag_ids(ner_tags):
        # Map string labels to their integer ids, sequence by sequence.
        return [[label2id[tag] for tag in seq] for seq in ner_tags]

    dataset = DatasetDict({
        split: Dataset.from_dict({
            "tokens": parsed_data["tokens"],
            "ner_tags": tag_ids(parsed_data["ner_tags"])
        })
        for split, parsed_data in parsed.items()
    })

    return dataset, label2id, id2label


paths = {
    "train": "train_conll.txt",
    "validation": "dev_conll.txt",
    "test": "test_conll.txt",
}

dataset, label2id, id2label = cargar_y_preparar_conll(paths)
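

# A quick look at what was produced (illustrative; exact sizes depend on your files).
# Since all entity labels were collapsed to PROFESION, label2id should come out as
# something like {'B-PROFESION': 0, 'I-PROFESION': 1, 'O': 2}.
print(dataset)
print(label2id)
print(dataset["train"][0])  # {'tokens': [...], 'ner_tags': [...]} with integer ids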


from datasets import Features, Sequence, ClassLabel, Value

# Cast ner_tags to a ClassLabel feature so downstream tooling can map integer
# ids back to label names. label2id was built from the sorted label list, so
# list(label2id.keys()) is already ordered by id.
ner_feature = Sequence(ClassLabel(names=list(label2id.keys())))
features = Features({
    "tokens": Sequence(Value("string")),
    "ner_tags": ner_feature
})

for split in dataset:
    dataset[split] = dataset[split].cast(features)
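

# Optional check: after the cast, the ClassLabel feature can translate ids back to
# label names, and these should agree with the id2label mapping built earlier.
feat = dataset["train"].features["ner_tags"].feature
print(feat.names)  # label names ordered by integer id
assert feat.int2str(0) == id2label[0]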