# This file was generated from train_spacy.txt and valid_spacy.txt.
# All entity labels have been replaced by B-PROFESION or I-PROFESION.
# In both the training and validation sets, half of the sentences contain labels and the other half do not.

from collections import defaultdict
import random

def contiene_b(frase):
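    """Return True if the sentence contains at least one B- label."""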
    return any(label.startswith("B-") for _, label in frase)

def procesar_training_set_balanceado(archivo_entrada, archivo_salida, archivo_ids):
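    """Build a balanced training set (equal numbers of sentences with and
    without B- labels) and write it out in two-column CoNLL format."""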
    frases = defaultdict(list)

    with open(archivo_entrada, encoding="utf-8") as f:
        for linea in f:
            if linea.strip():
                partes = linea.strip().split()
                if len(partes) == 5:  # expected columns: token, doc_id, <ignored>, <ignored>, label
                    token, doc_id, _, _, label = partes
                    frases[doc_id].append((token, label))

    con_b_ids = [id_ for id_, frase in frases.items() if contiene_b(frase)]
    sin_b_ids = [id_ for id_, frase in frases.items() if not contiene_b(frase)]

    # We want roughly half of the kept sentences to contain a B- label
    n = min(len(con_b_ids), len(sin_b_ids))
    selected_ids = con_b_ids[:n] + sin_b_ids[:n]
    random.shuffle(selected_ids)

    # Save the selected IDs
    with open(archivo_ids, "w") as f_ids:
        for id_ in selected_ids:
            f_ids.write(f"{id_}\n")

    # Write the output in CoNLL format
    with open(archivo_salida, "w", encoding="utf-8") as out:
        for id_ in selected_ids:
            for token, label in frases[id_]:
                out.write(f"{token} {label}\n")
            out.write("\n")

def procesar_dev_test_balanceado(archivo_entrada, archivo_salida_dev, archivo_salida_test, archivo_ids_dev, archivo_ids_test):
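    """Split the validation file into balanced dev and test sets, keeping the
    with/without-B- proportion in each half."""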
    frases = defaultdict(list)

    with open(archivo_entrada, encoding="utf-8") as f:
        for linea in f:
            if linea.strip():
                partes = linea.strip().split()
                if len(partes) == 5:  # expected columns: token, doc_id, <ignored>, <ignored>, label
                    token, doc_id, _, _, label = partes
                    frases[doc_id].append((token, label))

    # Separate documents with and without B- labels
    con_b_ids = [id_ for id_, frase in frases.items() if contiene_b(frase)]
    sin_b_ids = [id_ for id_, frase in frases.items() if not contiene_b(frase)]

    # Balance the split: half of each group goes to dev, half to test
    random.shuffle(con_b_ids)
    mitad_b = len(con_b_ids) // 2
    dev_ids_b = con_b_ids[:mitad_b]
    test_ids_b = con_b_ids[mitad_b:]

    random.shuffle(sin_b_ids)
    mitad_sin = len(sin_b_ids) // 2
    dev_ids_sin = sin_b_ids[:mitad_sin]
    test_ids_sin = sin_b_ids[mitad_sin:]

    dev_ids = dev_ids_b + dev_ids_sin
    test_ids = test_ids_b + test_ids_sin
    random.shuffle(dev_ids)
    random.shuffle(test_ids)

    # Save the IDs
    with open(archivo_ids_dev, "w") as f_dev, open(archivo_ids_test, "w") as f_test:
        for id_ in dev_ids:
            f_dev.write(f"{id_}\n")
        for id_ in test_ids:
            f_test.write(f"{id_}\n")

    # Write the output files
    def escribir(ids, archivo_salida):
        with open(archivo_salida, "w", encoding="utf-8") as out:
            for id_ in ids:
                for token, label in frases[id_]:
                    out.write(f"{token} {label}\n")
                out.write("\n")

    escribir(dev_ids, archivo_salida_dev)
    escribir(test_ids, archivo_salida_test)

# 🛠️ Usage
procesar_training_set_balanceado("train_spacy.txt", "train_conll.txt","train_ids.txt")
procesar_dev_test_balanceado(
    "valid_spacy.txt", 
    "dev_conll.txt", 
    "test_conll.txt", 
    "dev_ids.txt", 
    "test_ids.txt"
)
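
# Optional sanity check (a minimal sketch, not part of the original pipeline):
# re-read one of the generated CoNLL files and confirm the with/without-B- balance.
# `verificar_balance` is a hypothetical helper introduced here for illustration.
def verificar_balance(archivo_conll):
    con_b = sin_b = 0
    frase = []
    with open(archivo_conll, encoding="utf-8") as f:
        for linea in f:
            if linea.strip():
                frase.append(tuple(linea.strip().split()))
            elif frase:
                # a blank line closes the current sentence
                if contiene_b(frase):
                    con_b += 1
                else:
                    sin_b += 1
                frase = []
    print(f"{archivo_conll}: {con_b} sentences with B- labels, {sin_b} without")

verificar_balance("train_conll.txt")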


from datasets import load_dataset, Dataset, DatasetDict

def normalizar_etiqueta(label):
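    """Collapse any entity label onto a single PROFESION type."""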
    if label.startswith("B-"):
        return "B-PROFESION"
    elif label.startswith("I-"):
        return "I-PROFESION"
    return label

def cargar_y_preparar_conll(paths):
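    """Parse the CoNLL files in `paths` into a DatasetDict, returning it along
    with the label2id / id2label mappings."""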
    def parse_conll_dataset(file_path):
        raw = load_dataset("text", data_files=file_path)["train"]

        tokens = []
        ner_tags = []
        current_tokens = []
        current_tags = []

        for example in raw:
            line = example["text"]
            if not line.strip():
                if current_tokens:
                    tokens.append(current_tokens)
                    ner_tags.append(current_tags)
                    current_tokens = []
                    current_tags = []
            else:
                token, tag = line.strip().split()  # two-column format: "token label"
                current_tokens.append(token)
                current_tags.append(normalizar_etiqueta(tag))

        return {"tokens": tokens, "ner_tags": ner_tags}

    # Load and parse each split
    parsed = {split: parse_conll_dataset(path) for split, path in paths.items()}

    # Build the label2id mapping
    all_labels = set(tag for split_data in parsed.values() for seq in split_data["ner_tags"] for tag in seq)
    label_list = sorted(all_labels)
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for label, i in label2id.items()}

    def tag_ids(ner_tags):
        return [[label2id[tag] for tag in seq] for seq in ner_tags]

    dataset = DatasetDict({
        split: Dataset.from_dict({
            "tokens": parsed_data["tokens"],
            "ner_tags": tag_ids(parsed_data["ner_tags"])
        })
        for split, parsed_data in parsed.items()
    })

    return dataset, label2id, id2label

paths = {
    "train": "train_conll.txt",
    "validation": "dev_conll.txt",
    "test": "test_conll.txt"
}

dataset, label2id, id2label = cargar_y_preparar_conll(paths)
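
# Optional: a quick look at what was produced (split sizes and label mapping).
print({split: dataset[split].num_rows for split in dataset})
print("label2id:", label2id)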


from datasets import Features, Sequence, ClassLabel, Value

# We assume label2id has already been built above (e.g. with keys ['B-PROFESION', 'I-PROFESION', 'O'])
ner_feature = Sequence(ClassLabel(names=list(label2id.keys())))
features = Features({
    "tokens": Sequence(Value("string")),
    "ner_tags": ner_feature
})

# Apply the feature schema to each split of the dataset
for split in dataset:
    dataset[split] = dataset[split].cast(features)
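
# After the cast, ner_tags carry a ClassLabel feature, so ids can be decoded
# directly from the dataset schema instead of the external id2label dict:
ner_labels = dataset["train"].features["ner_tags"].feature
ejemplo = dataset["train"][0]
print(ejemplo["tokens"])
print(ner_labels.int2str(ejemplo["ner_tags"]))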