Update generation_script.py
generation_script.py changed (+15 −9)
|
@@ -8,7 +8,7 @@ import random
|
|
| 8 |
def contiene_b(frase):
    """Return True if any (token, label) pair in *frase* has a label starting with "B-".

    *frase* is an iterable of (token, label) tuples; only the label is inspected.
    """
    for _token, etiqueta in frase:
        if etiqueta.startswith("B-"):
            return True
    return False
|
| 10 |
|
| 11 |
-
def procesar_training_set_balanceado(archivo_entrada, archivo_salida):
|
| 12 |
frases = defaultdict(list)
|
| 13 |
|
| 14 |
with open(archivo_entrada, encoding="utf-8") as f:
|
|
@@ -19,17 +19,23 @@ def procesar_training_set_balanceado(archivo_entrada, archivo_salida):
|
|
| 19 |
token, doc_id, _, _, label = partes
|
| 20 |
frases[doc_id].append((token, label))
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
| 24 |
|
| 25 |
# Queremos aproximadamente la mitad con B
|
| 26 |
-
n = min(len(
|
| 27 |
-
|
| 28 |
-
random.shuffle(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
|
|
|
| 30 |
with open(archivo_salida, "w", encoding="utf-8") as out:
|
| 31 |
-
for
|
| 32 |
-
for token, label in
|
| 33 |
out.write(f"{token} {label}\n")
|
| 34 |
out.write("\n")
|
| 35 |
|
|
@@ -83,7 +89,7 @@ def procesar_dev_test_balanceado(archivo_entrada, archivo_salida_dev, archivo_sa
|
|
| 83 |
escribir(test_ids, archivo_salida_test)
|
| 84 |
|
| 85 |
# 🛠️ Uso
|
| 86 |
-
procesar_training_set_balanceado("train_spacy.txt", "train_conll.txt")
|
| 87 |
procesar_dev_test_balanceado(
|
| 88 |
"valid_spacy.txt",
|
| 89 |
"dev_conll.txt",
|
|
|
|
| 8 |
def contiene_b(frase):
    """Return True when at least one (token, label) pair in *frase* carries a "B-" label.

    *frase* is an iterable of (token, label) tuples, as stored per doc_id
    by the processing functions below; the token element is ignored.
    """
    return any(label.startswith("B-") for _, label in frase)
|
| 10 |
|
| 11 |
+
def procesar_training_set_balanceado(archivo_entrada, archivo_salida, archivo_ids):
|
| 12 |
frases = defaultdict(list)
|
| 13 |
|
| 14 |
with open(archivo_entrada, encoding="utf-8") as f:
|
|
|
|
| 19 |
token, doc_id, _, _, label = partes
|
| 20 |
frases[doc_id].append((token, label))
|
| 21 |
|
| 22 |
+
con_b_ids = [id_ for id_, frase in frases.items() if contiene_b(frase)]
|
| 23 |
+
sin_b_ids = [id_ for id_, frase in frases.items() if not contiene_b(frase)]
|
| 24 |
|
| 25 |
# Queremos aproximadamente la mitad con B
|
| 26 |
+
n = min(len(con_b_ids), len(sin_b_ids))
|
| 27 |
+
selected_ids = con_b_ids[:n] + sin_b_ids[:n]
|
| 28 |
+
random.shuffle(selected_ids)
|
| 29 |
+
|
| 30 |
+
# Guardar los IDs seleccionados
|
| 31 |
+
with open(archivo_ids, "w") as f_ids:
|
| 32 |
+
for id_ in selected_ids:
|
| 33 |
+
f_ids.write(f"{id_}\n")
|
| 34 |
|
| 35 |
+
# Guardar el archivo con formato CoNLL
|
| 36 |
with open(archivo_salida, "w", encoding="utf-8") as out:
|
| 37 |
+
for id_ in selected_ids:
|
| 38 |
+
for token, label in frases[id_]:
|
| 39 |
out.write(f"{token} {label}\n")
|
| 40 |
out.write("\n")
|
| 41 |
|
|
|
|
| 89 |
escribir(test_ids, archivo_salida_test)
|
| 90 |
|
| 91 |
# 🛠️ Usage: build a balanced CoNLL training file and record the selected sentence IDs.
procesar_training_set_balanceado("train_spacy.txt", "train_conll.txt", "train_ids.txt")
|
| 93 |
procesar_dev_test_balanceado(
|
| 94 |
"valid_spacy.txt",
|
| 95 |
"dev_conll.txt",
|