luisgasco committed
Commit 79f9cc8 · verified · 1 Parent(s): ebea1fd

Create generation_script.py

Files changed (1):
  1. generation_script.py +174 -0
generation_script.py ADDED
@@ -0,0 +1,174 @@
# This script was generated using the train_spacy.txt and valid_spacy.txt files.
# All labels are replaced by B-PROFESION or I-PROFESION.
# The training and validation sets keep half of the sentences with labels and half without.

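# A hypothetical input line (5 whitespace-separated columns: token, document
# id, two columns this script ignores, label); the real files may differ:
#
#   enfermera  doc_001  10  19  B-OCUPACION
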
from collections import defaultdict
import random

def contiene_b(frase):
    # True if any token in the sentence carries a label starting with "B-".
    return any(label.startswith("B-") for _, label in frase)

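# Illustration with a made-up sentence:
#   contiene_b([("la", "O"), ("enfermera", "B-OCUPACION")])  # -> True
#   contiene_b([("hola", "O"), ("mundo", "O")])              # -> False
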
def procesar_training_set_balanceado(archivo_entrada, archivo_salida):
    frases = defaultdict(list)

    # Group (token, label) pairs by document ID; lines must have 5 columns.
    with open(archivo_entrada, encoding="utf-8") as f:
        for linea in f:
            if linea.strip():
                partes = linea.strip().split()
                if len(partes) == 5:
                    token, doc_id, _, _, label = partes
                    frases[doc_id].append((token, label))

    con_b = [f for f in frases.values() if contiene_b(f)]
    sin_b = [f for f in frases.values() if not contiene_b(f)]

    # We want roughly half of the sentences to contain a B- label, so we keep
    # n from each group; the excess from the larger group is dropped.
    n = min(len(con_b), len(sin_b))
    seleccionadas = con_b[:n] + sin_b[:n]
    random.shuffle(seleccionadas)

    # Write CoNLL-style output: "token label" per line, blank line between sentences.
    with open(archivo_salida, "w", encoding="utf-8") as out:
        for frase in seleccionadas:
            for token, label in frase:
                out.write(f"{token} {label}\n")
            out.write("\n")

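# Note: the shuffles in these functions are unseeded, so the selection changes
# between runs; calling random.seed() with a fixed value at the top of the
# script would make the splits reproducible (an optional tweak).
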
def procesar_dev_test_balanceado(archivo_entrada, archivo_salida_dev, archivo_salida_test, archivo_ids_dev, archivo_ids_test):
    frases = defaultdict(list)

    with open(archivo_entrada, encoding="utf-8") as f:
        for linea in f:
            if linea.strip():
                partes = linea.strip().split()
                if len(partes) == 5:
                    token, doc_id, _, _, label = partes
                    frases[doc_id].append((token, label))

    # Separate documents with and without B- labels.
    con_b_ids = [id_ for id_, frase in frases.items() if contiene_b(frase)]
    sin_b_ids = [id_ for id_, frase in frases.items() if not contiene_b(frase)]

    # Balance the split: half of each group goes to dev, half to test.
    random.shuffle(con_b_ids)
    mitad_b = len(con_b_ids) // 2
    dev_ids_b = con_b_ids[:mitad_b]
    test_ids_b = con_b_ids[mitad_b:]

    random.shuffle(sin_b_ids)
    mitad_sin = len(sin_b_ids) // 2
    dev_ids_sin = sin_b_ids[:mitad_sin]
    test_ids_sin = sin_b_ids[mitad_sin:]

    dev_ids = dev_ids_b + dev_ids_sin
    test_ids = test_ids_b + test_ids_sin
    random.shuffle(dev_ids)
    random.shuffle(test_ids)

    # Save the document IDs of each split.
    with open(archivo_ids_dev, "w") as f_dev, open(archivo_ids_test, "w") as f_test:
        for id_ in dev_ids:
            f_dev.write(f"{id_}\n")
        for id_ in test_ids:
            f_test.write(f"{id_}\n")

    # Write the CoNLL files.
    def escribir(ids, archivo_salida):
        with open(archivo_salida, "w", encoding="utf-8") as out:
            for id_ in ids:
                for token, label in frases[id_]:
                    out.write(f"{token} {label}\n")
                out.write("\n")

    escribir(dev_ids, archivo_salida_dev)
    escribir(test_ids, archivo_salida_test)

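# Design note: dev and test each receive half of the documents that contain a
# B- label and half of those that do not, so both splits preserve the input's
# ratio of labelled to unlabelled documents.
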
# 🛠️ Usage
procesar_training_set_balanceado("train_spacy.txt", "train_conll.txt")
procesar_dev_test_balanceado(
    "valid_spacy.txt",
    "dev_conll.txt",
    "test_conll.txt",
    "dev_ids.txt",
    "test_ids.txt"
)
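
# Optional sanity check (a minimal sketch; contar_balance is a hypothetical
# helper, not part of the pipeline above): count sentences with and without a
# B- label in one of the CoNLL files just written.
def contar_balance(path):
    con = sin = 0
    tiene_b = tiene_tokens = False
    with open(path, encoding="utf-8") as f:
        for linea in f:
            if linea.strip():
                tiene_tokens = True
                if linea.split()[1].startswith("B-"):
                    tiene_b = True
            elif tiene_tokens:  # blank line ends a sentence
                con += tiene_b
                sin += not tiene_b
                tiene_b = tiene_tokens = False
    print(f"{path}: {con} sentences with B-, {sin} without")

# contar_balance("train_conll.txt")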


from datasets import load_dataset, Dataset, DatasetDict

def normalizar_etiqueta(label):
    # Collapse every entity type into a single PROFESION class.
    if label.startswith("B-"):
        return "B-PROFESION"
    elif label.startswith("I-"):
        return "I-PROFESION"
    return label

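# Hypothetical examples: "B-ACTIVIDAD" -> "B-PROFESION", "I-ACTIVIDAD" ->
# "I-PROFESION"; anything else (e.g. "O") passes through unchanged.
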
def cargar_y_preparar_conll(paths):
    def parse_conll_dataset(file_path):
        # Read the CoNLL file as one text example per line.
        raw = load_dataset("text", data_files=file_path)["train"]

        tokens = []
        ner_tags = []
        current_tokens = []
        current_tags = []

        for example in raw:
            line = example["text"]
            if not line.strip():
                # A blank line closes the current sentence.
                if current_tokens:
                    tokens.append(current_tokens)
                    ner_tags.append(current_tags)
                    current_tokens = []
                    current_tags = []
            else:
                token, tag = line.strip().split()
                current_tokens.append(token)
                current_tags.append(normalizar_etiqueta(tag))

        # Flush the last sentence in case the file lacks a trailing blank line.
        if current_tokens:
            tokens.append(current_tokens)
            ner_tags.append(current_tags)

        return {"tokens": tokens, "ner_tags": ner_tags}

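    # parse_conll_dataset returns parallel lists; a hypothetical sentence:
    #   tokens[0]   = ["La", "enfermera", "trabaja"]
    #   ner_tags[0] = ["O", "B-PROFESION", "O"]
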
    # Load and parse each split.
    parsed = {split: parse_conll_dataset(path) for split, path in paths.items()}

    # Build label2id / id2label from every tag seen across all splits.
    all_labels = set(tag for split_data in parsed.values() for seq in split_data["ner_tags"] for tag in seq)
    label_list = sorted(all_labels)
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for label, i in label2id.items()}

    def tag_ids(ner_tags):
        return [[label2id[tag] for tag in seq] for seq in ner_tags]

    dataset = DatasetDict({
        split: Dataset.from_dict({
            "tokens": parsed_data["tokens"],
            "ner_tags": tag_ids(parsed_data["ner_tags"])
        })
        for split, parsed_data in parsed.items()
    })

    return dataset, label2id, id2label

paths = {
    "train": "train_conll.txt",
    "validation": "dev_conll.txt",
    "test": "test_conll.txt"
}

dataset, label2id, id2label = cargar_y_preparar_conll(paths)
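
# Quick inspection (a sketch; exact contents depend on the input data):
#   print(dataset)              # DatasetDict with train/validation/test splits
#   print(label2id)             # e.g. {'B-PROFESION': 0, 'I-PROFESION': 1, 'O': 2}
#   print(dataset["train"][0])  # {'tokens': [...], 'ner_tags': [...]}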


from datasets import Features, Sequence, ClassLabel, Value

# We assume label2id is already defined (e.g. {'B-PROFESION': 0, 'I-PROFESION': 1, 'O': 2}).
# list(label2id.keys()) is in id order here, as ClassLabel expects, because
# label2id was built by enumerating the sorted label list.
ner_feature = Sequence(ClassLabel(names=list(label2id.keys())))
features = Features({
    "tokens": Sequence(Value("string")),
    "ner_tags": ner_feature
})

# Cast each split so that ner_tags becomes a proper ClassLabel feature.
for split in dataset:
    dataset[split] = dataset[split].cast(features)
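
# Optional follow-up (hypothetical): persist the cast dataset to disk.
# The directory name below is an assumption, not from the original commit.
# dataset.save_to_disk("profesion_ner_dataset")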