luisgasco commited on
Commit
bb98a04
·
verified ·
1 Parent(s): e9e7ae3

Create generation_script.py

Browse files
Files changed (1) hide show
  1. generation_script.py +76 -0
generation_script.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Con el output del generation script de https://huggingface.co/datasets/luisgasco/profner_ner_master
2
+ # Y añadiendo los archivos valid.tsv y train.tsv de la task 1 del dataset de Profner
3
+
4
+ import pandas as pd
5
+ from collections import defaultdict
6
+ from datasets import Dataset, DatasetDict
7
+
8
# 1. Load the label TSVs for the ProfNER classification task.
df_train_labels = pd.read_csv("/content/train.tsv", sep="\t")
df_valid_labels = pd.read_csv("/content/valid.tsv", sep="\t")

# Merge both splits into one tweet_id -> label mapping
# (on a duplicate ID the valid.tsv label wins, as in a dict.update()).
labels_dict = {
    **dict(zip(df_train_labels["tweet_id"], df_train_labels["label"])),
    **dict(zip(df_valid_labels["tweet_id"], df_valid_labels["label"])),
}
15
+
16
# 2. Load the tweet IDs belonging to each split.
def load_ids(path):
    """Return the set of non-empty, whitespace-stripped lines (tweet IDs) in *path*."""
    ids = set()
    with open(path, encoding="utf-8") as handle:
        for raw_line in handle:
            tweet_id = raw_line.strip()
            if tweet_id:
                ids.add(tweet_id)
    return ids
20
+
21
# IDs for each split. `load_ids` reads a text file and returns stripped
# strings, so the values are already `str` — the previous extra
# `set(str(id_) for id_ in ...)` passes were redundant and are removed.
train_ids = load_ids("/content/train_ids.txt")
dev_ids = load_ids("/content/dev_ids.txt")
test_ids = load_ids("/content/test_ids.txt")

# Normalise every label key to str: pandas may have parsed `tweet_id` as
# int64, and lookups against the (string) split IDs must match.
labels_dict = {str(key): value for key, value in labels_dict.items()}
29
# 3. Read the CoNLL-style .spacy text files (train + valid are merged later).
def cargar_textos_conll(path):
    """Group tokens by document ID from a whitespace-separated CoNLL file.

    Only rows with exactly five columns are used: column 1 is the token
    and column 2 the document ID; any other row (including blanks) is
    skipped. Returns a dict-like mapping doc_id -> list of tokens.
    """
    textos = defaultdict(list)
    with open(path, encoding="utf-8") as handle:
        for raw_line in handle:
            parts = raw_line.split()
            if len(parts) != 5:
                continue  # blank line or malformed row
            textos[parts[1]].append(parts[0])
    return textos
40
+
41
# Parse both CoNLL files; on a duplicate doc ID the valid file wins.
textos_train = cargar_textos_conll("/content/train_spacy.txt")
textos_valid = cargar_textos_conll("/content/valid_spacy.txt")
textos = dict(textos_train)
textos.update(textos_valid)
44
+
45
# 4. Build one Dataset per split.
def construir_split(ids):
    """Build a `Dataset` for the given split IDs.

    Keeps only tweets present in both the token dictionary (`textos`)
    and the label table (`labels_dict`); text is the space-joined tokens.
    """
    rows = [
        {
            "tweet_id": doc_id,
            "text": " ".join(textos[doc_id]),
            "label": int(labels_dict[doc_id]),
        }
        for doc_id in ids
        if doc_id in textos and doc_id in labels_dict
    ]
    return Dataset.from_list(rows)
54
+
55
# 5. Assemble the DatasetDict from the three splits.
dataset = DatasetDict(
    {
        "train": construir_split(train_ids),
        "validation": construir_split(dev_ids),
        "test": construir_split(test_ids),
    }
)

from datasets import ClassLabel, Features, Value

# Human-readable names for the binary label (0 = SIN_PROFESION, 1 = CON_PROFESION).
label_names = ["SIN_PROFESION", "CON_PROFESION"]

# Typed schema: ClassLabel maps the integer labels to the names above.
features = Features(
    {
        "tweet_id": Value("string"),
        "text": Value("string"),
        "label": ClassLabel(names=label_names),
    }
)

# Cast every split to the typed schema.
for split_name in dataset:
    dataset[split_name] = dataset[split_name].cast(features)