# Built from the output of the generation script at https://huggingface.co/datasets/luisgasco/profner_ner_master
# plus the valid.tsv and train.tsv files from task 1 of the ProfNER dataset

import pandas as pd
from collections import defaultdict
from datasets import ClassLabel, Dataset, DatasetDict, Features, Value

# 1. Load the label TSVs
df_train_labels = pd.read_csv("/content/train.tsv", sep="\t")
df_valid_labels = pd.read_csv("/content/valid.tsv", sep="\t")

# Merge the labels into a single dict
labels_dict = dict(zip(df_train_labels["tweet_id"], df_train_labels["label"]))
labels_dict.update(dict(zip(df_valid_labels["tweet_id"], df_valid_labels["label"])))
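# labels_dict now maps tweet_id -> binary label, e.g. {1255769214697041920: 1}
# (the example pair is hypothetical; the real values come from the TSVs above).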

# 2. Load the IDs for each split
def load_ids(path):
    with open(path, encoding="utf-8") as f:
        return set(line.strip() for line in f if line.strip())

train_ids = load_ids("/content/train_ids.txt")
dev_ids = load_ids("/content/dev_ids.txt")
test_ids = load_ids("/content/test_ids.txt")

# Normalize every ID to a string so lookups agree across pandas and the ID files
labels_dict = {str(k): v for k, v in labels_dict.items()}
train_ids = {str(id_) for id_ in train_ids}
dev_ids = {str(id_) for id_ in dev_ids}
test_ids = {str(id_) for id_ in test_ids}

# 3. Read the CoNLL-style .spacy files (train + valid together)
def cargar_textos_conll(path):
    # Each non-empty line is expected to carry 5 whitespace-separated columns;
    # only the first two (token, doc_id) are used here.
    textos = defaultdict(list)
    with open(path, encoding="utf-8") as f:
        for line in f:
            parts = line.split()
            if len(parts) == 5:
                token, doc_id, *_ = parts
                textos[doc_id].append(token)
    return textos

textos_train = cargar_textos_conll("/content/train_spacy.txt")
textos_valid = cargar_textos_conll("/content/valid_spacy.txt")
textos = {**textos_train, **textos_valid}
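# Note: with this dict merge, a doc_id present in both files keeps the
# tokens from valid_spacy.txt (the later unpacking wins).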

# 4. Build a Dataset for each split
def construir_split(ids):
    data = []
    for doc_id in ids:
        # Keep only IDs that have both a reconstructed text and a label
        if doc_id in textos and doc_id in labels_dict:
            text = " ".join(textos[doc_id])
            label = int(labels_dict[doc_id])
            data.append({"tweet_id": doc_id, "text": text, "label": label})
    return Dataset.from_list(data)

# 5. Assemble the DatasetDict
dataset = DatasetDict({
    "train": construir_split(train_ids),
    "validation": construir_split(dev_ids),
    "test": construir_split(test_ids),
})
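
# The "label" column is still a plain int64 at this point; the cast below
# attaches the class names (0 -> SIN_PROFESION, 1 -> CON_PROFESION) via ClassLabel.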

# Define the class label names
label_names = ["SIN_PROFESION", "CON_PROFESION"]

# Build the feature schema with ClassLabel
features = Features({
    "tweet_id": Value("string"),
    "text": Value("string"),
    "label": ClassLabel(names=label_names)
})

# Apply the schema to every split
for split in dataset:
    dataset[split] = dataset[split].cast(features)
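
# Quick sanity check (a minimal sketch): print the split sizes, the label
# mapping, and one example to verify the build before saving it anywhere.
print(dataset)
print(dataset["train"].features["label"].names)  # ['SIN_PROFESION', 'CON_PROFESION']
print(dataset["train"][0])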