| """ |
| Módulo de procesamiento masivo para el enlazado semántico con DBpedia. |
| Gestiona la ejecución concurrente y el flujo de limpieza de entidades. |
| """ |
|
|
| import concurrent.futures |
| from tqdm.auto import tqdm |
| import pandas as pd |
| import logging |
| from src.nlp.dbpedia_linker import DBpediaLinker |
| from src.utils.nlp_utils import filter_entities_with_context |
|
|
| logger = logging.getLogger(__name__) |
|
|
class DBpediaProcessor:
    """
    Orchestrates concurrent DBpedia annotation of a DataFrame text column
    and the subsequent contextual filtering of the raw entities.
    """

    def __init__(self, language='es', max_workers=5):
        """
        Initialize the processor with the linker and the thread configuration.
        """
        self.linker = DBpediaLinker(language=language)
        self.max_workers = max_workers

    def run_full_pipeline(self, df: pd.DataFrame, text_column: str) -> pd.DataFrame:
        """
        Run the full cycle: concurrent extraction and ontological filtering.
        """
        df_result = df.copy()
        texts = df_result[text_column].tolist()

        logger.info("Starting concurrent extraction (%d threads)...", self.max_workers)
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
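            # Each task annotates a single text; 0.6 is the confidence
            # threshold handed to the linker for every annotation request.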
            def tarea_extraccion(t):
                return self.linker.annotate_text(t, confidence=0.6)

            resultados_brutos = list(tqdm(
                executor.map(tarea_extraccion, texts),
                total=len(texts),
                desc="DBpedia API calls"
            ))

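        # executor.map preserves input order, so the raw annotation lists
        # align row-for-row with df_result.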
        df_result['dbpedia_entities_raw'] = resultados_brutos

        logger.info("Applying contextual and ontological validation criteria...")
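        # The full row is passed along so the filter can use neighbouring
        # columns as context for each raw DBpedia annotation.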
        df_result['named_entities_dbpedia'] = df_result.apply(
            lambda row: filter_entities_with_context(row['dbpedia_entities_raw'], row),
            axis=1
        )

        # Drop the intermediate raw-annotation column.
        df_result = df_result.drop(columns=['dbpedia_entities_raw'])

        return df_result
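

# Minimal usage sketch. Assumptions here are hypothetical: a throwaway
# DataFrame with an 'abstract' column, and a reachable DBpedia endpoint
# behind DBpediaLinker.annotate_text. Illustrative, not part of the pipeline.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    demo_df = pd.DataFrame({
        "abstract": [
            "Miguel de Cervantes escribió Don Quijote.",
            "La Sagrada Familia fue diseñada por Antoni Gaudí.",
        ]
    })

    processor = DBpediaProcessor(language='es', max_workers=2)
    enriched = processor.run_full_pipeline(demo_df, text_column='abstract')
    print(enriched[['abstract', 'named_entities_dbpedia']])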