# SpanishMicroEvents/src/events/batch_processor.py
"""
Módulo para procesamiento por lotes de extracción de eventos 5W1H.
Incluye checkpoints, logging, batching para GPU y capacidad de retomar procesos.
"""
import json
import ast
import os
import logging
from typing import List, Dict, Any, Optional
from tqdm.auto import tqdm
from .classic import ClassicExtractorV3
logger = logging.getLogger(__name__)
def parse_dbpedia_entities(raw_entities) -> List[Dict]:
"""Parsea las entidades DBpedia de forma segura."""
if not raw_entities:
return []
if isinstance(raw_entities, list):
return raw_entities
if isinstance(raw_entities, str):
try:
return ast.literal_eval(raw_entities)
except (ValueError, SyntaxError):
return []
return []
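# Illustrative usage of parse_dbpedia_entities (the entity keys come from the upstream
# DBpedia annotation step and are assumed here, not defined by this module):
#   parse_dbpedia_entities("[{'surfaceForm': 'Madrid', 'URI': 'http://dbpedia.org/resource/Madrid'}]")
#       -> [{'surfaceForm': 'Madrid', 'URI': 'http://dbpedia.org/resource/Madrid'}]
#   parse_dbpedia_entities(None) -> []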
def extract_events_from_doc(doc, extractor: ClassicExtractorV3, dbpedia_ents: List[Dict]) -> List[Dict]:
"""
Extrae eventos 5W1H de un documento Stanza ya procesado.
Procesa TODAS las oraciones del documento.
"""
events = []
for sent in doc.sentences:
event = extractor.extract(sent, dbpedia_ents=dbpedia_ents)
        # Keep only non-empty events: a start offset of -1 means the slot was not found
        if event['who'][0]['start'] != -1 or event['what'][0]['start'] != -1:
events.append(event)
return events
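# Illustrative event shape implied by the filter above: each 5W1H slot holds a list of
# span dicts with a 'start' offset, where -1 marks an empty slot (the full key set and
# span fields come from ClassicExtractorV3 and are an assumption in this sketch):
#   {'who': [{'start': 12, ...}], 'what': [{'start': -1, ...}], ...}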
def process_corpus_batched(
df,
nlp,
output_path: str,
text_column: str = 'articleBody',
entities_column: str = 'named_entities_dbpedia',
checkpoint_interval: int = 500,
resume_from: int = 0,
batch_size: int = 32
):
"""
Procesa corpus con BATCHING para máximo rendimiento en GPU.
Procesa el texto COMPLETO de cada noticia (sin límite de caracteres).
Args:
df: DataFrame con el corpus
nlp: Pipeline de Stanza
output_path: Ruta para guardar resultados
text_column: Nombre de columna con el texto
entities_column: Nombre de columna con entidades DBpedia
checkpoint_interval: Frecuencia de guardado automático
resume_from: Índice desde donde retomar
batch_size: Número de documentos por batch (32-64 recomendado para T4)
"""
import pandas as pd
results = ['[]'] * len(df)
    # Load previous results if they exist
if resume_from > 0 and os.path.exists(output_path):
df_prev = pd.read_csv(output_path)
for i in range(min(resume_from, len(df_prev))):
results[i] = df_prev.iloc[i].get('events_5w1h', '[]')
logger.info(f"Retomando desde índice {resume_from}")
total_docs = len(df) - resume_from
logger.info(f"Procesando {total_docs} documentos con batch_size={batch_size}")
    processed = 0
    errors = 0
    last_checkpoint = 0
    # Process in batches
indices = list(range(resume_from, len(df)))
for batch_start in tqdm(range(0, len(indices), batch_size),
desc="Batches",
total=(len(indices) + batch_size - 1) // batch_size):
batch_indices = indices[batch_start:batch_start + batch_size]
batch_rows = [df.iloc[i] for i in batch_indices]
        # Prepare the batch texts (no character limit applied)
batch_texts = []
batch_entities = []
valid_indices = []
for idx, row in zip(batch_indices, batch_rows):
text = row.get(text_column, '')
if isinstance(text, str) and len(text) >= 20:
batch_texts.append(text)
batch_entities.append(parse_dbpedia_entities(row.get(entities_column)))
valid_indices.append(idx)
else:
results[idx] = '[]'
if not batch_texts:
continue
try:
            # Batch processing with Stanza
docs = nlp.bulk_process(batch_texts)
            # Extract events from each document
for doc, entities, idx in zip(docs, batch_entities, valid_indices):
try:
extractor = ClassicExtractorV3()
extractor.reset_context()
events = extract_events_from_doc(doc, extractor, entities)
results[idx] = json.dumps(events, ensure_ascii=False)
processed += 1
except Exception as e:
logger.error(f"Error extrayendo eventos doc {idx}: {e}")
results[idx] = '[]'
errors += 1
except Exception as e:
logger.error(f"Error en batch {batch_start}: {e}")
for idx in valid_indices:
results[idx] = '[]'
errors += 1
        # Checkpoint: `processed` grows by up to batch_size per batch, so compare against
        # the last checkpoint instead of relying on an exact modulo match
        if processed - last_checkpoint >= checkpoint_interval:
            df_temp = df.copy()
            df_temp['events_5w1h'] = results
            df_temp.to_csv(output_path, index=False)
            last_checkpoint = processed
            logger.info(f"Checkpoint: {processed} documents processed")
df['events_5w1h'] = results
logger.info(f"Completado: {processed} procesados, {errors} errores")
return df
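# Reading the results back (illustrative): each value in the 'events_5w1h' column is a
# JSON string holding the list of event dicts for that article, e.g.
#   events = json.loads(df.loc[0, 'events_5w1h'])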
# The original per-document function is kept for backwards compatibility
def process_corpus(
df,
nlp,
output_path: str,
text_column: str = 'articleBody',
entities_column: str = 'named_entities_dbpedia',
checkpoint_interval: int = 500,
resume_from: int = 0,
    max_text_length: Optional[int] = None  # None = no limit
):
"""
Procesa corpus documento a documento (versión simple).
Para mejor rendimiento GPU, usar process_corpus_batched.
"""
import pandas as pd
extractor = ClassicExtractorV3()
results = ['[]'] * len(df)
if resume_from > 0 and os.path.exists(output_path):
df_prev = pd.read_csv(output_path)
for i in range(min(resume_from, len(df_prev))):
results[i] = df_prev.iloc[i].get('events_5w1h', '[]')
logger.info(f"Retomando desde índice {resume_from}")
logger.info(f"Procesando {len(df) - resume_from} documentos...")
processed = 0
errors = 0
for index in tqdm(range(resume_from, len(df)), initial=resume_from, total=len(df)):
row = df.iloc[index]
try:
text = row.get(text_column, '')
if not isinstance(text, str) or len(text) < 20:
continue
            # Apply the length limit only if one was specified
if max_text_length:
text = text[:max_text_length]
dbpedia_ents = parse_dbpedia_entities(row.get(entities_column))
doc = nlp(text)
extractor.reset_context()
events = extract_events_from_doc(doc, extractor, dbpedia_ents)
results[index] = json.dumps(events, ensure_ascii=False)
processed += 1
if processed % checkpoint_interval == 0:
df_temp = df.copy()
df_temp['events_5w1h'] = results
df_temp.to_csv(output_path, index=False)
logger.info(f"Checkpoint: {processed} documentos procesados")
except Exception as e:
logger.error(f"Error en doc {index}: {e}")
results[index] = '[]'
errors += 1
df['events_5w1h'] = results
logger.info(f"Completado: {processed} procesados, {errors} errores")
return df
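
if __name__ == "__main__":
    # Minimal usage sketch, assuming a corpus CSV with an 'articleBody' column and the
    # 'stanza' and 'pandas' packages installed; the file paths, processor list and batch
    # size below are illustrative, not prescribed by this module. Run it as a module
    # (e.g. `python -m events.batch_processor`) so the relative import of
    # ClassicExtractorV3 resolves.
    import pandas as pd
    import stanza

    logging.basicConfig(level=logging.INFO)

    corpus_df = pd.read_csv("corpus.csv")  # hypothetical input file
    spanish_nlp = stanza.Pipeline(
        lang="es",
        processors="tokenize,mwt,pos,lemma,depparse,ner",
        use_gpu=True,
    )
    process_corpus_batched(
        corpus_df,
        spanish_nlp,
        output_path="corpus_events.csv",  # hypothetical output file
        batch_size=32,
    )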