| """
|
| Módulo de limpieza de datos.
|
|
|
| Normaliza tipos y estructura el contenido JSON.
|
| """
|
|
|
| import pandas as pd
|
| import json
|
| import logging
|
| from typing import Optional, Dict, List, Any
|
| import html
|
| import re
|
| import dateparser
|
| import multiprocessing as mp
|
| import numpy as np
|
| from src.data import cleaner
|
|
|
| logger = logging.getLogger(__name__)
|
|
|
| def _safe_json_parse(json_str: str) -> Optional[Dict[str, Any]]:
|
| """
|
| Parsea cadenas JSON controlando errores de decodificación.
|
| """
|
| if not isinstance(json_str, str):
|
| return None
|
| try:
|
| return json.loads(json_str)
|
| except (json.JSONDecodeError, TypeError):
|
| return None
|
|
|
def run_initial_preprocessing(df: pd.DataFrame) -> pd.DataFrame:
    """Run the initial cleaning and type-conversion pass.

    Transformations:
    1. 'publication_date' -> datetime (unparseable values become NaT).
    2. 'id' -> string.
    3. 'json_data' -> deserialized dict, or None when unparseable.

    The input frame is not mutated; a processed copy is returned.
    """
    logger.info("Iniciando preprocesamiento de tipos...")

    out = df.copy()
    out['publication_date'] = pd.to_datetime(out['publication_date'], errors='coerce')
    out['id'] = out['id'].astype(str)
    out['json_data'] = out['json_data'].apply(_safe_json_parse)
    return out
|
|
|
| def _unescape_text(text: Any) -> Optional[str]:
|
| """
|
| Decodifica entidades HTML en cadenas de texto.
|
| """
|
| if isinstance(text, str):
|
| return html.unescape(text)
|
| return text
|
|
|
def extract_and_clean_content(df: pd.DataFrame) -> pd.DataFrame:
    """Project JSON fields into columns and normalise their textual content.

    Steps:
    1. Pull 'headline', 'articleBody' and 'description' out of 'json_data'.
    2. Decode HTML entities in each extracted field.
    3. Drop rows whose article body is missing.
    """
    logger.info("Iniciando extracción y limpieza de campos textuales...")

    df_ext = df.copy()

    def _pick(payload, key):
        # Missing or unparsed payloads yield None for every projected field.
        return payload.get(key) if isinstance(payload, dict) else None

    # Extraction and HTML-unescaping are independent per column, so both
    # passes are fused into a single loop over the projected fields.
    for field in ('headline', 'articleBody', 'description'):
        extracted = df_ext['json_data'].apply(_pick, key=field)
        df_ext[field] = extracted.apply(_unescape_text)

    initial_count = len(df_ext)
    df_ext = df_ext.dropna(subset=['articleBody'])
    logger.info(f"Limpieza finalizada. Registros descartados por falta de cuerpo: {initial_count - len(df_ext)}")

    return df_ext
|
|
|
# NOTE(review): a redundant `logger = logging.getLogger(__name__)` assignment
# used to live here; the module-level logger is already created near the top
# of this file, so the duplicate has been removed.

# Lexical pattern for candidate date mentions in Spanish text. Alternatives:
#  - numeric dates with -/ separators (e.g. 12/05/2023, 2023-05-12)
#  - "12 de mayo [de 2023]" style phrases
#  - weekday names
#  - relative expressions ("hoy", "ayer", "hace 3 días", ...)
# Compiled once at import time; matching is case-insensitive.
DATE_PATTERN_RELATIVE = re.compile(
    r'\b(\d{1,4}[-/]\d{1,2}[-/]\d{1,4})|'
    r'(\d{1,2}\s+de\s+[a-zA-Z]+(\s+de\s+\d{4})?)|'
    r'(lunes|martes|miércoles|jueves|viernes|sábado|domingo)|'
    r'\b(hoy|ayer|(?:pasado )?mañana|anoche|ante(?:s de )?ayer|'
    r'hace\s+(?:un|una|\d+|\w+)\s+(?:días?|semanas?|mes(?:es)?|años?))\b',
    re.IGNORECASE
)
|
|
|
def _find_date_mentions(text: str) -> List[str]:
    """Scan *text* with the date pattern and return all non-empty captures.

    Non-string inputs (e.g. NaN cells) yield an empty list.
    """
    if not isinstance(text, str):
        return []
    hits: List[str] = []
    # findall returns one tuple per match (one slot per capture group);
    # keep only the groups that actually captured text.
    for groups in DATE_PATTERN_RELATIVE.findall(text):
        hits.extend(g for g in groups if g)
    return hits
|
|
|
def _resolve_relative_date(mention: str, base_date: pd.Timestamp) -> Optional[str]:
    """Resolve one temporal mention against *base_date* into 'YYYY-MM-DD'.

    Returns None when the mention cannot be parsed, or when the resolved
    date lands a year or more away from the reference — a guard against
    regex false positives.
    """
    dt = dateparser.parse(
        mention,
        languages=['es'],
        settings={'RELATIVE_BASE': base_date},
    )
    if dt is None:
        return None
    if abs((dt.date() - base_date.date()).days) >= 365:
        return None
    return dt.strftime('%Y-%m-%d')
|
|
|
def extract_temporal_expressions(df: pd.DataFrame, text_column: str = 'articleBody') -> pd.DataFrame:
    """Detect and normalise temporal expressions found in *text_column*.

    Pipeline:
    1. Regex-based detection of date candidates.
    2. Resolution of relative references against 'publication_date'.
    3. Normalisation to ISO 8601 ('YYYY-MM-DD').

    Adds 'date_mentions' (raw matches) and 'all_extracted_dates'
    (deduplicated resolved dates) columns to a copy of *df*.
    """
    logger.info("Iniciando normalización de expresiones temporales...")

    df_temp = df.copy()
    df_temp['date_mentions'] = df_temp[text_column].apply(_find_date_mentions)

    def _resolve_all(row):
        # Rows without mentions, or without a usable reference date,
        # cannot be resolved.
        if not row['date_mentions'] or pd.isnull(row['publication_date']):
            return []
        resolved = {
            _resolve_relative_date(m, row['publication_date'])
            for m in row['date_mentions']
        }
        resolved.discard(None)
        return list(resolved)

    df_temp['all_extracted_dates'] = df_temp.apply(_resolve_all, axis=1)

    logger.info("Finalizada la extracción de fechas.")
    return df_temp
|
|
|
|
|
def _process_row_dates_internal(row):
    """Extract and resolve date mentions for one DataFrame row.

    The import is deferred and local so the function remains importable and
    picklable by multiprocessing workers regardless of how the parent
    process loaded this module.
    """
    from src.data.cleaner import _find_date_mentions, _resolve_relative_date
    mentions = _find_date_mentions(row['articleBody'])
    base_date = row['publication_date']

    if not mentions or pd.isnull(base_date):
        return []

    parsed = [_resolve_relative_date(m, base_date) for m in mentions]
    return list(set([p for p in parsed if p]))


def _extract_dates_for_chunk(chunk: pd.DataFrame) -> pd.Series:
    """Worker entry point: run date extraction over one DataFrame partition."""
    return chunk.apply(_process_row_dates_internal, axis=1)


def extract_temporal_expressions_parallel(df: pd.DataFrame, n_cores: int = None) -> pd.DataFrame:
    """Distribute date extraction across a pool of worker processes.

    Fix: the previous implementation computed `n_cores` and logged a
    "parallel" message but then ran `df.apply` serially in the parent
    process. The frame is now partitioned with `np.array_split` and the
    partitions are processed by an `mp.Pool` of workers.

    Args:
        df: Frame with 'articleBody' and 'publication_date' columns.
        n_cores: Number of workers; defaults to all available cores.

    Returns:
        A copy of *df* with an added 'all_extracted_dates' column holding
        lists of ISO-8601 date strings.
    """
    if n_cores is None:
        n_cores = mp.cpu_count()

    logger.info(f"Extracción paralela activa: {n_cores} núcleos.")

    df_copy = df.copy()

    # Empty input: nothing to split, just add the (empty) result column.
    if df_copy.empty:
        df_copy['all_extracted_dates'] = pd.Series(dtype=object)
        return df_copy

    # Never create more partitions (or workers) than there are rows.
    n_chunks = min(n_cores, len(df_copy))
    chunks = np.array_split(df_copy, n_chunks)

    with mp.Pool(processes=n_chunks) as pool:
        results = pool.map(_extract_dates_for_chunk, chunks)

    # Row indices are preserved within each chunk, so concatenating the
    # partial Series realigns the results with df_copy.
    df_copy['all_extracted_dates'] = pd.concat(results)

    return df_copy