"""
High-throughput NER module for exhaustive news analysis.
Optimized to maximize GPU throughput without dropping entities.
"""
|
|
| import stanza |
| import pandas as pd |
| import logging |
| from tqdm.auto import tqdm |
|
|
| logger = logging.getLogger(__name__) |
|
|
class NerProcessor:
    """Named-entity recognition over a dataframe text column using Stanza.

    On construction, downloads the model for the requested language
    (stanza skips the download if it is already cached) and builds a
    ``tokenize,ner`` pipeline, optionally on GPU.
    """

    def __init__(self, lang: str = 'es', use_gpu: bool = True):
        """Download the *lang* model and build the Stanza pipeline.

        Args:
            lang: ISO language code of the model (default: Spanish).
            use_gpu: run the pipeline on GPU when one is available.
        """
        stanza.download(lang=lang, logging_level='WARN')

        self.nlp = stanza.Pipeline(
            lang=lang,
            processors='tokenize,ner',
            use_gpu=use_gpu,
        )

    def process_dataframe(self, df: pd.DataFrame, column: str, batch_size: int = 16) -> pd.DataFrame:
        """Run NER over *column* and return a copy of *df* with the results.

        Texts are submitted to the pipeline in batches so that several
        documents are processed per pipeline call instead of one at a time.

        Args:
            df: input dataframe; it is not modified.
            column: name of the text column to analyze. Raises ``KeyError``
                if the column does not exist.
            batch_size: number of documents per pipeline call. Values < 1
                are clamped to 1 (the original raised ``ValueError`` from
                ``range`` with a non-positive step).

        Returns:
            A copy of *df* with a new ``named_entities`` column containing,
            for each row, a list of dicts with keys ``text``, ``type``,
            ``start`` and ``end`` (character offsets).
        """
        # Lazy %-style args: the message is only formatted if the record
        # is actually emitted (cheaper than an f-string on hot paths).
        logger.info("Iniciando NER optimizado sobre: %s", column)

        # NaN rows become empty strings so the output stays positionally
        # aligned with the input rows (one entity list per row, in order).
        texts = [str(t) for t in df[column].fillna("")]

        # Guard: a non-positive step would make range() raise ValueError.
        step = max(1, batch_size)

        all_entities = []

        for start in tqdm(range(0, len(texts), step), desc="Analizando noticias"):
            batch = texts[start : start + step]

            # NOTE(review): assumes this stanza version accepts a list of
            # raw strings and returns one Document per input, in order —
            # confirm against the pinned stanza release.
            docs = self.nlp(batch)

            for doc in docs:
                all_entities.append([
                    {
                        'text': ent.text,
                        'type': ent.type,
                        'start': ent.start_char,
                        'end': ent.end_char,
                    }
                    for ent in doc.ents
                ])

        # Positional assignment: all_entities has exactly one entry per
        # dataframe row, regardless of the dataframe's index.
        df_result = df.copy()
        df_result['named_entities'] = all_entities
        return df_result