| import stanza |
| import re |
| from src.data.cleaner import _find_date_mentions |
|
|
class ClassicExtractorV3:
    """
    5W1H event extractor - Version 3.

    Improvements over V2:
    - WHO: clean noun phrase (prunes subordinate clauses: acl, relcl, advcl)
    - WHEN: searches for temporal patterns anywhere in the sentence
    - WHERE: searches for LOC entities in the whole sentence (not only obliques)
    - No date fallback (resolved downstream in Neo4j with COALESCE)
    - No date propagation between sentences
    """

    def __init__(self):
        # Dependency relations mapped to each 5W1H slot.
        self.rules = {
            'who': ['nsubj', 'nsubj:pass', 'csubj', 'agent'],
            'what_verb': ['root'],
            'what_obj': ['obj', 'ccomp', 'xcomp'],
            'circumstantial': ['obl', 'advmod'],
            'why_markers': ['porque', 'debido', 'causa', 'pues']
        }

        # Manner adverbs accepted for the HOW slot.
        self.mode_adverbs_whitelist = {
            'rápidamente', 'lentamente', 'violentamente', 'pacíficamente',
            'bruscamente', 'suavemente', 'silenciosamente', 'públicamente',
            'secretamente', 'abiertamente', 'directamente', 'indirectamente',

            'completamente', 'parcialmente', 'totalmente', 'absolutamente',
            'drásticamente', 'gradualmente', 'masivamente', 'unilateralmente',
            'conjuntamente', 'separadamente', 'individualmente', 'colectivamente',

            'duramente', 'severamente', 'firmemente', 'categóricamente',
            'tajantemente', 'rotundamente', 'enérgicamente', 'contundentemente'
        }

        # Adverbs explicitly rejected for HOW (discourse/epistemic/temporal).
        self.adverbs_blacklist = {
            'también', 'además', 'incluso', 'solo', 'sólo', 'solamente',
            'prácticamente', 'realmente', 'actualmente', 'finalmente',
            'inicialmente', 'posteriormente', 'anteriormente', 'recientemente',
            'simplemente', 'obviamente', 'claramente', 'evidentemente',
            'probablemente', 'posiblemente', 'seguramente', 'ciertamente',
            'siempre', 'nunca', 'jamás', 'todavía', 'aún', 'ya',
            'quizás', 'quizá', 'acaso', 'así', 'bien', 'mal',
            'no', 'sí', 'pero', 'sin embargo', 'aunque', 'mientras'
        }

        # Lexical markers of causality (conjunctions, prepositional phrases, nouns).
        # NOTE: only single-token markers can currently match (comparisons are
        # against a single word's text); multi-word entries are kept for
        # completeness / future use.
        self.causal_markers = {
            'porque', 'pues', 'ya que', 'puesto que', 'dado que', 'como',

            'debido a', 'a causa de', 'por culpa de', 'gracias a',
            'en virtud de', 'a raíz de', 'por razón de', 'por motivo de',

            'causa', 'razón', 'motivo', 'consecuencia'
        }

        # Prepositions that can head a causal PP (e.g. "por la crisis").
        self.causal_prepositions = {'por', 'ante', 'tras'}

        # Nouns that indicate time, used to avoid tagging temporal PPs as causal.
        self.temporal_nouns = {'mañana', 'tarde', 'noche', 'madrugada', 'mediodía', 'día', 'hora', 'momento', 'instante'}

        # Dependency relations pruned when building a clean noun chunk
        # (subordinate clauses and parenthetical material).
        self.prune_deps = {'acl', 'acl:relcl', 'relcl', 'advcl', 'ccomp', 'xcomp', 'parataxis'}

        # Dependency relations kept inside a noun chunk (modifiers of the head).
        self.keep_deps = {'det', 'amod', 'compound', 'flat', 'flat:name', 'nmod', 'nummod', 'appos'}

        # Bare lexical cues of temporality (deictics, weekdays, months, units).
        self.temporal_patterns = {
            'ayer', 'hoy', 'mañana', 'anteayer', 'anoche', 'ahora',
            'lunes', 'martes', 'miércoles', 'jueves', 'viernes', 'sábado', 'domingo',
            'enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio',
            'julio', 'agosto', 'septiembre', 'octubre', 'noviembre', 'diciembre',
            'semana', 'mes', 'año', 'década', 'siglo'
        }

        # Numeric dates (dd/mm/yyyy, dd-mm-yy, bare years).
        # NOTE(review): currently unused inside this class; kept because
        # external callers may rely on the attribute.
        self.date_regex = re.compile(r'\b\d{1,2}[-/]\d{1,2}[-/]\d{2,4}\b|\b\d{4}\b')

        # Pronouns that require coreference resolution against context_memory.
        self.anaphoric_pronouns = {
            'él', 'ella', 'ellos', 'ellas',
            'este', 'esta', 'estos', 'estas',
            'ese', 'esa', 'esos', 'esas',
            'aquel', 'aquella', 'aquellos', 'aquellas',
            'lo', 'la', 'los', 'las', 'le', 'les',
            'quien', 'quienes'
        }

        # Relative pronouns: never treated as anaphoric.
        self.relative_pronouns = {'que', 'cual', 'cuales', 'cuyo', 'cuya', 'cuyos', 'cuyas'}
        # Sliding window (max 5) of recent nominal antecedents for coreference.
        self.context_memory = []

    def reset_context(self):
        """Clear the coreference memory (call between documents)."""
        self.context_memory = []

    def _infer_gender(self, word):
        """
        Return ``(gender, number)`` for *word*.

        Reads Stanza morphological features; when Stanza gives no Gender and
        the word is a proper noun, falls back to a Spanish-suffix heuristic
        (-a/-as -> Fem, -o/-os -> Masc).
        """
        feats = {}
        if word.feats:
            for pair in word.feats.split('|'):
                if '=' in pair:
                    # split once: a feature value containing '=' must not crash
                    k, v = pair.split('=', 1)
                    feats[k] = v

        gender = feats.get('Gender')

        if not gender and word.upos == 'PROPN':
            if word.text.endswith('a') or word.text.endswith('as'):
                gender = 'Fem'
            elif word.text.endswith('o') or word.text.endswith('os'):
                gender = 'Masc'

        return gender, feats.get('Number')

    def _is_anaphoric_pronoun(self, word):
        """Return True if *word* is a pronoun needing coreference resolution."""
        if word.upos != 'PRON':
            return False

        text_lower = word.text.lower()

        # Relative pronouns point at their own clause head, not the memory.
        if text_lower in self.relative_pronouns:
            return False

        if word.feats and 'PronType=Rel' in word.feats:
            return False

        return text_lower in self.anaphoric_pronouns

    def _get_clean_noun_chunk(self, word, sent, ner_map, dbpedia_ents=None):
        """
        V3: Build a CLEAN noun phrase around *word*.

        Keeps: det, amod, compound, flat, short nmod.
        Prunes: acl, relcl, advcl (subordinate clauses).

        Returns a span dict: {span, start, end, uri, type}.
        """
        def get_pruned_descendants(head_id, words_list, depth=0):
            """Collect the subtree of head_id, pruning subordinate clauses."""
            descendants = [head_id]

            for w in words_list:
                if w.head == head_id:
                    if w.deprel in self.prune_deps:
                        continue
                    # Limit nmod chains to avoid dragging in long PP tails.
                    if w.deprel == 'nmod' and depth > 1:
                        continue
                    descendants.extend(get_pruned_descendants(w.id, words_list, depth + 1))

            return descendants

        subtree_ids = get_pruned_descendants(word.id, sent.words)
        # Stanza word ids are 1-based indexes into sent.words; drop tokens
        # without character offsets (e.g. expanded MWT parts).
        subtree_words = [
            sent.words[i-1] for i in subtree_ids
            if sent.words[i-1].start_char is not None
        ]

        if not subtree_words:
            return {"span": "", "start": -1, "end": -1, "uri": None, "type": None}

        subtree_words.sort(key=lambda w: w.id)
        start_char = min(w.start_char for w in subtree_words)
        end_char = max(w.end_char for w in subtree_words)
        text_span = " ".join([w.text for w in subtree_words])
        entity_type = ner_map.get(word.id, None)

        # Link to the first DBpedia entity whose surface form appears in the span.
        uri = None
        if dbpedia_ents:
            for ent in dbpedia_ents:
                ent_text = ent.get('text', ent.get('surfaceForm', ''))
                if ent_text and ent_text in text_span:
                    uri = ent.get('uri')
                    break

        return {
            "span": text_span,
            "start": start_char,
            "end": end_char,
            "uri": uri,
            "type": entity_type
        }

    def _extract_temporal_from_sentence(self, sent):
        """
        V3: Find temporal expressions in the sentence.

        Reuses _find_date_mentions from cleaner.py to capture full expressions,
        then deduplicates overlapping spans (keeping the longest).
        """
        temporal_spans = []
        sent_text = sent.text

        date_mentions = _find_date_mentions(sent_text)

        for mention in date_mentions:
            mention = mention.strip()
            if not mention:
                continue

            # Case-insensitive location of the mention within the sentence.
            idx = sent_text.lower().find(mention.lower())
            if idx != -1:
                temporal_spans.append({
                    "span": mention,
                    "start": idx,
                    "end": idx + len(mention),
                    "uri": None,
                    "type": "TEMPORAL"
                })

        # Deduplicate overlapping spans: keep the longer of each overlapping pair.
        if len(temporal_spans) > 1:
            temporal_spans.sort(key=lambda x: x['start'])
            deduped = []
            for span in temporal_spans:
                if deduped and span['start'] < deduped[-1]['end']:
                    if len(span['span']) > len(deduped[-1]['span']):
                        deduped[-1] = span
                else:
                    deduped.append(span)
            temporal_spans = deduped

        return temporal_spans

    def _extract_locations_from_sentence(self, sent, ner_map, dbpedia_ents=None):
        """
        V3: Collect ALL LOC/GPE entities in the sentence,
        not only those in oblique (circumstantial) position.
        """
        locations = []

        for ent in sent.ents:
            if ent.type in ['LOC', 'GPE']:
                uri = None
                if dbpedia_ents:
                    for db_ent in dbpedia_ents:
                        ent_text = db_ent.get('text', db_ent.get('surfaceForm', ''))
                        if ent_text and ent_text in ent.text:
                            uri = db_ent.get('uri')
                            break

                locations.append({
                    "span": ent.text,
                    "start": ent.start_char,
                    "end": ent.end_char,
                    "uri": uri,
                    "type": ent.type
                })

        return locations

    def _update_memory(self, word, span_data):
        """Push a nominal candidate onto the coreference memory (max 5)."""
        gender, number = self._infer_gender(word)

        candidate = {
            'gender': gender,
            'number': number,
            'data': span_data
        }

        self.context_memory.append(candidate)
        if len(self.context_memory) > 5:
            self.context_memory.pop(0)

    def _resolve_coreference(self, word):
        """
        Return the most recent memory entry compatible with *word* in gender
        and number, or None. Missing features on either side count as a match.
        """
        p_gender, p_number = self._infer_gender(word)

        for candidate in reversed(self.context_memory):
            c_gender = candidate['gender']
            c_number = candidate['number']

            match_gender = (not p_gender or not c_gender or p_gender == c_gender)
            match_number = (not p_number or not c_number or p_number == c_number)

            if match_gender and match_number:
                return candidate['data']

        return None

    def extract(self, sent, dbpedia_ents=None):
        """
        Extract a 5W1H event from a Stanza sentence.

        V3 changes: clean noun chunks for WHO, sentence-wide WHEN/WHERE search.
        Empty slots are filled with a fresh placeholder span dict each.
        """
        event = {
            "who": [], "what": [], "when": [],
            "where": [], "why": [], "how": []
        }

        # Map each word id to the NER type of the entity that covers it.
        ner_map = {}
        for ent in sent.ents:
            for word in sent.words:
                if (word.start_char is not None and word.end_char is not None and
                        ent.start_char is not None and ent.end_char is not None):
                    if word.start_char >= ent.start_char and word.end_char <= ent.end_char:
                        ner_map[word.id] = ent.type

        # WHEN: temporal expressions anywhere in the sentence.
        temporal_spans = self._extract_temporal_from_sentence(sent)
        if temporal_spans:
            event["when"].extend(temporal_spans)

        # WHERE: all LOC/GPE entities in the sentence.
        location_spans = self._extract_locations_from_sentence(sent, ner_map, dbpedia_ents)
        if location_spans:
            event["where"].extend(location_spans)

        # Locate the root verb up-front so that obj/ccomp/xcomp dependents
        # appearing BEFORE the root in the sentence are not dropped.
        root_verb = None
        for word in sent.words:
            if word.deprel == 'root' and word.start_char is not None:
                root_verb = word
                break

        for word in sent.words:
            if word.start_char is None:
                continue

            dep = word.deprel

            # WHO: subjects and agents, with coreference resolution for pronouns.
            if dep in self.rules['who']:
                span_data = self._get_clean_noun_chunk(word, sent, ner_map, dbpedia_ents)

                if self._is_anaphoric_pronoun(word):
                    antecedent = self._resolve_coreference(word)
                    if antecedent:
                        span_data['uri'] = antecedent['uri']
                        span_data['type'] = antecedent['type']
                        span_data['span'] = f"{span_data['span']} (Ref: {antecedent['span']})"

                elif word.upos in ['NOUN', 'PROPN']:
                    # Remember only substantial spans as future antecedents.
                    if len(span_data['span']) > 2:
                        self._update_memory(word, span_data)

                event["who"].append(span_data)

            # WHAT (verb): the root predicate.
            if dep == 'root':
                event["what"].append({
                    "span": word.text,
                    "start": word.start_char,
                    "end": word.end_char,
                    "uri": None,
                    "type": None
                })

            # WHAT (object): direct objects / clausal complements of the root.
            if (dep in self.rules['what_obj'] and root_verb is not None
                    and word.head == root_verb.id):
                span_data = self._get_clean_noun_chunk(word, sent, ner_map, dbpedia_ents)
                if word.upos in ['NOUN', 'PROPN']:
                    self._update_memory(word, span_data)
                event["what"].append(span_data)

            # HOW: manner adverbs in circumstantial position.
            if dep in self.rules['circumstantial']:
                entity_type = ner_map.get(word.id, "O")

                # Skip words already covered by WHERE/WHEN entities.
                if entity_type not in ['LOC', 'GPE', 'DATE', 'TIME']:
                    word_lower = word.text.lower()

                    # Whitelisted, or a '-mente' adverb not on the blacklist.
                    is_valid_how = (
                        word_lower in self.mode_adverbs_whitelist or
                        (word_lower.endswith('mente') and word_lower not in self.adverbs_blacklist)
                    )

                    if is_valid_how:
                        event["how"].append({
                            "span": word.text,
                            "start": word.start_char,
                            "end": word.end_char,
                            "uri": None,
                            "type": "MANNER"
                        })

            # WHY (clause): causal subordinators ("porque", "pues", ...) mark
            # their clause head; take that head's chunk as the cause.
            if dep == 'mark' and word.text.lower() in self.rules['why_markers']:
                head_id = word.head
                if head_id > 0:
                    head_word = sent.words[head_id-1]
                    span_data = self._get_clean_noun_chunk(head_word, sent, ner_map, dbpedia_ents)
                    span_data['type'] = 'CAUSAL_CLAUSE'
                    event["why"].append(span_data)

            # WHY (PP): causal prepositions ("por", "ante", "tras") heading a
            # nominal that is not a temporal noun.
            if dep == 'case' and word.text.lower() in self.causal_prepositions:
                head_id = word.head
                if head_id > 0:
                    head_word = sent.words[head_id-1]

                    if head_word.upos in ['NOUN', 'PROPN']:
                        if head_word.text.lower() not in self.temporal_nouns:
                            span_data = self._get_clean_noun_chunk(head_word, sent, ner_map, dbpedia_ents)
                            span_data['type'] = 'CAUSAL_PP'
                            event["why"].append(span_data)

        # Fill empty slots with a FRESH placeholder per key: sharing a single
        # dict would let a consumer's mutation of one slot leak into the others.
        for key in event:
            if not event[key]:
                event[key] = [{"span": "", "start": -1, "end": -1, "uri": None, "type": None}]

        return event