| import stanza |
|
|
class ClassicExtractorV2:
    """Rule-based 5W1H (who/what/when/where/why/how) event extractor for
    Spanish dependency parses, with a small sliding-window memory used to
    resolve anaphoric pronouns to recent entity antecedents."""

    def __init__(self):
        # UD dependency relations (and lexical markers) feeding each slot.
        self.rules = {
            'who': ['nsubj', 'nsubj:pass', 'csubj', 'agent'],
            'what_verb': ['root'],
            'what_obj': ['obj', 'ccomp', 'xcomp'],
            'circumstantial': ['obl', 'advmod'],
            'why_markers': ['porque', 'debido', 'causa', 'pues'],
        }

        # Personal / demonstrative / clitic pronouns that need an antecedent.
        self.anaphoric_pronouns = set(
            'él ella ellos ellas '
            'este esta estos estas '
            'ese esa esos esas '
            'aquel aquella aquellos aquellas '
            'lo la los las le les '
            'quien quienes'.split()
        )

        # Relative pronouns are resolved syntactically, never anaphorically.
        self.relative_pronouns = set('que cual cuales cuyo cuya cuyos cuyas'.split())

        # Sliding window of recent entity candidates (most recent last).
        self.context_memory = []
|
|
| def reset_context(self): |
| self.context_memory = [] |
|
|
| def _infer_gender(self, word): |
| """Intenta inferir el género si Stanza no lo da.""" |
| feats = {} |
| if word.feats: |
| for pair in word.feats.split('|'): |
| if '=' in pair: |
| k, v = pair.split('=') |
| feats[k] = v |
| |
| gender = feats.get('Gender') |
| |
| if not gender and word.upos == 'PROPN': |
| if word.text.endswith('a') or word.text.endswith('as'): |
| gender = 'Fem' |
| elif word.text.endswith('o') or word.text.endswith('os'): |
| gender = 'Masc' |
| |
| return gender, feats.get('Number') |
|
|
| def _is_anaphoric_pronoun(self, word): |
| """ |
| Determina si un pronombre necesita resolución de correferencia. |
| Retorna True solo para pronombres personales/demostrativos anafóricos. |
| """ |
| if word.upos != 'PRON': |
| return False |
| |
| text_lower = word.text.lower() |
| |
| |
| if text_lower in self.relative_pronouns: |
| return False |
| |
| |
| if word.feats: |
| if 'PronType=Rel' in word.feats: |
| return False |
| |
| |
| return text_lower in self.anaphoric_pronouns |
|
|
| def get_span_data(self, word, sent, ner_map, dbpedia_ents=None): |
| def get_descendants(head_id, words_list): |
| children = [w.id for w in words_list if w.head == head_id] |
| descendants = [head_id] |
| for child in children: |
| descendants.extend(get_descendants(child, words_list)) |
| return descendants |
|
|
| subtree_ids = get_descendants(word.id, sent.words) |
| subtree_words = [ |
| sent.words[i-1] for i in subtree_ids |
| if sent.words[i-1].start_char is not None and sent.words[i-1].end_char is not None |
| ] |
| |
| if not subtree_words: |
| return {"span": "", "start": -1, "end": -1, "uri": None, "type": None} |
|
|
| start_char = min(w.start_char for w in subtree_words) |
| end_char = max(w.end_char for w in subtree_words) |
| subtree_words.sort(key=lambda w: w.id) |
| text_span = " ".join([w.text for w in subtree_words]) |
| entity_type = ner_map.get(word.id, None) |
|
|
| uri = None |
| if dbpedia_ents: |
| for ent in dbpedia_ents: |
| ent_text = ent.get('text', ent.get('surfaceForm', '')) |
| if ent_text and ent_text in text_span: |
| uri = ent.get('uri') |
| break |
|
|
| return { |
| "span": text_span, |
| "start": start_char, |
| "end": end_char, |
| "uri": uri, |
| "type": entity_type |
| } |
|
|
| def _update_memory(self, word, span_data): |
| """Añade un candidato a la memoria.""" |
| gender, number = self._infer_gender(word) |
| |
| candidate = { |
| 'gender': gender, |
| 'number': number, |
| 'data': span_data |
| } |
| |
| self.context_memory.append(candidate) |
| if len(self.context_memory) > 5: |
| self.context_memory.pop(0) |
|
|
| def _resolve_coreference(self, word): |
| """Busca antecedente compatible.""" |
| p_gender, p_number = self._infer_gender(word) |
|
|
| for candidate in reversed(self.context_memory): |
| c_gender = candidate['gender'] |
| c_number = candidate['number'] |
| |
| match_gender = True |
| if p_gender and c_gender: |
| match_gender = (p_gender == c_gender) |
| |
| match_number = True |
| if p_number and c_number: |
| match_number = (p_number == c_number) |
| |
| if match_gender and match_number: |
| return candidate['data'] |
| |
| return None |
|
|
| def extract(self, sent, dbpedia_ents=None): |
| event = { |
| "who": [], "what": [], "when": [], |
| "where": [], "why": [], "how": [] |
| } |
| |
| ner_map = {} |
| for ent in sent.ents: |
| for word in sent.words: |
| if (word.start_char is not None and word.end_char is not None and |
| ent.start_char is not None and ent.end_char is not None): |
| if word.start_char >= ent.start_char and word.end_char <= ent.end_char: |
| ner_map[word.id] = ent.type |
|
|
| root_verb = None |
| |
| for word in sent.words: |
| if word.start_char is None: continue |
| |
| dep = word.deprel |
| span_data = self.get_span_data(word, sent, ner_map, dbpedia_ents) |
| |
| |
| if dep in self.rules['who']: |
| |
| if self._is_anaphoric_pronoun(word): |
| antecedent = self._resolve_coreference(word) |
| if antecedent: |
| span_data['uri'] = antecedent['uri'] |
| span_data['type'] = antecedent['type'] |
| span_data['span'] = f"{span_data['span']} (Ref: {antecedent['span']})" |
| |
| elif word.upos in ['NOUN', 'PROPN']: |
| if len(span_data['span']) > 2: |
| self._update_memory(word, span_data) |
| |
| event["who"].append(span_data) |
| |
| |
| if dep == 'root': |
| root_verb = word |
| event["what"].append({ |
| "span": word.text, "start": word.start_char, "end": word.end_char, |
| "uri": None, "type": None |
| }) |
| |
| if dep in self.rules['what_obj'] and word.head == (root_verb.id if root_verb else -1): |
| |
| |
| |
| |
| if word.upos in ['NOUN', 'PROPN']: |
| self._update_memory(word, span_data) |
| |
| event["what"].append(span_data) |
|
|
| |
| if dep in self.rules['circumstantial']: |
| entity_type = ner_map.get(word.id, "O") |
| if entity_type in ['LOC', 'GPE']: event["where"].append(span_data) |
| elif entity_type in ['DATE', 'TIME']: event["when"].append(span_data) |
| else: event["how"].append(span_data) |
|
|
| if dep == 'mark' and word.text.lower() in self.rules['why_markers']: |
| head_id = word.head |
| if head_id > 0: |
| head_word = sent.words[head_id-1] |
| event["why"].append(self.get_span_data(head_word, sent, ner_map, dbpedia_ents)) |
|
|
| placeholder = {"span": "", "start": -1, "end": -1, "uri": None, "type": None} |
| for key in event: |
| if not event[key]: event[key] = [placeholder] |
| |
| return event |