# SpanishMicroEvents / src/events/classic/v1_rules_only.py
# Initial dataset release for ACM MM 2026 (commit 77fb120, verified)
import stanza
import ast
class ClassicExtractorV1:
    """Rule-based 5W1H micro-event extractor over Stanza dependency parses.

    Maps Universal Dependencies relations to event slots (who / what / when /
    where / why / how), optionally enriching each span with a DBpedia URI and
    a Stanza NER entity type.
    """

    def __init__(self):
        # Dependency relations (plus lexical cues for 'why') driving each slot.
        self.rules = {
            'who': ['nsubj', 'nsubj:pass', 'csubj', 'agent'],
            'what_verb': ['root'],
            'what_obj': ['obj', 'ccomp', 'xcomp'],
            'circumstantial': ['obl', 'advmod'],
            'why_markers': ['porque', 'debido', 'causa', 'pues']
        }

    def get_span_data(self, word, sent, ner_map, dbpedia_ents=None):
        """Return text, character offsets, DBpedia URI and NER type for the
        dependency subtree rooted at *word*.

        Args:
            word: Stanza word acting as the subtree head.
            sent: Stanza sentence containing *word* (1-based word ids assumed).
            ner_map: dict mapping word id -> NER entity type.
            dbpedia_ents: optional list of dicts carrying 'text' (or
                'surfaceForm') and 'uri' keys from a DBpedia-style linker.

        Returns:
            dict with keys 'span', 'start', 'end', 'uri', 'type'; a
            placeholder ({'span': '', 'start': -1, ...}) when the subtree
            has no character-aligned tokens.
        """
        def collect_subtree(head_id, words):
            # Depth-first collection of head_id plus all its descendants.
            ids = [head_id]
            for w in words:
                if w.head == head_id:
                    ids.extend(collect_subtree(w.id, words))
            return ids

        subtree_ids = collect_subtree(word.id, sent.words)
        # Safety filter: drop virtual tokens without character alignment.
        subtree_words = [
            sent.words[i - 1] for i in subtree_ids
            if sent.words[i - 1].start_char is not None
            and sent.words[i - 1].end_char is not None
        ]
        if not subtree_words:
            return {"span": "", "start": -1, "end": -1, "uri": None, "type": None}

        start_char = min(w.start_char for w in subtree_words)
        end_char = max(w.end_char for w in subtree_words)
        subtree_words.sort(key=lambda w: w.id)
        text_span = " ".join(w.text for w in subtree_words)

        # 1. NER type (Stanza): taken from the head word's label, if any.
        entity_type = ner_map.get(word.id)

        # 2. DBpedia URI: first linked entity whose surface form occurs in
        #    the span text (simple substring containment).
        uri = None
        if dbpedia_ents:
            for ent in dbpedia_ents:
                ent_text = ent.get('text', ent.get('surfaceForm', ''))
                if ent_text and ent_text in text_span:
                    uri = ent.get('uri')
                    break

        return {
            "span": text_span,
            "start": start_char,
            "end": end_char,
            "uri": uri,
            "type": entity_type
        }

    def extract(self, sent, dbpedia_ents=None):
        """Extract a 5W1H event dict from one parsed sentence.

        Each slot maps to a list of span dicts (see get_span_data); slots
        with no hits are filled with a distinct placeholder dict.
        """
        event = {
            "who": [], "what": [], "when": [],
            "where": [], "why": [], "how": []
        }

        # Full NER map for this sentence: word id -> entity type, for every
        # word whose character span falls inside an entity's span.
        ner_map = {}
        for ent in sent.ents:
            if ent.start_char is None or ent.end_char is None:
                continue
            for word in sent.words:
                if (word.start_char is not None and word.end_char is not None
                        and word.start_char >= ent.start_char
                        and word.end_char <= ent.end_char):
                    ner_map[word.id] = ent.type

        # BUGFIX: locate the root verb *before* the main loop. The original
        # discovered it mid-iteration, so objects appearing before the root
        # in word order were compared against head == -1 and silently lost.
        root_verb = next(
            (w for w in sent.words
             if w.deprel == 'root' and w.start_char is not None),
            None
        )
        root_id = root_verb.id if root_verb else -1

        for word in sent.words:
            if word.start_char is None:
                continue
            dep = word.deprel
            span_data = self.get_span_data(word, sent, ner_map, dbpedia_ents)

            # WHO: syntactic subjects / agents.
            if dep in self.rules['who']:
                event["who"].append(span_data)

            # WHAT: the root verb itself (verbs rarely carry entity info,
            # so uri/type stay None for consistency).
            if dep == 'root':
                event["what"].append({
                    "span": word.text,
                    "start": word.start_char,
                    "end": word.end_char,
                    "uri": None,
                    "type": None
                })

            # WHAT: objects / clausal complements governed by the root verb.
            if dep in self.rules['what_obj'] and word.head == root_id:
                event["what"].append(span_data)

            # WHEN / WHERE / HOW: obliques and adverbials, routed by NER type.
            if dep in self.rules['circumstantial']:
                entity_type = ner_map.get(word.id, "O")
                if entity_type in ('LOC', 'GPE'):
                    event["where"].append(span_data)
                elif entity_type in ('DATE', 'TIME'):
                    event["when"].append(span_data)
                else:
                    event["how"].append(span_data)

            # WHY: causal subordinators attach the clause they govern.
            if dep == 'mark' and word.text.lower() in self.rules['why_markers']:
                head_id = word.head
                if head_id > 0:
                    head_word = sent.words[head_id - 1]
                    event["why"].append(
                        self.get_span_data(head_word, sent, ner_map, dbpedia_ents)
                    )

        # Fill empty slots. BUGFIX: each slot gets its own placeholder dict;
        # the original aliased one shared mutable dict into every empty slot.
        for key, spans in event.items():
            if not spans:
                event[key] = [{"span": "", "start": -1, "end": -1,
                               "uri": None, "type": None}]
        return event