# Coreference resolution for conversational queries using spaCy + coreferee.
import spacy
from spacy.tokens import Doc
import coreferee
# Load spaCy model (small English pipeline) and attach the coreferee
# coreference-resolution component to it.
nlp = spacy.load('en_core_web_sm')
nlp.add_pipe("coreferee")
# Register the custom extension attribute so Doc objects can carry a
# resolved-text value; force=True allows safe re-registration on reload.
Doc.set_extension('resolved_text', default=None, force=True)
def resolve_coreferences(query_text, conversation_history):
    """
    Resolve coreferences in the current query using conversation context.

    The history turns and the current query are joined into one document
    so that pronouns in the query can be linked to antecedents mentioned
    in earlier turns; mentions are then replaced textually by their most
    specific antecedent.

    Args:
        query_text (str): The current query to resolve.
        conversation_history (list): List of dicts describing previous
            turns, read via the 'user' and 'Bot' keys.
            # NOTE(review): the mixed-case 'Bot' key is assumed to match
            # the caller's schema — confirm against call sites.

    Returns:
        str: The current query with coreferent mentions resolved.
    """
    # Combine conversation history and the current query into one text
    # so the model sees the full dialogue context.
    combined_text = []
    for turn in conversation_history:
        combined_text.append(f"User: {turn['user']}")
        combined_text.append(f"Bot: {turn['Bot']}")
    combined_text.append(f"User: {query_text}")
    text = "\n".join(combined_text)

    # Run the pipeline (module-level `nlp` has coreferee attached).
    doc = nlp(text)

    # Start from each token's own text with its original trailing
    # whitespace; substitutions below keep the mention's spacing intact.
    resolved_parts = [token.text_with_ws for token in doc]

    # NOTE(review): relies on coreferee exposing `chain.most_specific`;
    # the documented attribute is `most_specific_mention_index` — verify
    # against the installed coreferee version.
    for chain in doc._.coref_chains:
        antecedent = doc[chain.most_specific.root_index]
        for mention in chain:
            if mention.root_index != chain.most_specific.root_index:
                # Replace the mention's text with the antecedent's text,
                # but keep the mention's own trailing whitespace so the
                # reconstructed string stays well-spaced.
                resolved_parts[mention.root_index] = (
                    antecedent.text + doc[mention.root_index].whitespace_
                )

    resolved_text = "".join(resolved_parts)

    # The resolved query is the final "User: ..." line of the document.
    return resolved_text.split('\n')[-1].replace("User: ", "").strip()