Transformers
Italian
English
semantic-search
explainable-ai
faiss
ai-ethics
responsible-ai
llm
prompt-engineering
multimodal-ai
ai-transparency
ethical-intelligence
explainable-llm
cognitive-ai
ethical-ai
scientific-retrieval
modular-ai
memory-augmented-llm
trustworthy-ai
reasoning-engine
ai-alignment
next-gen-llm
thinking-machines
open-source-ai
explainability
ai-research
semantic-audit
cognitive-agent
human-centered-ai
File size: 1,173 Bytes
64dfc65 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 |
# © 2025 Elena Marziali — Code released under Apache 2.0 license.
# See LICENSE in the repository for details.
# Removal of this copyright is prohibited.
# Function to retrieve similar responses
def retrieve_context(question, top_k=2):
    """Search FAISS memory for responses similar to *question*.

    Args:
        question: Natural-language query; embedded with the module-level
            ``embedding_model`` and searched against the FAISS ``index``.
        top_k: Maximum number of nearest neighbours to retrieve.

    Returns:
        A list of labels ("Previous response N") for each valid neighbour,
        or an empty list when no valid neighbour exists.
    """
    emb_question = embedding_model.encode([question])
    _, indices = index.search(np.array(emb_question, dtype=np.float32), top_k)
    # FAISS pads missing neighbours with -1. The original code only checked
    # indices[0][0], so a trailing -1 (index holding fewer than top_k items)
    # still produced a bogus entry. Filter every slot instead — this also
    # matches the behaviour of retrieve_multiturn_context below.
    return [f"Previous response {i+1}" for i in indices[0] if i != -1]
# **Usage example**
# Seed the FAISS memory with one Q/A pair, then query with a paraphrase.
add_to_memory("What is general relativity?", "General relativity is Einstein's theory of gravity.")
related = retrieve_context("Can you explain relativity?")
print("Related responses:", related)
# Retrieve multi-turn context
def retrieve_multiturn_context(question, top_k=5):
    """Search FAISS memory for related previous turns to build a broader context.

    Args:
        question: Natural-language query to embed and search with.
        top_k: Maximum number of nearest neighbours to retrieve.

    Returns:
        A single space-joined string of turn labels, or "" when the
        search yields no valid neighbours.
    """
    query_vec = np.array(embedding_model.encode([question]), dtype=np.float32)
    _, neighbour_ids = index.search(query_vec, top_k)
    # -1 marks an empty slot when the index holds fewer than top_k entries.
    turns = [f"Previous turn {idx+1}" for idx in neighbour_ids[0] if idx != -1]
    if not turns:
        return ""
    return " ".join(turns)