# Optimized RAG System with E5-Mistral Embeddings and Llama3-70B Generation
import json
import logging
import os
import re
from collections import defaultdict
from functools import lru_cache
from typing import List, Tuple

import gradio as gr
from openai import OpenAI
from tenacity import retry, stop_after_attempt, wait_exponential
from langchain_community.retrievers import BM25Retriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings

# Model identifiers served by the OpenAI-compatible Academic Cloud endpoint
embedding_model = "e5-mistral-7b-instruct"
generation_model = "meta-llama-3-70b-instruct"
# --- Configuration ---
API_CONFIG = {
    "api_key": os.getenv("API_KEY"),
    "base_url": "https://chat-ai.academiccloud.de/v1"
}
CHUNK_SIZE = 800  # characters per chunk
OVERLAP = 200     # characters shared by consecutive chunks

# Initialize API client and logging
client = OpenAI(**API_CONFIG)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# --- Custom Embedding Handler ---
class MistralEmbeddings(Embeddings):
    """E5-Mistral-7B embedding adapter with error handling."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        try:
            response = client.embeddings.create(
                input=texts,
                model=embedding_model,
                encoding_format="float"
            )
            return [e.embedding for e in response.data]
        except Exception as e:
            logger.error(f"Embedding Error: {str(e)}")
            return [[] for _ in texts]  # Return empty embeddings on failure

    def embed_query(self, text: str) -> List[float]:
        return self.embed_documents([text])[0]
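
# Usage sketch (assumes API_KEY is set and the endpoint is reachable; the
# query text is illustrative):
#
#   emb = MistralEmbeddings()
#   vec = emb.embed_query("lotus-effect self-cleaning surfaces")
#   # vec is a flat list of floats, or [] if the embeddings call failed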
# --- Data Processing ---
def load_and_chunk_data(file_path: str) -> List[Document]:
    """Sliding-window chunking with per-item metadata preserved on every chunk."""
    with open(file_path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    documents = []
    for item in data:
        base_content = f"""Source: {item['Source']}
Application: {item['Application']}
Functions: {', '.join(filter(None, [item.get('Function1'), item.get('Function2')]))}
Technical Concepts: {', '.join(item['technical_concepts'])}
Biological Mechanisms: {', '.join(item['biological_mechanisms'])}"""
        strategy = item['Strategy']
        for i in range(0, len(strategy), CHUNK_SIZE - OVERLAP):
            chunk = strategy[i:i + CHUNK_SIZE]
            documents.append(Document(
                page_content=f"{base_content}\nStrategy Excerpt:\n{chunk}",
                metadata={
                    "source": item["Source"],
                    "application": item["Application"],
                    "technical_concepts": item["technical_concepts"],
                    "sustainability_impacts": item["sustainability_impacts"],
                    "hyperlink": item["Hyperlink"],
                    "chunk_id": f"{item['Source']}-{len(documents) + 1}"
                }
            ))
    return documents
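
# Chunking arithmetic: the window advances by CHUNK_SIZE - OVERLAP = 600
# characters, so consecutive chunks share 200 characters of context. A
# 2,000-character strategy yields chunks starting at offsets 0, 600, 1200
# and 1800, the last one truncated to 200 characters.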
# --- Hybrid Retrieval System ---
class EnhancedRetriever:
    """Hybrid retrieval: BM25 lexical search + E5-Mistral dense search, fused by rank."""

    def __init__(self, documents: List[Document]):
        self.bm25 = BM25Retriever.from_documents(documents)
        self.bm25.k = 5  # top-5 lexical hits
        self.vector_store = FAISS.from_documents(documents, MistralEmbeddings())
        self.vector_retriever = self.vector_store.as_retriever(search_kwargs={"k": 3})  # top-3 dense hits
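
    # retrieve() is memoized: lru_cache keys on (self, query), so repeated
    # identical queries skip the HyDE call and both retrieval passes. Caching
    # an instance method this way pins `self` in the cache, which is harmless
    # here because a single retriever instance lives for the life of the app.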
    @lru_cache(maxsize=200)
    def retrieve(self, query: str) -> str:
        try:
            processed_query = self._preprocess_query(query)
            expanded_query = self._hyde_expansion(processed_query)
            bm25_results = self.bm25.invoke(processed_query)
            vector_results = self.vector_retriever.invoke(processed_query)
            expanded_results = self.bm25.invoke(expanded_query)
            fused_results = self._fuse_results([bm25_results, vector_results, expanded_results])
            return self._format_context(fused_results[:5])
        except Exception as e:
            logger.error(f"Retrieval Error: {str(e)}")
            return ""

    def _preprocess_query(self, query: str) -> str:
        return query.lower().strip()
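
    # HyDE (Hypothetical Document Embeddings): ask the generator to draft a
    # hypothetical technical answer and retrieve against that draft, which
    # usually shares more vocabulary with relevant chunks than the raw
    # question does. This variant feeds the expansion to BM25 only (see
    # retrieve() above) rather than to the dense index as in the original
    # HyDE setup.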
    def _hyde_expansion(self, query: str) -> str:
        try:
            response = client.chat.completions.create(
                model=generation_model,
                messages=[{
                    "role": "user",
                    "content": f"Generate a technical draft about biomimicry for: {query}\nInclude domain-specific terms."
                }],
                temperature=0.5,
                max_tokens=200
            )
            return response.choices[0].message.content
        except Exception as e:
            logger.error(f"HyDE Error: {str(e)}")
            return query  # Fall back to the unexpanded query
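
    # Reciprocal Rank Fusion: each chunk scores sum(1 / (rank + 60)) over the
    # result lists that contain it, using the conventional RRF constant k=60.
    # Worked example: a chunk ranked 1st in two lists scores 2/61 ≈ 0.0328 and
    # outranks a chunk ranked 1st in only one list (1/61 ≈ 0.0164).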
    def _fuse_results(self, result_sets: List[List[Document]]) -> List[Document]:
        fused_scores = defaultdict(float)
        for docs in result_sets:
            for rank, doc in enumerate(docs, 1):
                fused_scores[doc.metadata["chunk_id"]] += 1 / (rank + 60)
        seen = set()
        return [
            doc for doc in sorted(
                (doc for docs in result_sets for doc in docs),
                key=lambda x: fused_scores[x.metadata["chunk_id"]],
                reverse=True
            ) if not (doc.metadata["chunk_id"] in seen or seen.add(doc.metadata["chunk_id"]))
        ]
    def _format_context(self, docs: List[Document]) -> str:
        context = []
        for doc in docs:
            context_str = f"""**Source**: {doc.metadata['source']}
**Application**: {doc.metadata['application']}
**Concepts**: {', '.join(doc.metadata['technical_concepts'])}
**Excerpt**: {doc.page_content.split('Strategy Excerpt:')[-1].strip()}
**Reference**: {doc.metadata['hyperlink']}"""
            context.append(context_str)
        return "\n\n---\n\n".join(context)
# --- Generation System ---
SYSTEM_PROMPT = """**Biomimicry Expert Guidelines**
1. Base answers strictly on context
2. Cite sources as [Source]
3. **Bold** technical terms
4. Include reference links
Context: {context}"""
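
# Retry transient API failures up to 3 times with exponential backoff
# (waits start around 2 s and are capped at 20 s).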
def _generation_fallback(retry_state) -> str:
    """Called by tenacity once all retry attempts are exhausted."""
    logger.error(f"Generation Error: {retry_state.outcome.exception()}")
    return "I'm unable to generate a response right now. Please try again later."


@retry(stop=stop_after_attempt(3),
       wait=wait_exponential(multiplier=1, min=2, max=20),
       retry_error_callback=_generation_fallback)
def get_ai_response(query: str, context: str) -> str:
    # Exceptions must propagate for @retry to fire; catching them inside the
    # function body would disable the retries entirely.
    response = client.chat.completions.create(
        model=generation_model,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT.format(context=context)},
            {"role": "user", "content": f"Question: {query}\nProvide a detailed technical answer:"}
        ],
        temperature=0.4,
        max_tokens=600
    )
    return _postprocess_response(response.choices[0].message.content)
def _postprocess_response(response: str) -> str:
    # Turn bare [Source] citations into markdown links with placeholder anchors
    return re.sub(r"\[(.*?)\]", r"[\1](#)", response)
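
# Example (illustrative sentence): "Gecko setae enable dry adhesion
# [AskNature]" becomes "Gecko setae enable dry adhesion [AskNature](#)".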
# --- Pipeline Integration ---
documents = load_and_chunk_data("mini_data_enhanced.json")
retriever = EnhancedRetriever(documents)


def generate_response(question: str) -> str:
    try:
        context = retriever.retrieve(question)
        return get_ai_response(question, context) if context else "No relevant information found."
    except Exception as e:
        logger.error(f"Pipeline Error: {str(e)}")
        return "An error occurred processing your request."
# --- Gradio Interface ---
def chat_interface(question: str, history: List[Tuple[str, str]]):
    response = generate_response(question)
    return "", history + [(question, response)]
with gr.Blocks(title="AskNature BioStrategy Interface", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🍃 AskNature BioDialogue """)
    with gr.Row():
        chatbot = gr.Chatbot(
            label="Organism Strategy Dialogue",
            height=500,
            bubble_full_width=False,
            avatar_images=("🧬", "🌐")  # DNA + Earth icons
        )
    with gr.Row():
        question = gr.Textbox(
            placeholder="Explore biological strategies (e.g., 'How are TISSIUM polymers inspired by skin and used in medicine?')...",
            label="Biological Inquiry",
            scale=4
        )
        clear_btn = gr.Button("🌱 New Exploration Thread", variant="secondary")
    gr.Markdown("""
    <div style="text-align: center; color: #4a7c59;">
    <small>Powered by AskNature's Database |
    Explore nature's blueprints at <a href="https://asknature.org">asknature.org</a></small>
    </div>""")
    question.submit(chat_interface, [question, chatbot], [question, chatbot])
    clear_btn.click(lambda: [], None, chatbot)
if __name__ == "__main__":
    demo.launch(show_error=True)