|
|
from decouple import config |
|
|
from fastapi import FastAPI, HTTPException |
|
|
from fastapi.middleware.cors import CORSMiddleware |
|
|
from pathlib import Path |
|
|
from pydantic import BaseModel |
|
|
import uvicorn |
|
|
import json |
|
|
import numpy as np |
|
|
import torch |
|
|
import faiss |
|
|
import time |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
import voyageai |
|
|
from typing import List, Dict, Any, Optional |
|
|
|
|
|
|
|
|
# VoyageAI API key, read from the environment / .env file via python-decouple.
VOYAGE_API_KEY = config("VOYAGE_API_KEY")

# NOTE(review): absolute, machine-specific paths — consider moving these into
# environment variables before deploying on another host.
# Precomputed paper embedding matrix (numpy .npy, one row per paper).
EMBEDDINGS_FINAL = "/home/ianwu/projects/exampleSvelteRepo/example/backend/mmsci/paper_embeddings/embeddings_final.npy"

# Paper metadata records (JSON list); indexed by faiss row position in
# find_relevant_papers, so it must stay aligned with EMBEDDINGS_FINAL.
PAPER_FINAL = "/home/ianwu/projects/exampleSvelteRepo/example/backend/mmsci/paper_embeddings/paper_final.json"

# Prefix joined onto each paper record's relative 'path' field when loading
# the full paper JSON (empty = paths are already absolute/relative to CWD).
PAPER_FINAL_PATH_PREFIX = ""
|
|
|
|
|
|
|
|
app = FastAPI(title="BioPACIFIC MIP Research Paper RAG API")

# Allow the Svelte frontend (any origin, during development) to call the API.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# not honored by browsers (the CORS spec forbids wildcard origins with
# credentials) — pin explicit origins before production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
|
|
|
|
|
|
# ---- Import-time initialization (runs once per worker process) ----

# Load the precomputed paper embedding matrix, shape (n_papers, dim).
# NOTE(review): faiss expects float32 — assumes the .npy was saved as
# float32; verify, or add .astype("float32") here.
paper_embeddings = np.load(EMBEDDINGS_FINAL)

dimension = paper_embeddings.shape[1]

# Exact inner-product index. Inner product equals cosine similarity only if
# the stored embeddings are L2-normalized — TODO confirm how they were built.
index = faiss.IndexFlatIP(dimension)

index.add(paper_embeddings)

# Paper metadata, row-aligned with the embedding matrix (looked up by faiss
# row index in find_relevant_papers).
with open(PAPER_FINAL, "r") as f:
    papers = json.load(f)

# Local generation model (loaded eagerly; dominates startup time and RAM).
# NOTE(review): float16 weights on CPU are slow and not all CPU kernels
# support fp16 — confirm this is intentional vs. float32/bfloat16.
model_name = "Qwen/Qwen3-8B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="cpu",
)

# Client for the VoyageAI embedding API (used for query embeddings).
voyage_client = voyageai.Client(api_key=VOYAGE_API_KEY)
|
|
|
|
|
class QueryRequest(BaseModel):
    """Request body for POST /api/query."""

    message: str  # the user's natural-language question
|
|
|
|
|
class PaperInfo(BaseModel):
    """One retrieved paper as surfaced to the frontend.

    The endpoint always returns exactly three entries; all-blank fields
    denote an empty padding slot.
    """

    title: str
    abstract: str
    similarity: str  # pre-formatted display string, e.g. "Similarity Score: 0.812"
    link: str  # the paper's 'pdf_link' URL
|
|
|
|
|
class QueryResponse(BaseModel):
    """Response body for POST /api/query."""

    response: str  # LLM-generated answer text
    papers: List[PaperInfo]  # exactly three entries (padded with blanks)
|
|
|
|
|
def get_voyage_embedding(text, model="voyage-3-lite"):
    """Embed *text* via the VoyageAI API, retrying transient failures.

    Makes up to three attempts with exponential backoff (5s, then 10s
    between attempts). Returns the embedding as a 1-D numpy array, or
    None once every attempt has failed.
    """
    max_retries = 3
    retry_delay = 5

    attempt = 0
    while True:
        try:
            result = voyage_client.embed(
                model=model,
                texts=[text],
            )
        except Exception as e:
            # Give up after the final attempt; otherwise back off and retry.
            if attempt >= max_retries - 1:
                print(f"Failed after {max_retries} attempts: {str(e)}")
                return None
            print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying in {retry_delay} seconds...")
            time.sleep(retry_delay)
            retry_delay *= 2
            attempt += 1
        else:
            return np.array(result.embeddings[0])
|
|
|
|
|
def find_relevant_papers(query_embedding, top_k=5):
    """Return the *top_k* most similar papers for a query embedding.

    Searches the module-level faiss inner-product index and maps the hit
    row indices back onto the module-level ``papers`` metadata list.

    Args:
        query_embedding: 1-D query embedding vector (any float dtype).
        top_k: number of papers to retrieve.

    Returns:
        (relevant_papers, similarities): matching paper records and their
        inner-product scores, both ordered best-first.
    """
    # faiss's Python bindings require a contiguous float32 (n, d) matrix;
    # the VoyageAI embedding arrives as a float64 vector (np.array over
    # Python floats), so cast and reshape explicitly here.
    query = np.ascontiguousarray(query_embedding, dtype=np.float32).reshape(1, -1)

    scores, indices = index.search(query, top_k)

    # faiss pads with -1 when fewer than top_k vectors exist; drop those
    # slots instead of letting papers[-1] silently return the last record.
    valid = indices[0] >= 0
    relevant_papers = [papers[idx] for idx in indices[0][valid]]
    similarities = scores[0][valid]

    return relevant_papers, similarities
|
|
|
|
|
def format_papers_context(papers, similarities):
    """Build the LLM prompt context from retrieved paper records.

    For each retrieved paper, loads the full paper JSON from disk at
    Path(PAPER_FINAL_PATH_PREFIX, paper['path']) and appends its title,
    publication time and abstract to the context string.

    Args:
        papers: paper metadata records (each must carry a 'path' field);
            typically the output of find_relevant_papers.
        similarities: similarity scores paired element-wise with *papers*
            (only used for pairing; not rendered into the context).

    Returns:
        (context, relevant_papers): the prompt prefix ending in
        "Question: ", and the list of fully-loaded paper dicts.
    """
    relevant_papers = []
    parts = ["Based on the following relevant papers:\n\n"]

    for paper, _similarity in zip(papers, similarities):
        # Context manager so the handle is closed deterministically — the
        # original json.load(open(...)) leaked the file object.
        with open(Path(PAPER_FINAL_PATH_PREFIX, paper['path'])) as f:
            original_paper = json.load(f)
        relevant_papers.append(original_paper)
        parts.append(f"Title: {original_paper['title']}\n\n")
        parts.append(f"Published Time: {original_paper['published_time']}\n\n")
        parts.append(f"Abstract: {original_paper['abstract']}\n\n")

    parts.append("Question: ")
    return "".join(parts), relevant_papers
|
|
|
|
|
|
|
|
@app.get("/")
async def hello():
    """Liveness check: responds with a static greeting."""
    greeting = {"message": "hello"}
    return greeting
|
|
|
|
|
def _generate_llm_response(context, user_message):
    """Run the local Qwen model on the context-augmented query.

    Builds the chat prompt (chat template when available, plain-text
    fallback otherwise), samples a completion, and returns only the newly
    generated text with special tokens stripped.
    """
    system_prompt = """You are a helpful assistant specializing in materials science. Follow these guidelines:

1. For general questions not requiring scientific papers:
- Answer directly using your knowledge
- No need to reference papers

2. For scientific research questions:
- If the question can be answered using the retrieved papers, answer using the retrieved papers
- If the question cannot be answered using the retrieved papers, say so
- Do not reference or recommend papers that weren't retrieved

3. Never generate or recommend papers that weren't retrieved in the context."""

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": context + user_message},
    ]

    if hasattr(tokenizer, "apply_chat_template"):
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
            enable_thinking=False,  # Qwen3: suppress <think> reasoning blocks
        )
    else:
        # Fallback for tokenizers that ship without a chat template.
        text = f"{system_prompt}\n\nUser: {context}{user_message}\n\nAssistant:"

    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            # NOTE(review): 16 new tokens truncates answers mid-sentence —
            # looks like a leftover debugging value; confirm before raising.
            max_new_tokens=16,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the generated suffix, skipping the echoed prompt tokens.
    prompt_len = inputs.input_ids.shape[1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)


def _build_paper_info(paper_objects, similarities):
    """Format up to three retrieved papers for the response, padding with
    blank PaperInfo entries so the frontend always gets exactly three."""
    paper_info = [
        PaperInfo(
            title=paper_objects[i]['title'],
            abstract=paper_objects[i]['abstract'],
            similarity=f"Similarity Score: {similarities[i]:.3f}",
            link=paper_objects[i]['pdf_link'],
        )
        for i in range(min(3, len(paper_objects)))
    ]
    while len(paper_info) < 3:
        paper_info.append(PaperInfo(title="", abstract="", similarity="", link=""))
    return paper_info


@app.post("/api/query", response_model=QueryResponse)
async def generate_response(request: QueryRequest):
    """RAG query endpoint.

    Embeds the user's message (VoyageAI), retrieves the most similar
    papers (faiss), builds a context-augmented prompt, generates an
    answer with the local LLM, and returns the answer plus the top
    three source papers.

    Raises:
        HTTPException: 500 when the embedding call fails or any later
            processing step errors.
    """
    try:
        query_embedding = get_voyage_embedding(request.message)
        if query_embedding is None:
            raise HTTPException(status_code=500, detail="Failed to generate embedding")

        relevant_papers, similarities = find_relevant_papers(query_embedding)
        context, paper_objects = format_papers_context(relevant_papers, similarities)

        answer = _generate_llm_response(context, request.message)
        paper_info = _build_paper_info(paper_objects, similarities)

        return QueryResponse(
            response=answer,
            papers=paper_info,
        )

    except HTTPException:
        # Pass deliberate HTTP errors through untouched — the broad handler
        # below used to catch the embedding-failure HTTPException and rewrap
        # it as a generic 500 with a different detail message.
        raise
    except Exception as e:
        print(f"Error processing request: {str(e)}")
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
|
|
|
|
|
if __name__ == "__main__":
    # Dev entry point; reload=True watches source files for changes.
    # NOTE(review): the "server:app" import string assumes this file is
    # named server.py — confirm against the actual filename.
    uvicorn.run("server:app", host="0.0.0.0", port=8000, reload=True)
|
|
|