# example/backend/server.py
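"""FastAPI backend for the BioPACIFIC MIP research-paper RAG service.

Embeds incoming questions with the VoyageAI API, retrieves similar papers
from a FAISS index built over precomputed paper embeddings, and generates
answers with a locally hosted Qwen3 model.
"""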
from decouple import config
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pathlib import Path
from pydantic import BaseModel
import uvicorn
import json
import numpy as np
import torch
import faiss
import time
from transformers import AutoTokenizer, AutoModelForCausalLM
import voyageai
from typing import List
# Configuration: API key from the .env file, plus paths to precomputed data
VOYAGE_API_KEY = config("VOYAGE_API_KEY")
EMBEDDINGS_FINAL = "/home/ianwu/projects/exampleSvelteRepo/example/backend/mmsci/paper_embeddings/embeddings_final.npy" # path to the embeddings file
PAPER_FINAL = "/home/ianwu/projects/exampleSvelteRepo/example/backend/mmsci/paper_embeddings/paper_final.json" # path to the papers file
PAPER_FINAL_PATH_PREFIX = "" # path prefix for the individual paper JSON files
# Initialize FastAPI
app = FastAPI(title="BioPACIFIC MIP Research Paper RAG API")
# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, replace with your Svelte app's domain
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Load existing embeddings and papers
paper_embeddings = np.load(EMBEDDINGS_FINAL).astype(np.float32)  # FAISS expects float32 vectors
dimension = paper_embeddings.shape[1] # Get embedding dimension
# Initialize FAISS index
index = faiss.IndexFlatIP(dimension)  # Inner product; equals cosine similarity for unit-normalized embeddings
index.add(paper_embeddings)
with open(PAPER_FINAL, "r") as f:
    papers = json.load(f)
# Initialize models and tokenizer
model_name = "Qwen/Qwen3-8B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="cpu",
)
# Initialize VoyageAI client
voyage_client = voyageai.Client(api_key=VOYAGE_API_KEY)
class QueryRequest(BaseModel):
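    """A user query sent from the chat frontend."""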
    message: str
class PaperInfo(BaseModel):
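    """Title, abstract, similarity score, and link for one retrieved paper."""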
    title: str
    abstract: str
    similarity: str
    link: str
class QueryResponse(BaseModel):
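    """The generated answer plus the top retrieved papers."""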
    response: str
    papers: List[PaperInfo]
def get_voyage_embedding(text, model="voyage-3-lite"):
"""Get embeddings for text using VoyageAI API with rate limiting and retry logic."""
max_retries = 3
retry_delay = 5
for attempt in range(max_retries):
try:
response = voyage_client.embed(
model=model,
texts=[text]
)
return np.array(response.embeddings[0])
except Exception as e:
if attempt == max_retries - 1: # Last attempt
print(f"Failed after {max_retries} attempts: {str(e)}")
return None
print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying in {retry_delay} seconds...")
time.sleep(retry_delay)
retry_delay *= 2 # Exponential backoff
def find_relevant_papers(query_embedding, top_k=5):
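    """Return the top_k most similar papers and their FAISS similarity scores."""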
    # Perform similarity search using FAISS
    scores, indices = index.search(query_embedding.reshape(1, -1), top_k)
    # Get the papers and their similarity scores
    relevant_papers = [papers[idx] for idx in indices[0]]
    similarities = scores[0]  # Inner-product scores; cosine similarities for unit-normalized embeddings
    return relevant_papers, similarities
def format_papers_context(papers, similarities):
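    """Build the prompt context from the retrieved papers' full JSON records."""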
    relevant_papers = []
    context = "Based on the following relevant papers:\n\n"
    for paper, similarity in zip(papers, similarities):
        # Load the full paper record referenced by the embedding metadata
        with open(Path(PAPER_FINAL_PATH_PREFIX, paper['path'])) as f:
            original_paper = json.load(f)
        relevant_papers.append(original_paper)
        context += f"Title: {original_paper['title']}\n\n"
        context += f"Published Time: {original_paper['published_time']}\n\n"
        context += f"Abstract: {original_paper['abstract']}\n\n"
    context += "Question: "
    # context += "Please use the information from the retrieved papers to answer the question. Do not reference or recommend papers that weren't retrieved.\n\n"
    return context, relevant_papers
# Simple API health-check route.
@app.get("/")
async def hello():
    return {"message": "hello"}
@app.post("/api/query", response_model=QueryResponse)
async def generate_response(request: QueryRequest):
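    """Answer a query via RAG: embed the question, retrieve papers, and generate a response."""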
    try:
        # Generate an embedding for the query
        query_embedding = get_voyage_embedding(request.message)
        if query_embedding is None:
            raise HTTPException(status_code=500, detail="Failed to generate embedding")
        # Find relevant papers
        relevant_papers, similarities = find_relevant_papers(query_embedding)
        # Create context from the relevant papers
        context, paper_objects = format_papers_context(relevant_papers, similarities)
        # Prepare the prompt with context
        system_prompt = """You are a helpful assistant specializing in materials science. Follow these guidelines:
1. For general questions not requiring scientific papers:
- Answer directly using your knowledge
- No need to reference papers
2. For scientific research questions:
- If the question can be answered using the retrieved papers, answer using the retrieved papers
- If the question cannot be answered using the retrieved papers, say so
- Do not reference or recommend papers that weren't retrieved
3. Never generate or recommend papers that weren't retrieved in the context."""
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": context + request.message},
        ]
        # Use the chat template if available; otherwise fall back to simple concatenation
        if hasattr(tokenizer, "apply_chat_template"):
            text = tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True,
                enable_thinking=False,  # Qwen3-specific: disable the model's "thinking" mode
            )
        else:
            text = f"{system_prompt}\n\nUser: {context}{request.message}\n\nAssistant:"
        # Generate the answer with the local model
        inputs = tokenizer(text, return_tensors="pt").to(model.device)
        with torch.no_grad():
            outputs = model.generate(
                **inputs,  # input_ids and attention_mask
                max_new_tokens=512,  # room for a complete answer
                num_return_sequences=1,
                temperature=0.7,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )
        # Decode only the newly generated tokens, skipping the prompt
        response = tokenizer.decode(outputs[0][len(inputs.input_ids[0]):], skip_special_tokens=True).strip()
        # Format paper info for the response (top 3)
        paper_info = []
        for i in range(min(3, len(paper_objects))):
            paper_info.append(PaperInfo(
                title=paper_objects[i]['title'],
                abstract=paper_objects[i]['abstract'],
                similarity=f"Similarity Score: {similarities[i]:.3f}",
                link=paper_objects[i]['pdf_link'],
            ))
        # Pad to exactly 3 entries so the frontend layout stays consistent
        while len(paper_info) < 3:
            paper_info.append(PaperInfo(title="", abstract="", similarity="", link=""))
        return QueryResponse(response=response, papers=paper_info)
    except HTTPException:
        # Re-raise HTTP errors (e.g., the embedding failure above) unchanged
        raise
    except Exception as e:
        print(f"Error processing request: {e}")
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
if __name__ == "__main__":
    uvicorn.run("server:app", host="0.0.0.0", port=8000, reload=True)
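# Example request against the /api/query endpoint (hypothetical query,
# assuming the server is running locally on port 8000):
#
#   import requests
#   r = requests.post(
#       "http://localhost:8000/api/query",
#       json={"message": "What are recent advances in polymer synthesis?"},
#   )
#   print(r.json()["response"])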