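"""FastAPI backend for the BioPACIFIC MIP research-paper RAG service.

Pipeline: embed the user query with VoyageAI, retrieve similar papers from a
FAISS inner-product index over precomputed paper embeddings, and generate an
answer with a local Qwen3-8B model conditioned on the retrieved titles and
abstracts.
"""
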
from decouple import config
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pathlib import Path
from pydantic import BaseModel
import uvicorn
import json
import numpy as np
import torch
import faiss
import time
from transformers import AutoTokenizer, AutoModelForCausalLM
import voyageai
from typing import List

# API key is read from the .env file; data paths are configured below
VOYAGE_API_KEY = config("VOYAGE_API_KEY")
EMBEDDINGS_FINAL = "/home/ianwu/projects/exampleSvelteRepo/example/backend/mmsci/paper_embeddings/embeddings_final.npy"  # path to the embeddings file
PAPER_FINAL = "/home/ianwu/projects/exampleSvelteRepo/example/backend/mmsci/paper_embeddings/paper_final.json"  # path to the papers index file
PAPER_FINAL_PATH_PREFIX = ""  # path prefix for the individual paper files
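
# Expected .env entry (placeholder value, not a real key):
#   VOYAGE_API_KEY=<your-voyage-api-key>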

# Initialize FastAPI
app = FastAPI(title="BioPACIFIC MIP Research Paper RAG API")

# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, replace with your Svelte app's domain
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load existing embeddings and papers
paper_embeddings = np.load(EMBEDDINGS_FINAL).astype(np.float32)  # FAISS requires float32

dimension = paper_embeddings.shape[1]  # Get embedding dimension

# Initialize FAISS index. Inner product equals cosine similarity only when the
# stored vectors are L2-normalized (Voyage embeddings are normalized by default).
index = faiss.IndexFlatIP(dimension)
index.add(paper_embeddings)

with open(PAPER_FINAL, "r") as f:
    papers = json.load(f)

# Initialize models and tokenizer
model_name = "Qwen/Qwen3-8B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="cpu",
)
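# Note: float16 inference on CPU is slow and some ops lack half-precision CPU
# kernels; if a GPU is available, device_map="auto" is a common alternative.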

# Initialize VoyageAI client
voyage_client = voyageai.Client(api_key=VOYAGE_API_KEY)

class QueryRequest(BaseModel):
    message: str

class PaperInfo(BaseModel):
    title: str
    abstract: str
    similarity: str
    link: str

class QueryResponse(BaseModel):
    response: str
    papers: List[PaperInfo]

def get_voyage_embedding(text, model="voyage-3-lite"):
    """Get embeddings for text using VoyageAI API with rate limiting and retry logic."""
    max_retries = 3
    retry_delay = 5
    
    for attempt in range(max_retries):
        try:
            response = voyage_client.embed(
                model=model,
                texts=[text]
            )
            return np.array(response.embeddings[0])
            
        except Exception as e:
            if attempt == max_retries - 1:  # Last attempt
                print(f"Failed after {max_retries} attempts: {str(e)}")
                return None
            print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying in {retry_delay} seconds...")
            time.sleep(retry_delay)
            retry_delay *= 2  # Exponential backoff

def find_relevant_papers(query_embedding, top_k=5):
    """Return the top_k most similar papers and their FAISS similarity scores."""
    # Perform similarity search using FAISS (expects a float32 row vector)
    query = query_embedding.reshape(1, -1).astype(np.float32)
    scores, indices = index.search(query, top_k)

    # Get the papers and their similarity scores
    relevant_papers = [papers[idx] for idx in indices[0]]
    similarities = scores[0]  # Inner-product scores; cosine similarities for normalized embeddings

    return relevant_papers, similarities

def format_papers_context(papers, similarities):
    """Build the prompt context from the retrieved papers and return their full records."""
    relevant_papers = []
    context = "Based on the following relevant papers:\n\n"
    for paper, _similarity in zip(papers, similarities):
        with open(Path(PAPER_FINAL_PATH_PREFIX, paper['path'])) as paper_file:
            original_paper = json.load(paper_file)
        relevant_papers.append(original_paper)
        context += f"Title: {original_paper['title']}\n\n"
        context += f"Published Time: {original_paper['published_time']}\n\n"
        context += f"Abstract: {original_paper['abstract']}\n\n"
    context += "Question: "
    # context += "Please use the information from the retrieved papers to answer the question. Do not reference or recommend papers that weren't retrieved.\n\n"
    return context, relevant_papers
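
# Each entry in the papers index is expected to carry a 'path' to a per-paper
# JSON file whose records include at least 'title', 'published_time', 'abstract',
# and 'pdf_link' (the fields read above and in the /api/query handler).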

# Simple health-check route.
@app.get("/")
async def hello():
    return {"message": "hello"}

@app.post("/api/query", response_model=QueryResponse)
async def generate_response(request: QueryRequest):
    try:
        # Generate embedding for the query
        query_embedding = get_voyage_embedding(request.message)
        
        if query_embedding is None:
            raise HTTPException(status_code=500, detail="Failed to generate embedding")
        
        # Find relevant papers
        relevant_papers, similarities = find_relevant_papers(query_embedding)

        # Create context from relevant papers
        context, paper_objects = format_papers_context(relevant_papers, similarities)

        # Prepare the prompt with context
        system_prompt = """You are a helpful assistant specializing in materials science. Follow these guidelines:

1. For general questions not requiring scientific papers:
   - Answer directly using your knowledge
   - No need to reference papers

2. For scientific research questions:
   - If the question can be answered using the retrieved papers, answer using the retrieved papers
   - If the question cannot be answered using the retrieved papers, say so
   - Do not reference or recommend papers that weren't retrieved

3. Never generate or recommend papers that weren't retrieved in the context."""

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": context + request.message}
        ]
        # Use chat template if available, else fallback
        if hasattr(tokenizer, "apply_chat_template"):
            text = tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True,
                enable_thinking=False
            )
        else:
            # Fallback: simple concatenation
            text = f"{system_prompt}\n\nUser: {context}{request.message}\n\nAssistant:"
        # Generate response using model
        inputs = tokenizer(text, return_tensors="pt").to(model.device)
        with torch.no_grad():
            outputs = model.generate(
                inputs.input_ids,
                max_new_tokens=512,  # allow a complete answer rather than a short fragment
                num_return_sequences=1,
                temperature=0.7,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )
        response = tokenizer.decode(outputs[0][len(inputs.input_ids[0]):], skip_special_tokens=True)
        # Format paper info for response (top 3)
        paper_info = []
        for i in range(min(3, len(paper_objects))):
            paper_info.append(PaperInfo(
                title=paper_objects[i]['title'],
                abstract=paper_objects[i]['abstract'],
                similarity=f"Similarity Score: {similarities[i]:.3f}",
                link=paper_objects[i]['pdf_link']
            ))

        # Ensure we have exactly 3 papers in the response
        while len(paper_info) < 3:
            paper_info.append(PaperInfo(
                title="",
                abstract="",
                similarity="",
                link=""
            ))
            
        return QueryResponse(
            response=response,
            papers=paper_info
        )
    
    except Exception as e:
        print(f"Error processing request: {str(e)}")
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
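
# Example request against a locally running server (sketch; the port matches
# the uvicorn call below):
#   curl -X POST http://localhost:8000/api/query \
#        -H "Content-Type: application/json" \
#        -d '{"message": "What are recent advances in polymer synthesis?"}'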

if __name__ == "__main__":
    uvicorn.run("server:app", host="0.0.0.0", port=8000, reload=True)