Amna2024 committed on
Commit
8884697
·
verified ·
1 Parent(s): 247a38a

Delete api/main.py

Browse files
Files changed (1) hide show
  1. api/main.py +0 -50
api/main.py DELETED
@@ -1,50 +0,0 @@
1
- from fastapi import FastAPI, Depends, Body
2
- from typing import List, Dict
3
- from RAG.Retriever import Retriever, load_vector_store
4
- from RAG.llm import GeminiLLM
5
- import os
6
-
7
# FastAPI application exposing the RAG pipeline over HTTP.
app = FastAPI()

# Retrieve API keys from environment variables.
userdata = {
    "GEMINI_API_KEY": os.getenv("GEMINI_API_KEY"),
}

GEMINI_KEY = userdata.get("GEMINI_API_KEY")

# BUG FIX: the original referenced PROJECT_ROOT without ever defining it,
# which raises NameError the moment this module is imported. Derive it from
# this file's location instead (api/ lives one level below the project root).
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Directory holding the persisted vector store; overridable via env var.
PERSIST_DIR = os.getenv("RAG_PERSIST_DIR", os.path.join(PROJECT_ROOT, 'RAG/'))

# Module-level singletons shared by all requests: vector store, retriever,
# and the Gemini LLM client.
v_store = load_vector_store(GEMINI_KEY, PERSIST_DIR)
retriever = Retriever(v_store)
gemini_llm = GeminiLLM(GEMINI_KEY)
20
-
21
-
22
@app.post("/rag")
async def rag_endpoint(query: str = Body(...)):
    """Answer *query* with retrieval-augmented generation.

    Looks up documents relevant to the query, presents them to the LLM as
    conversational context, and returns the model's answer together with
    the original query.
    """
    # Pull the documents most relevant to the incoming question.
    docs = retriever.retrieve_documents(query)

    # Assemble a role/content transcript: the user's question, an assistant
    # turn that injects the retrieved context, and a closing user instruction
    # asking for a concise answer grounded in those documents.
    messages = [
        {"role": "user", "content": str(query)},
        {
            "role": "assistant",
            "content": f"Based on the retrieved documents: {str(docs)}, I will now answer your question.",
        },
        {
            "role": "user",
            "content": "Please provide a clear and concise answer based on the above documents.",
        },
    ]

    # Hand the assembled transcript to Gemini and return its reply.
    response = gemini_llm.generate_response(messages)

    return {"query": query, "response": response}