yashpinjarkar10 committed · verified
Commit 7cf3b7f · 1 Parent(s): 4025638

Upload 9 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+db/chroma_db/chroma.sqlite3 filter=lfs diff=lfs merge=lfs -text
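This rule routes db/chroma_db/chroma.sqlite3 through Git LFS, so a clone made without LFS fetches only a small text pointer rather than the database itself. As a sanity-check sketch (not part of this commit; the helper name and path usage are illustrative), a pointer can be told apart from the real file by its first line:

# Sketch: detect a Git LFS pointer that hasn't been replaced by the real file yet.
from pathlib import Path

def is_lfs_pointer(path: str) -> bool:
    # Pointer files are tiny text files whose first line names the LFS spec.
    head = Path(path).read_bytes()[:64]
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

print(is_lfs_pointer("db/chroma_db/chroma.sqlite3"))  # True means: run `git lfs pull`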
.gitignore ADDED
@@ -0,0 +1,18 @@
+# Byte-compiled / cache files
+__pycache__/
+*.py[cod]
+*.so
+*.swp
+*.swo
+
+# Virtual environment
+venv/
+.env
+*.log
+
+# Jupyter Notebook checkpoints
+.ipynb_checkpoints/
+
+# PyCharm project files
+.idea/
+*.iml
Dockerfile ADDED
@@ -0,0 +1,10 @@
+FROM python:3.12.4-bullseye
+
+WORKDIR /app
+
+COPY requirements.txt requirements.txt
+RUN pip install -r requirements.txt
+
+COPY . .
+
+CMD ["python", "app.py"]
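The image runs app.py directly, which starts uvicorn on port 8080 (see app.py below); the Dockerfile declares no EXPOSE, so the port has to be published at run time. A minimal stdlib probe of the root route, assuming the container was started with something like `docker run -p 8080:8080 <image>` (an assumption, not part of this commit):

# Sketch: confirm the containerized server answers on its health route.
# Assumes the host port is mapped to the container's 8080.
import json
import urllib.request

with urllib.request.urlopen("http://localhost:8080/", timeout=5) as resp:
    print(json.load(resp))  # expected: {"message": "FastAPI Server is Running!"}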
app.py ADDED
@@ -0,0 +1,133 @@
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from typing import List, Union
+import os
+from langchain.chains.combine_documents import create_stuff_documents_chain
+from langchain.chains.history_aware_retriever import create_history_aware_retriever
+from langchain.chains.retrieval import create_retrieval_chain
+from langchain_chroma import Chroma
+from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
+from dotenv import load_dotenv
+from starlette.middleware.cors import CORSMiddleware
+
+load_dotenv()
+GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
+
+# Define the persistent directory
+current_dir = os.path.dirname(os.path.abspath(__file__))
+persistent_directory = os.path.join(current_dir, "db", "chroma_db")
+
+# Initialize embeddings
+embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", api_key=GOOGLE_API_KEY)
+
+# Load the existing vector store with the embedding function
+db = Chroma(persist_directory=persistent_directory, embedding_function=embeddings)
+
+# Create a retriever for querying the vector store
+retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 5})
+
+# Initialize the LLM
+llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", api_key=GOOGLE_API_KEY)
+
+# Contextualize question prompt
+contextualize_q_system_prompt = (
+    "Given a chat history and the latest user question "
+    "which might reference context in the chat history, "
+    "formulate a standalone question which can be understood "
+    "without the chat history. Do NOT answer the question, just "
+    "reformulate it if needed and otherwise return it as is."
+)
+
+contextualize_q_prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", contextualize_q_system_prompt),
+        MessagesPlaceholder("chat_history"),
+        ("human", "{input}"),
+    ]
+)
+
+# Create a history-aware retriever
+history_aware_retriever = create_history_aware_retriever(llm, retriever, contextualize_q_prompt)
+
+# Answer question prompt
+# Update this prompt to reflect your desired behavior (e.g., act as "you")
+qa_system_prompt = (
+    "You are an assistant that acts as me. Use the following pieces of retrieved context "
+    "to answer the question. If you don't know the answer, just say that you don't know. "
+    "Use three sentences maximum and keep the answer concise. Always respond as if you are me."
+    "\n\n"
+    "{context}"
+)
+
+qa_prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", qa_system_prompt),
+        MessagesPlaceholder("chat_history"),
+        ("human", "{input}"),
+    ]
+)
+
+# Create a chain to combine documents for question answering
+question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
+
+# Create a retrieval chain that combines the history-aware retriever and the question answering chain
+rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
+
+app = FastAPI()
+
+# Global chat history
+chat_history = []
+
+class ChatRequest(BaseModel):
+    input: str
+
+class ChatResponse(BaseModel):
+    answer: str
+# Enable CORS to allow frontend access
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+# Home route to check if FastAPI is running
+@app.get("/")
+async def root():
+    return {"message": "FastAPI Server is Running!"}
+@app.post("/start")
+async def start_chat():
+    global chat_history
+    chat_history = []  # Reset chat history
+    return {"message": "Chat session started. Chat history has been reset."}
+
+@app.post("/chat", response_model=ChatResponse)
+async def chat(chat_request: ChatRequest):
+    global chat_history
+
+    query = chat_request.input
+
+    if query.lower() == "exit":
+        raise HTTPException(status_code=400, detail="Use /start to reset the chat session.")
+
+    # Filter out SystemMessage, keeping only HumanMessage and AIMessage
+    filtered_chat_history = [
+        msg for msg in chat_history if isinstance(msg, HumanMessage) or isinstance(msg, AIMessage)
+    ]
+
+    # Invoke the RAG chain
+    result = rag_chain.invoke({"input": query, "chat_history": filtered_chat_history})
+
+    # Update the chat history
+    chat_history.append(HumanMessage(content=query))
+    chat_history.append(AIMessage(content=result['answer']))
+
+    return ChatResponse(answer=result['answer'])
+
+# Run the FastAPI app
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8080)
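The three routes above (/, /start, /chat) can be exercised with a short stdlib client. A usage sketch, assuming the server is reachable on localhost:8080 as configured in the uvicorn.run call; the BASE constant and post helper are illustrative, not part of the commit:

# Sketch: reset the session via /start, then send one question to /chat.
import json
import urllib.request

BASE = "http://localhost:8080"  # assumption: app.py running locally on port 8080

def post(path: str, payload: dict | None = None) -> dict:
    # All routes used here take and return JSON.
    data = json.dumps(payload or {}).encode()
    req = urllib.request.Request(
        BASE + path, data=data, headers={"Content-Type": "application/json"}
    )
    with urllib.request.urlopen(req, timeout=60) as resp:
        return json.load(resp)

print(post("/start"))  # {"message": "Chat session started. Chat history has been reset."}
print(post("/chat", {"input": "Who are you?"})["answer"])

One caveat worth noting: chat_history is a module-level global shared by every client, so concurrent users would see each other's history; true per-session state would need a session id or similar.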
db/chroma_db/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba2eee6ded5b3a339f5190b17a287b5375b70a46f20ef07f0733352496f29cd2
+size 307200
db/chroma_db/dcfa2b4e-85ee-416d-aba8-0010eceea7cf/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a13e72541800c513c73dccea69f79e39cf4baef4fa23f7e117c0d6b0f5f99670
+size 3212000
db/chroma_db/dcfa2b4e-85ee-416d-aba8-0010eceea7cf/header.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ec6df10978b056a10062ed99efeef2702fa4a1301fad702b53dd2517103c746
+size 100
db/chroma_db/dcfa2b4e-85ee-416d-aba8-0010eceea7cf/length.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c66456d32e90ec9795bf0572eb22ca6c1c88cb190a9dd1bb6e890b351b1edfd
+size 4000
db/chroma_db/dcfa2b4e-85ee-416d-aba8-0010eceea7cf/link_lists.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+size 0
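All five db/chroma_db files above are LFS pointers; after `git lfs pull`, chroma.sqlite3 should open as an ordinary SQLite database. A stdlib sanity check (a sketch; the relative path assumes the repository root as working directory, and the table names it prints vary with the Chroma version):

# Sketch: verify the fetched chroma.sqlite3 is a real SQLite database, not a
# leftover LFS pointer (a pointer would fail with "file is not a database").
import sqlite3

con = sqlite3.connect("file:db/chroma_db/chroma.sqlite3?mode=ro", uri=True)
tables = [row[0] for row in con.execute(
    "SELECT name FROM sqlite_master WHERE type='table'")]
print(tables)
con.close()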
requirements.txt ADDED
Binary file (310 Bytes).