stevafernandes committed on
Commit
3eb90d8
·
verified ·
1 Parent(s): 841adc2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -34
app.py CHANGED
@@ -9,8 +9,6 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter
9
  from langchain_google_genai import GoogleGenerativeAIEmbeddings
10
  from langchain_community.vectorstores import FAISS
11
  from langchain_google_genai import ChatGoogleGenerativeAI
12
- from langchain.chains.question_answering import load_qa_chain
13
- from langchain.prompts import PromptTemplate
14
 
15
  # --- Get API key from Hugging Face Secrets ---
16
  GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
@@ -48,8 +46,22 @@ def get_vector_store(text_chunks, api_key):
48
  vector_store.save_local(FAISS_INDEX_PATH)
49
 
50
 
51
- def get_conversational_chain(api_key):
52
- prompt_template = """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  You are a helpful assistant for Antimicrobial Pharmacology. You answer questions based ONLY on the context provided from the PDF documents.
54
 
55
  IMPORTANT RULES:
@@ -61,13 +73,13 @@ def get_conversational_chain(api_key):
61
  6. IMPORTANT: When referencing information from the course materials, always say "your professor says" or "according to your professor" instead of "the text states", "the document states", "the text says", or similar phrases. This makes the learning experience more personal and connected to the course.
62
 
63
  Chat History:
64
- {chat_history}
65
 
66
  Context from PDF:
67
  {context}
68
 
69
  Current Question:
70
- {question}
71
 
72
  Instructions:
73
  - If the user asks for a multiple choice question (MCQ), quiz, or test question:
@@ -87,34 +99,10 @@ def get_conversational_chain(api_key):
87
 
88
  Answer:
89
  """
90
- model = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0.3, google_api_key=api_key)
91
- prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question", "chat_history"])
92
- chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
93
- return chain
94
-
95
-
96
- def get_response(user_question, api_key, chat_history):
97
- """Get response from the AI model with chat history context"""
98
- embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=api_key)
99
- new_db = FAISS.load_local(FAISS_INDEX_PATH, embeddings, allow_dangerous_deserialization=True)
100
- docs = new_db.similarity_search(user_question, k=4)
101
-
102
- # Format chat history for context
103
- history_text = ""
104
- for msg in chat_history[-10:]: # Keep last 10 messages for context
105
- role = "User" if msg["role"] == "user" else "Assistant"
106
- history_text += f"{role}: {msg['content']}\n"
107
 
108
- chain = get_conversational_chain(api_key)
109
- response = chain(
110
- {
111
- "input_documents": docs,
112
- "question": user_question,
113
- "chat_history": history_text
114
- },
115
- return_only_outputs=True
116
- )
117
- return response["output_text"]
118
 
119
 
120
  def main():
@@ -291,4 +279,4 @@ def main():
291
 
292
 
293
  if __name__ == "__main__":
294
- main()
 
9
  from langchain_google_genai import GoogleGenerativeAIEmbeddings
10
  from langchain_community.vectorstores import FAISS
11
  from langchain_google_genai import ChatGoogleGenerativeAI
 
 
12
 
13
  # --- Get API key from Hugging Face Secrets ---
14
  GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
 
46
  vector_store.save_local(FAISS_INDEX_PATH)
47
 
48
 
49
+ def get_response(user_question, api_key, chat_history):
50
+ """Get response from the AI model with chat history context"""
51
+ embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=api_key)
52
+ new_db = FAISS.load_local(FAISS_INDEX_PATH, embeddings, allow_dangerous_deserialization=True)
53
+ docs = new_db.similarity_search(user_question, k=4)
54
+
55
+ # Format chat history for context
56
+ history_text = ""
57
+ for msg in chat_history[-10:]: # Keep last 10 messages for context
58
+ role = "User" if msg["role"] == "user" else "Assistant"
59
+ history_text += f"{role}: {msg['content']}\n"
60
+
61
+ # Combine document contents
62
+ context = "\n\n".join([doc.page_content for doc in docs])
63
+
64
+ prompt_template = f"""
65
  You are a helpful assistant for Antimicrobial Pharmacology. You answer questions based ONLY on the context provided from the PDF documents.
66
 
67
  IMPORTANT RULES:
 
73
  6. IMPORTANT: When referencing information from the course materials, always say "your professor says" or "according to your professor" instead of "the text states", "the document states", "the text says", or similar phrases. This makes the learning experience more personal and connected to the course.
74
 
75
  Chat History:
76
+ {history_text}
77
 
78
  Context from PDF:
79
  {context}
80
 
81
  Current Question:
82
+ {user_question}
83
 
84
  Instructions:
85
  - If the user asks for a multiple choice question (MCQ), quiz, or test question:
 
99
 
100
  Answer:
101
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
+ model = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0.3, google_api_key=api_key)
104
+ response = model.invoke(prompt_template)
105
+ return response.content
 
 
 
 
 
 
 
106
 
107
 
108
  def main():
 
279
 
280
 
281
  if __name__ == "__main__":
282
+ main()