rdz-falcon committed · Commit 8ee26d1 · verified · 1 Parent(s): 4ae1463

Update src/rag.py

Files changed (1)
  1. src/rag.py +26 -300
src/rag.py CHANGED
@@ -132,155 +132,6 @@ def load_emotion_classifier(api_base_url="http://127.0.0.1:1234/v1"):
 # --- End of commented out/unreachable code ---
 
 
-def load_generation_model():
-    """Load the specified Ollama model using LangChain."""
-    print("=== CONFIGURING OLLAMA GENERATION MODEL ===")
-    model_name = "llama3.2"  # Your desired Ollama model
-
-    # Instantiate the Ollama LLM
-    try:
-        generation_llm = Ollama(
-            model=model_name,
-            # temperature=0.1
-        )
-        print(f"Ollama model '{model_name}' configured.")
-    except Exception as e:
-        print(f"Error configuring Ollama model: {e}")
-        print("Please ensure the Ollama server is running and the model is available.")
-        raise
-
-    return generation_llm
-
-def create_prompt_templates():
-    """Create prompt templates for the assistant"""
-
-    template = """
-    <|system|>
-    You are an AAC (Augmentative and Alternative Communication) user (Elliot) engaging in a conversation. Your responses must reflect factual details provided in your personal context, be empathetic as guided by the emotion analysis, and align naturally with your previous chat history. You will respond directly as the AAC user, speaking in the first person (using "I", "my", "me").
-
-    **Instructions:**
-    1. Understand the question asked by the conversation partner.
-    2. Use the provided "Context" to include accurate personal details about your life (Elliot).
-    3. Reflect the empathetic tone described in the "Empathetic Response Guidance".
-    4. Ensure your response fits logically within the "Chat History".
-    5. Keep your response concise, empathetic, and natural.
-    6. Ignore the empathetic tone described in the "Empathetic Response Guidance" if it is not related to the conversation.
-
-    **Context:**
-    {context}
-
-    **Chat History:**
-    {chat_history}
-
-    **Empathetic Response Guidance:**
-    {emotion_analysis}</s>
-    <|user|>
-    The conversation partner asked: "{question}"
-
-    Please generate your response as the AAC user, following the instructions above.</s>
-    <|assistant|>
-
-    """.strip()
-
-    PROMPT = PromptTemplate(
-        input_variables=["question", "emotion_analysis", "context", "chat_history"],
-        template=template,
-    )
-    print("\n Prompt:", PROMPT)
-    return PROMPT
-
-class AACAssistant:
-    def __init__(self, document_path):
-        print("Initializing AAC Assistant...")
-        print("Loading document retriever...")
-        self.vectorstore = setup_document_retriever(document_path)
-        print("Configuring emotion LLM client...")
-        # Use the new function to get the client for the API
-        self.emotion_llm = load_emotion_classifier()  # You can pass a different URL if needed
-        print("Loading generation model...")
-        self.llm = load_generation_model()  # This now loads the Ollama model
-        print("Creating prompt templates...")
-        self.prompt = create_prompt_templates()
-        print("Setting up conversation memory...")
-
-        # Set up memory for chat history
-        self.memory = ConversationBufferMemory(
-            memory_key="chat_history",
-            return_messages=True,
-            output_key="answer",
-            # Specify the input key for the memory explicitly
-            input_key="question"
-        )
-
-        # Create retrieval chain (using the main generation LLM)
-        self.chain = ConversationalRetrievalChain.from_llm(
-            llm=self.llm,  # Use the main generation model here
-            retriever=self.vectorstore.as_retriever(search_kwargs={'k': 3}),
-            memory=self.memory,
-            combine_docs_chain_kwargs={"prompt": self.prompt},
-            return_source_documents=True,
-            verbose=True
-        )
-
-        print("AAC Assistant initialized and ready!")
-
-    def get_emotion_analysis(self, llm, situation):
-        """
-        Gets emotion analysis from the configured emotion LLM API.
-        """
-        # Define the prompt structure for the emotion analysis model
-        # (Adjust this based on how you prompted your model in LM Studio)
-        text = situation
-        response = llm.create_chat_completion(
-            messages=[{"role": "user", "content": text}],
-            max_tokens=128,       # Max length of the generated response (adjust as needed)
-            temperature=0.7,      # Controls randomness (adjust)
-            # top_p=0.9,          # Optional: Nucleus sampling
-            # top_k=40,           # Optional: Top-k sampling
-            stop=["<|eot_id|>"],  # Crucial: Stop generation when the model outputs the end-of-turn token
-            stream=False,         # Set to True to get token-by-token output (like TextStreamer)
-        )
-
-        # --- 4. Extract and print the response ---
-        if response and 'choices' in response and len(response['choices']) > 0:
-            assistant_message = response['choices'][0]['message']['content']
-            print("\nAssistant Response:")
-            print(assistant_message.strip())
-            print("returning:", assistant_message.strip())
-            return assistant_message.strip()
-        else:
-            print("\nNo response generated or unexpected format.")
-            print("Full response:", response)
-
-        return ""
-
-    def process_query(self, user_query):
-        """
-        Process a query from the conversation partner to the AAC user.
-
-        Args:
-            user_query (str): Question asked by the conversation partner
-
-        Returns:
-            str: Generated response for the AAC user to communicate
-        """
-        # Step 1: Get emotion analysis from the LM Studio API via the emotion_llm client
-        print(f"Getting emotion analysis for query: '{user_query}'")
-        emotion_analysis = self.get_emotion_analysis(self.emotion_llm, user_query)
-        print(f"Emotion Analysis Result: {emotion_analysis}")
-
-        # Step 2: Run the RAG + LLM chain (using the main generation model)
-        # The emotion_analysis is now passed into the prompt context
-        print("Running main RAG chain...")
-        # Use invoke instead of the deprecated __call__
-        # Pass inputs as a dictionary matching the chain's expected input keys
-        response = self.chain.invoke(
-            {"question": user_query, "emotion_analysis": emotion_analysis}
-        )
-
-        return response["answer"]
-
 # def run_demo():
 #     # Sample personal experiences document path - replace with your actual file
 #     document_path = "aac_user_experiences.txt"
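Review note: this hunk deletes what appears to be an exact duplicate of load_generation_model, create_prompt_templates, and the AACAssistant class; copies of the same definitions remain later in the file (the @@ -535,6 and @@ -608,32 hunks below patch them in place). As a quick sanity check of the surviving helper, a minimal sketch, assuming a local Ollama server is running and llama3.2 has been pulled:

    from langchain_community.llms import Ollama

    # Same construction as the surviving load_generation_model() helper.
    generation_llm = Ollama(model="llama3.2")

    # invoke() sends a single prompt string and returns the completion text.
    print(generation_llm.invoke("Introduce yourself in one short sentence."))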
@@ -352,76 +203,6 @@ class AACAssistant:
 
 # !pip install chromadb
 
-import os
-import torch
-import tempfile
-from langchain.chains import ConversationalRetrievalChain
-from langchain.memory import ConversationBufferMemory
-from langchain_community.document_loaders import TextLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain_community.vectorstores import Chroma
-from langchain.embeddings import HuggingFaceEmbeddings
-from langchain_community.llms import HuggingFacePipeline
-from langchain.prompts import PromptTemplate
-from langchain_community.llms import Ollama
-from langchain_openai import ChatOpenAI
-from transformers import AutoTokenizer, pipeline, AutoModelForCausalLM, BitsAndBytesConfig
-
-def setup_document_retriever(document_path):
-    # Load documents with the AAC user's personal experiences
-    loader = TextLoader(document_path)
-    documents = loader.load()
-
-    # Split documents into chunks
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=1000,
-        chunk_overlap=200,
-        separators=["\n\n", "\n", " ", ""]
-    )
-    chunks = text_splitter.split_documents(documents)
-
-    # Create embeddings
-    embeddings = HuggingFaceEmbeddings(
-        model_name="sentence-transformers/all-MiniLM-L6-v2",
-        model_kwargs={'device': 'cuda' if torch.cuda.is_available() else 'cpu'}
-    )
-
-    # Create a persistent directory for the ChromaDB
-    persist_directory = os.path.join(tempfile.gettempdir(), "chroma_db")
-
-    # Create Chroma vector store
-    vectorstore = Chroma.from_documents(
-        documents=chunks,
-        embedding=embeddings,
-        persist_directory=persist_directory
-    )
-
-    # Persist the database to disk
-    vectorstore.persist()
-
-    return vectorstore
-
-def load_emotion_classifier(api_base_url="http://127.0.0.1:1234/v1"):
-    """
-    This function configures and returns a LangChain LLM client
-    to interact with an OpenAI-compatible API endpoint (like LM Studio).
-
-    Args:
-        api_base_url (str): The base URL of the OpenAI-compatible API endpoint.
-
-    Returns:
-        ChatOpenAI: A LangChain ChatOpenAI instance configured for the API.
-    """
-    print(f"=== CONFIGURING LLM CLIENT FOR API: {api_base_url} ===")
-
-
-    llm = ChatOpenAI(
-        openai_api_base=api_base_url,
-        openai_api_key="dummy-key",  # Required by LangChain, but not used by LM Studio
-        temperature=0.7,
-        max_tokens=128,
-    )
-    return llm
 
 # --- The following code was commented out or unreachable in the original notebook ---
 # Example code (replace with appropriate code for your model):
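Review note: the imports and the setup_document_retriever / load_emotion_classifier helpers removed here also look like duplicates; the hunk headers above and below still reference load_emotion_classifier and load_generation_model at other positions in the file. A minimal sketch of exercising the retriever the way the chain does, assuming the surviving setup_document_retriever is in scope and aac_user_experiences.txt exists (the query string is illustrative):

    # Build the Chroma store from the personal-experiences document.
    vectorstore = setup_document_retriever("aac_user_experiences.txt")

    # similarity_search() embeds the query with the same MiniLM model and
    # returns the k nearest chunks; the chain wraps the same store as a
    # retriever with search_kwargs={'k': 3}.
    for doc in vectorstore.similarity_search("Do you have any pets?", k=3):
        print(doc.page_content[:100])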
@@ -535,6 +316,7 @@ def load_generation_model():
 
     return llm
 
+
 def create_prompt_templates():
     """Create prompt templates for the assistant"""
 
@@ -608,32 +390,35 @@ class AACAssistant:
 
         print("AAC Assistant initialized and ready!")
 
-    def get_emotion_analysis(self, situation):
+    def get_emotion_analysis(self, llm, situation):
         """
         Gets emotion analysis from the configured emotion LLM API.
         """
         # Define the prompt structure for the emotion analysis model
         # (Adjust this based on how you prompted your model in LM Studio)
-        input_emotion = "excited"  # Or determine this dynamically if needed
-        user_content = f"Emotion: {input_emotion}\nSituation: {situation}\nGenerate a brief analysis of the user's likely feeling based on the situation."
-
-        messages = [
-            # {"role": "system", "content": "You are an empathetic assistant analyzing emotions."},
-            {"role": "user", "content": user_content},
-        ]
-
-        print(f"Sending to emotion API: {messages}")
-        try:
-            # Use the invoke method for ChatOpenAI
-            response = self.emotion_llm.invoke(messages)
-            # The response object has a 'content' attribute
-            analysis = response.content.strip()
-            print(f"Received from emotion API: {analysis}")
-            return analysis
-        except Exception as e:
-            print(f"\nAn error occurred during emotion analysis API call: {e}")
-            # Fallback or default analysis
-            return f"Could not determine emotion (API error: {e})"
+        text = situation
+        response = llm.create_chat_completion(
+            messages=[{"role": "user", "content": text}],
+            max_tokens=128,       # Max length of the generated response (adjust as needed)
+            temperature=0.7,      # Controls randomness (adjust)
+            # top_p=0.9,          # Optional: Nucleus sampling
+            # top_k=40,           # Optional: Top-k sampling
+            stop=["<|eot_id|>"],  # Crucial: Stop generation when the model outputs the end-of-turn token
+            stream=False,         # Set to True to get token-by-token output (like TextStreamer)
+        )
+
+        # --- 4. Extract and print the response ---
+        if response and 'choices' in response and len(response['choices']) > 0:
+            assistant_message = response['choices'][0]['message']['content']
+            print("\nAssistant Response:")
+            print(assistant_message.strip())
+            print("returning:", assistant_message.strip())
+            return assistant_message.strip()
+        else:
+            print("\nNo response generated or unexpected format.")
+            print("Full response:", response)
+
+        return ""
 
 
     def process_query(self, user_query):
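Review note: the rewritten get_emotion_analysis takes the client as an explicit llm argument and calls create_chat_completion(), which is the llama-cpp-python API rather than the LangChain ChatOpenAI one. Since __init__ still assigns self.emotion_llm from load_emotion_classifier(), which returns a ChatOpenAI instance with no create_chat_completion method, this path raises AttributeError unless a llama_cpp.Llama object is supplied instead. A minimal sketch of the client the new body expects (the GGUF path is illustrative):

    from llama_cpp import Llama

    # llama-cpp-python exposes create_chat_completion() with exactly the
    # keyword arguments used in the new method body.
    emotion_llm = Llama(model_path="models/emotion-model.gguf", n_ctx=2048)

    response = emotion_llm.create_chat_completion(
        messages=[{"role": "user", "content": "I just got my exam results."}],
        max_tokens=128,
        temperature=0.7,
        stop=["<|eot_id|>"],
        stream=False,
    )
    print(response["choices"][0]["message"]["content"])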
@@ -648,7 +433,7 @@ class AACAssistant:
         """
         # Step 1: Get emotion analysis from the LM Studio API via the emotion_llm client
         print(f"Getting emotion analysis for query: '{user_query}'")
-        emotion_analysis = self.get_emotion_analysis(user_query)
+        emotion_analysis = self.get_emotion_analysis(self.emotion_llm, user_query)
         print(f"Emotion Analysis Result: {emotion_analysis}")
 
         # Step 2: Run the RAG + LLM chain (using the main generation model)
@@ -661,62 +446,3 @@ class AACAssistant:
         )
 
         return response["answer"]
-
-# def run_demo():
-#     # Sample personal experiences document path - replace with your actual file
-#     document_path = "aac_user_experiences.txt"
-
-#     # Create a dummy document if it doesn't exist for demonstration
-#     # if not os.path.exists(document_path):
-#     #     with open(document_path, "w") as f:
-#     #         f.write("""
-#     # I grew up in Seattle and love the rain.
-#     # My favorite hobby is playing chess, which I've been doing since I was 7 years old.
-#     # I have a dog named Max who is a golden retriever.
-#     # I went to college at University of Washington and studied computer science.
-#     # I enjoy watching sci-fi movies and Star Trek is my favorite series.
-#     # I've traveled to Japan twice and love Japanese cuisine.
-#     # Music helps me relax, especially classical piano pieces.
-#     # I volunteer at the local animal shelter once a month.
-#     # """)
-
-#     # Initialize the assistant
-#     assistant = AACAssistant(document_path)
-
-#     # Interactive demo
-#     print("\n===== AAC Communication Assistant Demo =====")
-#     print("(Type 'exit' to end the demo)")
-
-#     while True:
-#         try:
-#             user_input = input("\nConversation partner says: ")
-#             if user_input.lower() == 'exit':
-#                 break
-
-#             response = assistant.process_query(user_input)
-#             print(f"\nAAC user communicates: {response}")
-#         except EOFError:  # Handle case where input stream ends unexpectedly
-#             print("\nInput stream closed. Exiting demo.")
-#             break
-#         except KeyboardInterrupt:  # Handle Ctrl+C
-#             print("\nDemo interrupted by user. Exiting.")
-#             break
-#         except Exception as e:
-#             print(f"\nAn unexpected error occurred: {e}")
-#             # Optionally add more specific error handling or logging
-#             # Consider whether to break or continue the loop on error
-#             break  # Exit on error for safety
-
-# try:
-#     from importlib.metadata import PackageNotFoundError
-# except ImportError:
-#     # Define a fallback for older Python versions
-#     class PackageNotFoundError(Exception):
-#         pass
-
-# # Cell 13: Main Execution Block
-# if __name__ == "__main__":
-#     run_demo()
-
-# # !pip install bitsandbytes -q || echo "bitsandbytes installation failed, will use fp16 precision instead"
-# # pip install -U bitsandbytes
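Review note: the final hunk drops the commented-out run_demo() loop and the stray pip reminders, so the module no longer ships a runnable entry point. A minimal driver for one conversational turn, assuming the same aac_user_experiences.txt document the deleted demo used:

    # One turn of the conversation: partner question in, AAC response out.
    assistant = AACAssistant("aac_user_experiences.txt")
    answer = assistant.process_query("Did you do anything fun this weekend?")
    print(f"AAC user communicates: {answer}")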