rdz-falcon committed · Commit 9ea7d10 · verified · 1 Parent(s): 996ac6b

Update src/rag.py

Files changed (1):
  1. src/rag.py +396 -15
src/rag.py CHANGED
@@ -1,12 +1,3 @@
-# /Users/divyeshpatel/Desktop/archiveWork/rajvi/nlp/rag.py
-# !pip install llama-cpp-python
-
-# from llama.cpp import Llama
-#
-# llm = Llama.from_pretrained(
-#     repo_id="rdz-falcon/model",
-#     filename="unsloth.F16.gguf",
-# )
 
 # !pip install langchain
 # !pip install langchain-community
@@ -75,14 +66,20 @@ def load_emotion_classifier(api_base_url="http://127.0.0.1:1234/v1"):
     """
     print(f"=== CONFIGURING LLM CLIENT FOR API: {api_base_url} ===")
 
+    from llama_cpp import Llama
 
-    llm = ChatOpenAI(
-        openai_api_base=api_base_url,
-        openai_api_key="dummy-key",  # Required by LangChain, but not used by LM Studio
-        temperature=0.7,
-        max_tokens=128,
+    llm = Llama.from_pretrained(
+        repo_id="rdz-falcon/llma_fine-tuned",
+        filename="unsloth.F16.gguf",
     )
-    return llm
+    return llm  # return the client so callers receive a usable object
+
+    # llm = ChatOpenAI(
+    #     openai_api_base=api_base_url,
+    #     openai_api_key="dummy-key",  # Required by LangChain, but not used by LM Studio
+    #     temperature=0.7,
+    #     max_tokens=128,
+    # )
+    # return llm
 
 # --- The following code was commented out or unreachable in the original notebook ---
 # Example code (replace with appropriate code for your model):
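For review context, a minimal sketch of exercising the new llama.cpp client on its own (assuming llama-cpp-python is installed and the GGUF download from the Hub succeeds; the example prompt is illustrative):

    from llama_cpp import Llama  # pip install llama-cpp-python

    # Download the fine-tuned GGUF weights from the Hub and load them locally.
    llm = Llama.from_pretrained(
        repo_id="rdz-falcon/llma_fine-tuned",
        filename="unsloth.F16.gguf",
    )

    # create_chat_completion returns an OpenAI-style dict with a 'choices' list.
    response = llm.create_chat_completion(
        messages=[{"role": "user", "content": "Emotion: excited\nSituation: I won a chess match."}],
        max_tokens=128,
        temperature=0.7,
    )
    print(response["choices"][0]["message"]["content"])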
@@ -180,6 +177,390 @@ You are an AAC (Augmentative and Alternative Communication) user (Elliot) engagi
 <|user|>
 The conversation partner asked: "{question}"
 
+Please generate your response as the AAC user, following the instructions above.</s>
+<|assistant|>
+
+    """.strip()
+
+    PROMPT = PromptTemplate(
+        input_variables=["question", "emotion_analysis", "context", "chat_history"],
+        template=template,
+    )
+    print("\n Prompt:", PROMPT)
+    return PROMPT
+
+class AACAssistant:
+    def __init__(self, document_path):
+        print("Initializing AAC Assistant...")
+        print("Loading document retriever...")
+        self.vectorstore = setup_document_retriever(document_path)
+        print("Configuring emotion LLM client...")
+        # Use the new function to get the client for the API
+        self.emotion_llm = load_emotion_classifier()  # You can pass a different URL if needed
+        print("Loading generation model...")
+        self.llm = load_generation_model()  # Loads the HuggingFace pipeline model (see below)
+        print("Creating prompt templates...")
+        self.prompt = create_prompt_templates()
+        print("Setting up conversation memory...")
+
+        # Set up memory for chat history
+        self.memory = ConversationBufferMemory(
+            memory_key="chat_history",
+            return_messages=True,
+            output_key="answer",
+            # Specify the input key for the memory explicitly
+            input_key="question"
+        )
+
+        # Create retrieval chain (using the main generation LLM)
+        self.chain = ConversationalRetrievalChain.from_llm(
+            llm=self.llm,  # Use the main generation model here
+            retriever=self.vectorstore.as_retriever(search_kwargs={'k': 3}),
+            memory=self.memory,
+            combine_docs_chain_kwargs={"prompt": self.prompt},
+            return_source_documents=True,
+            verbose=True
+        )
+
+        print("AAC Assistant initialized and ready!")
+
+    def get_emotion_analysis(self, llm, situation):
+        """
+        Gets emotion analysis from the configured emotion LLM client.
+        """
+        # Define the prompt structure for the emotion analysis model
+        # (Adjust this based on how you prompted your model in LM Studio)
+        text = situation
+        response = llm.create_chat_completion(
+            messages=[{"role": "user", "content": text}],
+            max_tokens=128,       # Max length of the generated response (adjust as needed)
+            temperature=0.7,      # Controls randomness (adjust)
+            # top_p=0.9,          # Optional: nucleus sampling
+            # top_k=40,           # Optional: top-k sampling
+            stop=["<|eot_id|>"],  # Stop generation at the model's end-of-turn token
+            stream=False,         # Set to True for token-by-token output (like TextStreamer)
+        )
+
+        # Extract and return the response text
+        if response and 'choices' in response and len(response['choices']) > 0:
+            assistant_message = response['choices'][0]['message']['content']
+            print("\nAssistant Response:")
+            print(assistant_message.strip())
+            print("returning:", assistant_message.strip())
+            return assistant_message.strip()
+        else:
+            print("\nNo response generated or unexpected format.")
+            print("Full response:", response)
+
+        return ""
+
+    def process_query(self, user_query):
+        """
+        Process a query from the conversation partner to the AAC user.
+
+        Args:
+            user_query (str): Question asked by the conversation partner
+
+        Returns:
+            str: Generated response for the AAC user to communicate
+        """
+        # Step 1: Get emotion analysis via the emotion_llm client
+        print(f"Getting emotion analysis for query: '{user_query}'")
+        emotion_analysis = self.get_emotion_analysis(self.emotion_llm, user_query)
+        print(f"Emotion Analysis Result: {emotion_analysis}")
+
+        # Step 2: Run the RAG + LLM chain (using the main generation model);
+        # the emotion analysis is passed into the prompt context.
+        print("Running main RAG chain...")
+        # Use invoke instead of the deprecated __call__, passing inputs as a
+        # dictionary that matches the chain's expected input keys.
+        response = self.chain.invoke(
+            {"question": user_query, "emotion_analysis": emotion_analysis}
+        )
+
+        return response["answer"]
+
+# def run_demo():
+#     # Sample personal experiences document path - replace with your actual file
+#     document_path = "aac_user_experiences.txt"
+
+#     # Create a dummy document if it doesn't exist for demonstration
+#     # if not os.path.exists(document_path):
+#     #     with open(document_path, "w") as f:
+#     #         f.write("""
+#     #         I grew up in Seattle and love the rain.
+#     #         My favorite hobby is playing chess, which I've been doing since I was 7 years old.
+#     #         I have a dog named Max who is a golden retriever.
+#     #         I went to college at University of Washington and studied computer science.
+#     #         I enjoy watching sci-fi movies and Star Trek is my favorite series.
+#     #         I've traveled to Japan twice and love Japanese cuisine.
+#     #         Music helps me relax, especially classical piano pieces.
+#     #         I volunteer at the local animal shelter once a month.
+#     #         """)
+
+#     # Initialize the assistant
+#     assistant = AACAssistant(document_path)
+
+#     # Interactive demo
+#     print("\n===== AAC Communication Assistant Demo =====")
+#     print("(Type 'exit' to end the demo)")
+
+#     while True:
+#         try:
+#             user_input = input("\nConversation partner says: ")
+#             if user_input.lower() == 'exit':
+#                 break
+
+#             response = assistant.process_query(user_input)
+#             print(f"\nAAC user communicates: {response}")
+#         except EOFError:  # Handle case where input stream ends unexpectedly
+#             print("\nInput stream closed. Exiting demo.")
+#             break
+#         except KeyboardInterrupt:  # Handle Ctrl+C
+#             print("\nDemo interrupted by user. Exiting.")
+#             break
+#         except Exception as e:
+#             print(f"\nAn unexpected error occurred: {e}")
+#             # Optionally add more specific error handling or logging
+#             # Consider whether to break or continue the loop on error
+#             break  # Exit on error for safety
+
+# try:
+#     from importlib.metadata import PackageNotFoundError
+# except ImportError:
+#     # Define a fallback for older Python versions
+#     class PackageNotFoundError(Exception):
+#         pass
+
+# # Cell 13: Main Execution Block
+# if __name__ == "__main__":
+#     run_demo()
+
+# # !pip install bitsandbytes -q || echo "bitsandbytes installation failed, will use fp16 precision instead"
+# # pip install -U bitsandbytes
+
+# from llama.cpp import Llama
+#
+# llm = Llama.from_pretrained(
+#     repo_id="rdz-falcon/model",
+#     filename="unsloth.F16.gguf",
+# )
+
+# !pip install langchain
+# !pip install langchain-community
+
+# !pip install chromadb
+
+import os
+import torch
+import tempfile
+from langchain.chains import ConversationalRetrievalChain
+from langchain.memory import ConversationBufferMemory
+from langchain_community.document_loaders import TextLoader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_community.vectorstores import Chroma
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain_community.llms import HuggingFacePipeline
+from langchain.prompts import PromptTemplate
+from langchain_community.llms import Ollama
+from langchain_openai import ChatOpenAI
+from transformers import AutoTokenizer, pipeline, AutoModelForCausalLM, BitsAndBytesConfig
+
+def setup_document_retriever(document_path):
+    # Load documents with the AAC user's personal experiences
+    loader = TextLoader(document_path)
+    documents = loader.load()
+
+    # Split documents into chunks
+    text_splitter = RecursiveCharacterTextSplitter(
+        chunk_size=1000,
+        chunk_overlap=200,
+        separators=["\n\n", "\n", " ", ""]
+    )
+    chunks = text_splitter.split_documents(documents)
+
+    # Create embeddings
+    embeddings = HuggingFaceEmbeddings(
+        model_name="sentence-transformers/all-MiniLM-L6-v2",
+        model_kwargs={'device': 'cuda' if torch.cuda.is_available() else 'cpu'}
+    )
+
+    # Create a persistent directory for the ChromaDB
+    persist_directory = os.path.join(tempfile.gettempdir(), "chroma_db")
+
+    # Create Chroma vector store
+    vectorstore = Chroma.from_documents(
+        documents=chunks,
+        embedding=embeddings,
+        persist_directory=persist_directory
+    )
+
+    # Persist the database to disk
+    vectorstore.persist()
+
+    return vectorstore
+
+def load_emotion_classifier(api_base_url="http://127.0.0.1:1234/v1"):
+    """
+    This function configures and returns a LangChain LLM client
+    to interact with an OpenAI-compatible API endpoint (like LM Studio).
+
+    Args:
+        api_base_url (str): The base URL of the OpenAI-compatible API endpoint.
+
+    Returns:
+        ChatOpenAI: A LangChain ChatOpenAI instance configured for the API.
+    """
+    # NOTE: this second definition shadows the llama.cpp-based
+    # load_emotion_classifier defined earlier in the file.
+    print(f"=== CONFIGURING LLM CLIENT FOR API: {api_base_url} ===")
+
+    llm = ChatOpenAI(
+        openai_api_base=api_base_url,
+        openai_api_key="dummy-key",  # Required by LangChain, but not used by LM Studio
+        temperature=0.7,
+        max_tokens=128,
+    )
+    return llm
+
+# --- The following code was commented out or unreachable in the original notebook ---
+# Example code (replace with appropriate code for your model):
+# tokenizer = AutoTokenizer.from_pretrained(model_name)
+# model = AutoModelForCausalLM.from_pretrained(model_name)
+# emotion_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+# input_emotion = "excited"
+# input_situation = text  # 'text' variable was not defined here in the original notebook
+
+# # Format the user message content
+# user_content = f"Emotion: {input_emotion}\nSituation: {input_situation}"
+# # Create the messages list in the standard OpenAI/chat format
+# messages = [
+#     # Note: llama-cpp might not explicitly use a system prompt unless provided here
+#     # or baked into the chat_format handler. You might need to add:
+#     # {"role": "system", "content": "You are an empathetic assistant."},
+#     {"role": "user", "content": user_content},
+# ]
+
+# # --- 3. Generate the response using create_chat_completion -- This method doesn't exist on ChatOpenAI, use invoke instead ---
+# print("Generating response...")
+# try:
+#     response = llm.create_chat_completion(  # This should be llm.invoke(messages)
+#         messages=messages,
+#         max_tokens=128,       # Max length of the generated response (adjust as needed)
+#         temperature=0.7,      # Controls randomness (adjust)
+#         # top_p=0.9,          # Optional: nucleus sampling
+#         # top_k=40,           # Optional: top-k sampling
+#         stop=["<|eot_id|>"],  # Stop generation at the model's end-of-turn token
+#         stream=False,         # Set to True for token-by-token output (like TextStreamer)
+#     )
+
+#     # --- 4. Extract and print the response -- Access response.content with invoke ---
+#     if response and 'choices' in response and len(response['choices']) > 0:
+#         assistant_message = response['choices'][0]['message']['content']
+#         print("\nAssistant Response:")
+#         print(assistant_message.strip())
+#         print("returning:", assistant_message.strip())
+#         return assistant_message.strip()
+#     else:
+#         print("\nNo response generated or unexpected format.")
+#         print("Full response:", response)
+
+#     return ""
+
+# except Exception as e:
+#     print(f"\nAn error occurred during generation: {e}")
+#     return ""
+# --- End of commented out/unreachable code ---
+
+
+def load_generation_model():
+    """Load the generation model as a HuggingFace pipeline wrapped for LangChain."""
+    model_name = "meta-llama/Llama-3.2-3B-Instruct"  # ~3B parameters
+
+    import os
+    from importlib.metadata import PackageNotFoundError  # used in the except clause below
+    token = os.getenv('HF_TOKEN')
+
+    try:
+        # First try loading with 4-bit quantization if bitsandbytes is available
+        from bitsandbytes.cuda_setup.main import get_compute_capability
+
+        # Configuration for 4-bit quantization to reduce memory usage
+        quantization_config = BitsAndBytesConfig(
+            load_in_4bit=True,
+            bnb_4bit_compute_dtype=torch.float16,
+            bnb_4bit_quant_type="nf4",
+        )
+
+        # Load model and tokenizer with quantization
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            quantization_config=quantization_config,
+            device_map="auto",
+            torch_dtype=torch.float16,
+            token=token
+        )
+        print("Model loaded with 4-bit quantization")
+
+    except (ImportError, ModuleNotFoundError, PackageNotFoundError) as e:
+        print(f"Quantization not available: {e}")
+        print("Loading model in fp16 precision without quantization")
+
+        # Fallback to fp16 without quantization
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            device_map="cuda:1",
+            torch_dtype=torch.float16,
+            low_cpu_mem_usage=True,
+        )
+
+    # Create text generation pipeline
+    generation_pipeline = pipeline(
+        "text-generation",
+        model=model,
+        tokenizer=tokenizer,
+        max_new_tokens=512,
+        do_sample=True,
+        temperature=0.7,
+        top_p=0.95,
+        repetition_penalty=1.1,
+        pad_token_id=tokenizer.eos_token_id
+    )
+
+    # Create LangChain wrapper
+    llm = HuggingFacePipeline(pipeline=generation_pipeline)
+
+    return llm
+
+def create_prompt_templates():
+    """Create prompt templates for the assistant"""
+
+    template = """
+<|system|>
+You are an AAC (Augmentative and Alternative Communication) user (Elliot) engaging in a conversation. Your responses must reflect factual details provided in your personal context, be empathetic as guided by the emotion analysis, and align naturally with your previous chat history. You will respond directly as the AAC user, speaking in the first person (using "I", "my", "me").
+
+**Instructions:**
+1. Understand the question asked by the conversation partner.
+2. Use the provided "Context" to include accurate personal details about your life (Elliot).
+3. Reflect the empathetic tone described in the "Empathetic Response Guidance".
+4. Ensure your response fits logically within the "Chat History".
+5. Keep your response concise, empathetic, and natural.
+6. Ignore the empathetic tone described in the "Empathetic Response Guidance" if it is not related to the conversation.
+
+**Context:**
+{context}
+
+**Chat History:**
+{chat_history}
+
+**Empathetic Response Guidance:**
+{emotion_analysis}</s>
+<|user|>
+The conversation partner asked: "{question}"
+
 Please generate your response as the AAC user, following the instructions above.</s>
 <|assistant|>
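
For manual smoke-testing, a minimal usage sketch of the assistant end to end (assuming src/rag.py is importable as rag and an aac_user_experiences.txt file with personal context exists, mirroring the commented-out run_demo above):

    # Hypothetical smoke test; file name and import path are illustrative.
    from rag import AACAssistant

    # Builds the Chroma retriever, emotion and generation LLM clients,
    # conversation memory, and the ConversationalRetrievalChain.
    assistant = AACAssistant("aac_user_experiences.txt")

    # process_query() runs emotion analysis on the partner's question,
    # retrieves personal context, and generates a first-person reply.
    reply = assistant.process_query("How was your weekend?")
    print(f"AAC user communicates: {reply}")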