Shami96 committed on
Commit
8165700
·
verified ·
1 Parent(s): aa235e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -19
app.py CHANGED
@@ -6,6 +6,8 @@ from langchain_community.vectorstores import Chroma
6
  from langchain_community.embeddings import HuggingFaceEmbeddings
7
  from langchain_groq import ChatGroq
8
  from langchain_community.document_loaders import PyPDFLoader
 
 
9
 
10
  # Configuration
11
  HF_REPO_ID = "Shami96/7solar-documentation"
@@ -36,35 +38,46 @@ def initialize_components():
36
  )
37
  chunks = text_splitter.split_documents(documents)
38
  embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
39
- return Chroma.from_documents(chunks, embeddings)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
# Chat function
def respond(message, history):
    """Answer a user message against the 7Solar document index.

    Parameters
    ----------
    message : str
        The user's question or greeting.
    history : list
        Gradio chat history (unused here; required by the chat-UI callback
        signature).

    Returns
    -------
    str
        The assistant's reply, or an error description on failure.
    """
    try:
        # Answer trivial greetings first so they never trigger the
        # expensive one-time vector-store initialization below.
        if message.lower() in ["hi", "hello", "hey"]:
            return "Hello! I'm your 7Solar assistant. How can I help you today?"

        # Lazily build the vector store once per process.  The global
        # declaration sits at function level (not inside the if) so the
        # name binding is unambiguous for the whole function body.
        global vector_db
        if 'vector_db' not in globals():
            vector_db = initialize_components()

        # Retrieve the three most relevant chunks for this question.
        docs = vector_db.similarity_search(message, k=3)
        if not docs:
            return "I couldn't find relevant information. Please try another question about 7Solar."

        # Generate a grounded answer from the retrieved context only.
        llm = ChatGroq(
            model_name="llama3-70b-8192",
            temperature=0.3
        )
        context = "\n\n".join(doc.page_content for doc in docs)
        response = llm.invoke(
            f"Using only this context:\n{context}\n\nQuestion: {message}\nAnswer:"
        )
        return response.content
    except Exception as e:
        # Top-level boundary: report the error to the chat UI rather
        # than crashing the app.
        return f"An error occurred: {str(e)}"
70
 
 
6
  from langchain_community.embeddings import HuggingFaceEmbeddings
7
  from langchain_groq import ChatGroq
8
  from langchain_community.document_loaders import PyPDFLoader
9
+ from langchain.memory import ConversationBufferMemory
10
+ from langchain.chains import ConversationalRetrievalChain
11
 
12
  # Configuration
13
  HF_REPO_ID = "Shami96/7solar-documentation"
 
38
  )
39
  chunks = text_splitter.split_documents(documents)
40
  embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
41
+ vectorstore = Chroma.from_documents(chunks, embeddings)
42
+
43
+ # Initialize LLM
44
+ llm = ChatGroq(
45
+ model_name="llama3-70b-8192",
46
+ temperature=0.3
47
+ )
48
+
49
+ # Create conversation memory
50
+ memory = ConversationBufferMemory(
51
+ memory_key="chat_history",
52
+ return_messages=True
53
+ )
54
+
55
+ # Create retrieval chain with memory
56
+ qa_chain = ConversationalRetrievalChain.from_llm(
57
+ llm=llm,
58
+ retriever=vectorstore.as_retriever(),
59
+ memory=memory,
60
+ chain_type="stuff"
61
+ )
62
+
63
+ return qa_chain
64
 
65
# Chat function
def respond(message, history):
    """Answer a user message via the conversational retrieval chain.

    Parameters
    ----------
    message : str
        The user's question or greeting.
    history : list
        Gradio chat history (unused here; the chain keeps its own
        ConversationBufferMemory, and the chat-UI callback requires
        this parameter).

    Returns
    -------
    str
        The assistant's reply, or an error description on failure.
    """
    try:
        # Answer trivial greetings first so they never trigger the
        # expensive one-time chain/vector-store initialization below.
        if message.lower() in ["hi", "hello", "hey"]:
            return "Hello! I'm your 7Solar assistant. How can I help you today?"

        # Lazily build the retrieval chain once per process.  The global
        # declaration sits at function level (not inside the if) so the
        # name binding is unambiguous for the whole function body.
        global qa_chain
        if 'qa_chain' not in globals():
            qa_chain = initialize_components()

        # Use .invoke(...): calling the chain directly (qa_chain({...}))
        # is the deprecated Chain.__call__ style in modern LangChain.
        result = qa_chain.invoke({"question": message})
        return result["answer"]
    except Exception as e:
        # Top-level boundary: report the error to the chat UI rather
        # than crashing the app.
        return f"An error occurred: {str(e)}"
83