karthigrj committed on
Commit
c14624d
·
verified ·
1 Parent(s): 7da55c2

Upload interface2_v1.0.py

Browse files
Files changed (1) hide show
  1. interface2_v1.0.py +68 -0
interface2_v1.0.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import pickle
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.chat_message_histories import SQLChatMessageHistory
from langchain_groq import ChatGroq
from langchain_core.runnables import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
from langchain_core.runnables.history import RunnableWithMessageHistory
from operator import itemgetter

# Load the pre-built Qdrant vector store from disk.
# NOTE(security): pickle.load can execute arbitrary code embedded in the
# file — only ship/load qdrant_vectorstore.pkl from a trusted source.
with open("qdrant_vectorstore.pkl", "rb") as f:
    qdrant_vectorstore = pickle.load(f)
14
+
15
def echo_user_input(*args):
    """Answer a chat message with retrieval-augmented generation.

    Args:
        *args: Positional arguments supplied by ``gr.ChatInterface``;
            ``args[0]`` is the user's message. (The second argument,
            Gradio's chat history, is unused here because conversation
            history is persisted separately in SQLite.)

    Returns:
        str: Markdown combining the retrieved context and the LLM reply.
    """
    import os  # local import: only needed for the API-key lookup below

    user_input = args[0]  # extract the user's message from Gradio's args

    # Retriever feeds the chain; similarity_search is run separately so the
    # retrieved passages can also be shown to the user in the output.
    qdrant_retriever = qdrant_vectorstore.as_retriever()
    found_docs = qdrant_vectorstore.similarity_search(user_input)
    context_str = "".join(doc.page_content + "\n\n" for doc in found_docs)

    # Prompt: system message with retrieved context, prior turns, new input.
    prompt = ChatPromptTemplate.from_messages([
        ("system", "Act as a helpful AI Assistant. Here is some {context}"),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{human_input}"),
    ])

    # Per-session chat history persisted in a local SQLite database.
    def get_session_history(session_id):
        return SQLChatMessageHistory(session_id, "sqlite:///memory.db")

    # SECURITY FIX: the Groq API key was hard-coded here (and therefore
    # leaked with the source). Read it from the environment instead; the
    # previously committed key must be revoked.
    groq_api_key = os.environ.get("GROQ_API_KEY", "")
    llm = ChatGroq(groq_api_key=groq_api_key, model_name="Gemma2-9b-It")

    # Chain: inject retrieved context, render prompt, call LLM, parse text.
    context = itemgetter("human_input") | qdrant_retriever
    first_step = RunnablePassthrough.assign(context=context)
    llm_chain = first_step | prompt | llm | StrOutputParser()

    conv_chain = RunnableWithMessageHistory(
        llm_chain,
        get_session_history,
        input_messages_key="human_input",
        history_messages_key="history",
    )

    # Fixed session id: all users currently share one conversation thread.
    # TODO(review): derive a per-user session id if isolation is required.
    session_id = 'bond007'

    llm_response = conv_chain.invoke(
        {"human_input": user_input},
        {'configurable': {'session_id': session_id}},
    )

    # Show the retrieved context alongside the model's answer.
    combined_output = f"**Retrieved Context:**\n{context_str}\n\n**Response:**\n{llm_response}"
    return combined_output
58
+
59
+ # Define the Gradio chat interface
60
+ interface = gr.ChatInterface(
61
+ fn=echo_user_input,
62
+ title="Comply2Reg Chat Assistant",
63
+ description="Type your question and press enter to see a conversational response. 🤖",
64
+ )
65
+
66
+ # Launch the interface with share=True for a public link
67
+ if __name__ == "__main__":
68
+ interface.launch(share=True)