sid22669 committed on
Commit
54dcef0
·
verified ·
1 Parent(s): 1c049c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -19
app.py CHANGED
@@ -9,52 +9,52 @@ from langchain_openai import ChatOpenAI
9
  from langchain.chains.combine_documents import create_stuff_documents_chain
10
  from langchain.embeddings import HuggingFaceEmbeddings
11
 
 
12
  embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
13
 
 
14
  persist_directory = 'vec_db'
 
 
15
 
16
- vectordb = Chroma(persist_directory=persist_directory,
17
- embedding_function=embedding_model)
18
-
19
- vectordb_retriever = vectordb.as_retriever(search_kwargs={'k':5})
20
-
21
  llm = ChatOpenAI(model="gpt-4.1-nano", temperature=0.7)
22
 
 
23
  with open("instructions.txt", 'r') as file:
24
  instructions = file.read()
25
 
26
-
27
- # Custom prompt
28
  custom_prompt = ChatPromptTemplate.from_messages([
29
  ("system", instructions),
30
  MessagesPlaceholder(variable_name="chat_history"),
31
  ("user", "Question: {input}\nContext: {context}")
32
  ])
33
 
34
- # Memory
35
  memory = ConversationBufferMemory(
36
  memory_key="chat_history",
37
  return_messages=True
38
  )
39
 
 
40
  question_answer_chain = create_stuff_documents_chain(llm, custom_prompt)
41
-
42
  chain = create_retrieval_chain(vectordb_retriever, question_answer_chain)
43
 
 
44
  def conversate_assistant(query, history):
45
  greetings = {"hey", "hi", "hello"}
46
  normalized_query = query.strip().lower()
47
 
48
- if len(memory.load_memory_variables({})["chat_history"]) >=6:
49
- chat_history = memory.load_memory_variables({})["chat_history"][-6::]
50
- else:
51
- chat_history = memory.load_memory_variables({})["chat_history"]
52
 
53
- # If greeting, skip retrieval and context
54
  if normalized_query in greetings:
55
  response = question_answer_chain.invoke({
56
  "input": query,
57
- "context": [], # empty context for greetings
58
  "chat_history": chat_history
59
  })
60
  answer = response
@@ -64,15 +64,17 @@ def conversate_assistant(query, history):
64
  "chat_history": chat_history
65
  })
66
  answer = response['answer']
67
-
68
- # Save to memory
69
  memory.save_context({"input": query}, {"output": answer})
70
 
71
- return answer
72
 
 
73
  demo = gr.ChatInterface(
74
  conversate_assistant,
75
  type="messages"
76
  )
77
 
78
- demo.launch()
 
 
9
  from langchain.chains.combine_documents import create_stuff_documents_chain
10
  from langchain.embeddings import HuggingFaceEmbeddings
11
 
12
# Embedding model used to vectorize both the stored documents and incoming queries.
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Vector store: load the persisted Chroma database and expose a top-5 retriever.
persist_directory = 'vec_db'
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_model)
vectordb_retriever = vectordb.as_retriever(search_kwargs={'k': 5})

# LLM
llm = ChatOpenAI(model="gpt-4.1-nano", temperature=0.7)

# System instructions for the assistant, read once at startup.
# Explicit encoding avoids platform-dependent default decoding of the file.
with open("instructions.txt", 'r', encoding='utf-8') as file:
    instructions = file.read()

# Custom prompt: system instructions, then the running chat history, then the
# user question together with the retrieved document context.
custom_prompt = ChatPromptTemplate.from_messages([
    ("system", instructions),
    MessagesPlaceholder(variable_name="chat_history"),
    ("user", "Question: {input}\nContext: {context}")
])

# Conversation memory shared across calls; return_messages=True yields message
# objects that the MessagesPlaceholder above can consume directly.
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True
)

# Chains: stuff retrieved documents into the prompt, then wrap with retrieval.
question_answer_chain = create_stuff_documents_chain(llm, custom_prompt)
chain = create_retrieval_chain(vectordb_retriever, question_answer_chain)
43
 
44
+ # Main interaction function
45
def conversate_assistant(query, history):
    """Answer a user message with retrieval-augmented generation.

    Parameters
    ----------
    query : str
        The user's message.
    history : list
        Gradio-supplied chat history (unused here; the conversation state is
        kept server-side in ``memory`` instead).

    Returns
    -------
    str
        The assistant's answer.
    """
    greetings = {"hey", "hi", "hello"}
    normalized_query = query.strip().lower()

    # Keep only the last 6 messages. Slicing with [-6:] already returns the
    # whole list when it is shorter, so no explicit length check is needed,
    # and memory is consulted only once.
    chat_history = memory.load_memory_variables({})["chat_history"][-6:]

    if normalized_query in greetings:
        # Greetings need no document retrieval: invoke the QA chain directly.
        response = question_answer_chain.invoke({
            "input": query,
            "context": [],  # Empty context for greetings
            "chat_history": chat_history
        })
        # create_stuff_documents_chain returns the answer text itself.
        answer = response
    else:
        # Full RAG path: the retrieval chain fetches context and answers.
        response = chain.invoke({
            "input": query,
            "chat_history": chat_history
        })
        answer = response['answer']

    # Persist the exchange so later turns see it in chat_history.
    memory.save_context({"input": query}, {"output": answer})

    return answer
72
 
73
+ # Gradio interface
74
# Gradio chat UI wired to the assistant; type="messages" delivers history as
# OpenAI-style role/content entries.
demo = gr.ChatInterface(
    conversate_assistant,
    type="messages"
)

# Launch only when executed as a script, so the module stays importable
# (e.g. for tests) without side effects.
if __name__ == "__main__":
    demo.launch()