pratikshahp commited on
Commit
a63d980
·
verified ·
1 Parent(s): a656f6d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +88 -0
app.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ from langchain_openai import ChatOpenAI
4
+ from dotenv import load_dotenv
5
+ from langchain_huggingface import HuggingFaceEmbeddings
6
+ from langchain_chroma import Chroma
7
+ from langchain_core.documents import Document
8
# Load environment variables from a local .env file so the OpenAI key is not
# hard-coded. If OPENAI_API_KEY is unset, api_key is None and ChatOpenAI
# calls will fail at request time rather than here.
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# Initialize the OpenAI chat model through LangChain.
model = ChatOpenAI(
    model="gpt-4o-mini",
    openai_api_key=api_key
)

# Sentence-transformer embeddings used to vectorize stored chat messages.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Chroma vector store persisted to local disk. A single collection holds every
# user's chat history; entries are distinguished by a "user_id" metadata field
# attached when messages are stored.
vector_store = Chroma(
    collection_name="chat_collection",  # collection name inside the Chroma DB
    embedding_function=embeddings,
    persist_directory="./chroma_langchain_db",  # local directory for persisted data
)
28
# --- Chat-memory helpers -----------------------------------------------------

def get_chat_history(user_id):
    """Return the stored chat messages for *user_id* as one newline-joined string.

    Bug fix: the original passed ``filter={"user_id": ...}`` as a keyword to
    ``retriever.invoke``, but ``VectorStoreRetriever`` only applies filters
    supplied through ``search_kwargs`` at retriever construction — the per-user
    filter was silently ignored, so every user's history could be retrieved.
    The filter now lives in ``search_kwargs`` where Chroma actually honors it.

    Returns an empty string when no history exists for the user.
    """
    retriever = vector_store.as_retriever(
        # NOTE(review): MMR diversifies results; plain "similarity" search may
        # be a better fit for recalling chat history — confirm intent.
        search_type="mmr",
        search_kwargs={
            "k": 100,        # number of results to return
            "fetch_k": 100,  # number of candidates fetched before MMR re-ranking
            "filter": {"user_id": user_id},  # restrict to this user's documents
        },
    )
    results = retriever.invoke("Chat history")
    user_history = [doc.page_content for doc in results]
    return "\n".join(user_history) if user_history else ""
43
def store_chat_message(user_id, user_input, bot_response):
    """Persist a single user/bot exchange in the Chroma vector store.

    The exchange is stored as one document whose metadata records the owning
    user's id, which get_chat_history later uses to scope retrieval.
    """
    entry = Document(
        page_content=f"User: {user_input}\nBot: {bot_response}",
        metadata={"user_id": user_id},
    )
    vector_store.add_documents([entry])
49
# --- Response generation ------------------------------------------------------

def generate_response(username, user_input):
    """Generate a chatbot reply for *username* and persist the exchange.

    Builds a prompt from the system instruction, any recalled history, and the
    new user message; invokes the model; stores the exchange; and returns the
    prior history plus the new user/bot turn as one display string.

    Fix: removed a leftover ``print(messages)`` debug statement that dumped the
    full prompt — including the user's entire stored history — to stdout on
    every call.
    """
    # Normalize the selected username into a stable metadata key.
    user_id = username.lower().strip()
    history = get_chat_history(user_id)

    # System prompt, then (optionally) the recalled history, then the new turn.
    messages = [{"role": "system", "content": "You are a helpful AI assistant. Please provide answer in 20 words only"}]
    if history:
        messages.append({"role": "user", "content": f"Chat history:\n{history}"})
    messages.append({"role": "user", "content": user_input})

    response = model.invoke(messages)
    bot_response = response.content

    # Persist this exchange so future calls can recall it.
    store_chat_message(user_id, user_input, bot_response)

    # Return the whole conversation: prior history plus the latest exchange.
    return f"{history}\nUser: {user_input}\nBot: {bot_response}"
68
# --- Gradio UI ----------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# πŸ”₯ Multi-User Chatbot with GPT-4 and Memory (ChromaDB)")

    # Each dropdown choice maps to its own chat-history partition in Chroma.
    user_dropdown = gr.Dropdown(
        label="Select User",
        choices=["Aarya", "Ved", "Vivaan"],
    )

    # Message entry and read-only response display.
    message_box = gr.Textbox(label="Your Message", placeholder="Type here...")
    reply_box = gr.Textbox(label="Chatbot Response", interactive=False)

    # Wire the send button to the response generator.
    send_button = gr.Button("Send")
    send_button.click(
        generate_response,
        inputs=[user_dropdown, message_box],
        outputs=reply_box,
    )

# Run the Gradio app.
demo.launch()