GOKULSINGHSHAH123 committed on
Commit
01c38e7
·
verified ·
1 Parent(s): 8a098e9

Create chatbot_backend.py

Browse files
Files changed (1) hide show
  1. chatbot_backend.py +194 -0
chatbot_backend.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, Any, List, Optional
3
+ import streamlit as st
4
+ from langchain_google_genai import ChatGoogleGenerativeAI
5
+ from langchain.memory import ConversationBufferMemory # Using simpler memory
6
+ from langchain.chains import ConversationChain
7
+ from langchain.schema import BaseMessage, HumanMessage, AIMessage
8
+ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
9
+ from langchain.callbacks.base import BaseCallbackHandler
10
+ from langchain.prompts import PromptTemplate
11
+ import asyncio
12
+ from config import config
13
+
14
class StreamlitCallbackHandler(BaseCallbackHandler):
    """Streams LLM tokens into a Streamlit container as they arrive."""

    def __init__(self, container):
        """Remember the target container and start with an empty transcript."""
        self.container = container
        self.text = ""

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        """Append the new token and re-render the transcript with a cursor."""
        self.text = self.text + token
        # Trailing block character acts as a typing cursor while streaming.
        self.container.markdown(self.text + "β–Œ")
25
+
26
class GeminiChatBot:
    """Gemini-powered chatbot built on LangChain's ConversationChain.

    Owns the LLM client, a string-based conversation buffer memory, and the
    conversation chain. Exposes streaming and non-streaming responses plus
    helpers to seed, inspect, and reset the conversation history.
    """

    def __init__(self, api_key: str):
        """Store the API key and build the LLM/memory/chain components.

        Raises:
            Exception: whatever component construction raised, after the
                error has been surfaced in the Streamlit UI.
        """
        self.api_key = api_key
        self.llm = None
        self.memory = None
        self.conversation_chain = None
        self._initialize_components()

    def _initialize_components(self):
        """Initialize the Gemini LLM, conversation memory, and chain."""
        try:
            # Streaming enabled so per-token callbacks fire during generation.
            self.llm = ChatGoogleGenerativeAI(
                model=config.MODEL_NAME,
                google_api_key=self.api_key,
                temperature=config.TEMPERATURE,
                max_tokens=config.MAX_TOKENS,
                streaming=True,
                convert_system_message_to_human=True
            )

            # memory_key must be 'history' to match the prompt variable below.
            self.memory = ConversationBufferMemory(
                memory_key="history",
                return_messages=False,  # keep history as a plain string
                input_key="input"
            )

            # Prompt whose input variables line up with the memory config.
            template = """The following is a friendly conversation between a human and an AI assistant. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

Current conversation:
{history}
Human: {input}
AI:"""

            prompt = PromptTemplate(
                input_variables=["history", "input"],
                template=template
            )

            self.conversation_chain = ConversationChain(
                llm=self.llm,
                memory=self.memory,
                prompt=prompt,
                verbose=True
            )

            print("βœ… Chatbot components initialized successfully")

        except Exception as e:
            st.error(f"❌ Failed to initialize chatbot: {str(e)}")
            # Bare raise preserves the original traceback (unlike `raise e`).
            raise

    def get_response(self, user_input: str, stream_container=None) -> str:
        """Run one conversation turn and return the model's reply.

        Args:
            user_input: The user's message.
            stream_container: Optional Streamlit container; when given, tokens
                are streamed into it via StreamlitCallbackHandler.

        Returns:
            The response text, or a generic apology string on failure.
        """
        try:
            if stream_container:
                callback_handler = StreamlitCallbackHandler(stream_container)
                response = self.conversation_chain.invoke(
                    {"input": user_input},
                    {"callbacks": [callback_handler]}
                )
            else:
                response = self.conversation_chain.invoke({"input": user_input})
            return response.get('response', '')

        except Exception as e:
            error_msg = f"Error generating response: {str(e)}"
            st.error(error_msg)
            return "I apologize, but I encountered an error processing your request. Please try again."

    def clear_memory(self):
        """Clear conversation memory and confirm in the UI."""
        if self.memory:
            self.memory.clear()
            st.success("🧹 Conversation history cleared!")

    def get_conversation_history(self) -> List[Dict[str, Any]]:
        """Return the buffered conversation as [{'role', 'content'}, ...].

        Returns an empty list when memory is absent or cannot be read.
        """
        if not self.memory:
            return []
        try:
            memory_vars = self.memory.load_memory_variables({})
            history = memory_vars.get('history', '')
            return self._parse_history(history)
        except Exception as e:
            st.warning(f"Could not load conversation history: {str(e)}")
            return []

    @staticmethod
    def _parse_history(history: str) -> List[Dict[str, Any]]:
        """Parse a 'Human:/AI:'-marked transcript string into role dicts.

        Continuation lines (no marker) are appended to the current message
        with a single space separator.
        """
        messages: List[Dict[str, Any]] = []
        current_role = None
        current_content = ""

        def flush():
            # Emit the message accumulated so far, if any.
            if current_role and current_content:
                messages.append({"role": current_role, "content": current_content.strip()})

        for part in (history or '').split('\n'):
            part = part.strip()
            if part.startswith('Human:'):
                flush()
                current_role = "user"
                current_content = part[6:]  # drop 'Human:' marker
            elif part.startswith('AI:'):
                flush()
                current_role = "assistant"
                current_content = part[3:]  # drop 'AI:' marker
            elif current_content:
                current_content += " " + part
            else:
                current_content = part
        flush()  # trailing message has no following marker to trigger it
        return messages

    def add_system_context(self, context: str):
        """Seed memory with a synthetic exchange establishing context.

        Args:
            context: Instructions/background the model should keep in mind.
        """
        system_input = f"Please remember this context for our conversation: {context}"
        system_response = "I understand and will keep this context in mind for our conversation."

        # Write the exchange directly into memory, bypassing the LLM.
        self.memory.save_context(
            {"input": system_input},
            {"response": system_response}
        )
167
+
168
def initialize_chatbot() -> Optional[GeminiChatBot]:
    """Build a GeminiChatBot from the configured API key.

    Returns the chatbot pre-seeded with a default system context, or None
    when the key is missing or construction fails (errors are surfaced via
    Streamlit messages).
    """
    api_key = config.get_api_key()

    # Guard clause: without a key there is nothing to construct.
    if not api_key:
        st.error("πŸ”‘ **Google API Key Required!** Please set your GOOGLE_API_KEY in the sidebar.")
        st.info("πŸ‘ˆ Enter your API key in the sidebar to get started")
        return None

    try:
        chatbot = GeminiChatBot(api_key)
        chatbot.add_system_context(
            "You are an intelligent AI assistant powered by Google Gemini and LangChain. Provide helpful, detailed responses and maintain conversation context."
        )
    except Exception as e:
        st.error(f"❌ Failed to initialize chatbot: {str(e)}")
        st.info("Please check your API key and try again.")
        return None
    return chatbot
192
+
193
# Public API: names exported by `from chatbot_backend import *`.
__all__ = ['GeminiChatBot', 'initialize_chatbot']