import datetime
import os
import re
from typing import Dict, List, Optional

import gradio as gr
from rich.console import Console

from chat_interface import HuggingFaceChat
from memory_manager import MemoryManager

console = Console()

# Version tracking
APP_VERSION = "v2.0.0 - Enhanced Conversation Quality"


class MemoryChatApp:
    """Main application that combines memory management with Hugging Face chat."""

    # Keywords whose presence (as whole words) marks an exchange as worth
    # remembering.  Matched with \b boundaries so e.g. "like" does not fire
    # inside "unlikely".
    _IMPORTANT_KEYWORDS = [
        "remember", "important", "note", "fact", "detail", "information",
        "love", "hate", "like", "dislike", "favorite", "never", "always",
        "birthday", "anniversary", "special", "urgent", "must", "should",
    ]

    # (lower-case marker, memory-sentence template) pairs.  The text that
    # follows the marker in the user's message is substituted into the
    # template with its ORIGINAL casing preserved.
    _PERSONAL_PATTERNS = [
        ("my name is", "User's name is {}"),
        ("i live in", "User lives in {}"),
        ("i work at", "User works at {}"),
        ("i study", "User studies {}"),
        ("my birthday", "User's birthday is {}"),
        ("my favorite", "User's favorite {}"),
    ]

    def __init__(self):
        """Initialize the application: memory store, chat backend, history."""
        self.memory_manager = MemoryManager()
        self.chat_interface = HuggingFaceChat()

        # Running transcript of the current session as
        # {"role": "user"|"assistant", "content": str} dicts.
        self.conversation_history = []

        # Report how many memories were loaded from persistent storage.
        summary = self.memory_manager.get_summary()
        console.print(f"[blue]Loaded {summary['total_memories']} memories[/blue]")

    def should_record_memory(self, user_input: str, ai_response: str) -> bool:
        """Determine if the conversation should be recorded as a memory.

        Args:
            user_input: The user's input.
            ai_response: The AI's response.

        Returns:
            True if the exchange contains an important keyword (whole-word
            match) or a personal-information phrase.
        """
        # Combine user input and AI response for analysis.
        combined_text = f"{user_input} {ai_response}".lower()

        # Whole-word keyword match.  This fixes the previous substring
        # matching, where "like" matched inside "unlikely"/"alike".
        for keyword in self._IMPORTANT_KEYWORDS:
            if re.search(rf"\b{re.escape(keyword)}\b", combined_text):
                return True

        # Multi-word personal-information markers are matched as phrases.
        # Phrases such as "i love" / "i like" are already covered by the
        # single-word keywords above.
        for marker, _template in self._PERSONAL_PATTERNS:
            if marker in combined_text:
                return True

        return False

    def _extract_after(self, text: str, marker: str) -> Optional[str]:
        """Return the original-cased text following the LAST occurrence of
        *marker* in *text* (matched case-insensitively), or None if absent.

        rfind mirrors the old ``split(marker)[-1]`` last-occurrence
        semantics, but slices the original string so casing is kept.
        """
        idx = text.lower().rfind(marker)
        if idx == -1:
            return None
        return text[idx + len(marker):].strip()

    def extract_memory_content(self, user_input: str, ai_response: str) -> str:
        """Extract the most important information to store as a memory.

        Args:
            user_input: The user's input.
            ai_response: The AI's response (currently unused; kept for
                interface compatibility).

        Returns:
            The content to store as a memory.
        """
        # If the user explicitly wants to record something, keep it verbatim.
        if any(word in user_input.lower() for word in ("remember", "note", "save")):
            return user_input

        # Pull out personal facts with the user's original casing preserved
        # (the previous implementation stored lower-cased fragments, e.g.
        # "User's name is alice").  Empty fragments — a marker at the very
        # end of the message — are skipped.
        personal_info = []
        for marker, template in self._PERSONAL_PATTERNS:
            fragment = self._extract_after(user_input, marker)
            if fragment:
                personal_info.append(template.format(fragment))

        if personal_info:
            return f"Personal info: {', '.join(personal_info)}"

        # Default to user input if no specific patterns were found.
        return user_input

    def chat_with_memory(self, user_input: str) -> str:
        """Chat with the AI while managing memories.

        Args:
            user_input: The user's input.

        Returns:
            The AI's response.
        """
        # Bail out early if the model could not be loaded.
        if not self.chat_interface.check_model_availability():
            return (
                "I'm sorry, but I couldn't load the AI model. Please check "
                "your internet connection and model availability."
            )

        # Add user input to conversation history.
        self.conversation_history.append({"role": "user", "content": user_input})

        # Retrieve up to five relevant memories to provide context.
        relevant_memories = self.memory_manager.retrieve_memories(user_input, k=5)

        # Format the top three memories into a context preamble.
        context = ""
        if relevant_memories:
            numbered = [
                f"{i}. {memory['content']}"
                for i, memory in enumerate(relevant_memories[:3], 1)
            ]
            context = (
                "Here's what I remember about you:\n"
                + "\n".join(numbered)
                + "\n\n"
            )

        # Build the prompt with memory context and generate the reply.
        prompt = self.build_prompt(user_input, context)
        ai_response = self.chat_interface.generate_response(prompt)

        # Add AI response to conversation history.
        self.conversation_history.append({"role": "assistant", "content": ai_response})

        # Persist the exchange when it looks important.
        if self.should_record_memory(user_input, ai_response):
            memory_content = self.extract_memory_content(user_input, ai_response)
            context_info = f"During conversation at {self.get_current_time()}"
            self.memory_manager.add_memory(
                content=memory_content,
                context=context_info,
                memory_type="conversation",
            )

        return ai_response

    def build_prompt(self, user_input: str, context: str) -> str:
        """Build the prompt for the AI model.

        Args:
            user_input: The user's input.
            context: Context block assembled from relevant memories
                (may be empty).

        Returns:
            The prompt to send to the AI model.
        """
        # A natural conversation prompt: memories first, then the message.
        prompt = f"""{context}The user says: "{user_input}"

As an AI assistant, respond naturally and helpfully. Consider any relevant memories above when crafting your response. Be conversational, engaging, and provide helpful information.

Your response: """
        return prompt

    def get_current_time(self) -> str:
        """Get current time in a readable format (YYYY-MM-DD HH:MM:SS)."""
        # datetime is imported at module level (was previously imported
        # inline on every call).
        return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def get_memories_summary(self) -> str:
        """Get a Markdown summary of stored memories, broken down by type."""
        summary = self.memory_manager.get_summary()
        memory_types = summary['memory_types']

        summary_text = f"""
## Memory Summary

**Total Memories:** {summary['total_memories']}

**Memory Types:**
"""
        for memory_type, count in memory_types.items():
            summary_text += f"- {memory_type}: {count}\n"

        return summary_text

    def get_recent_memories(self) -> str:
        """Get the most recent memories formatted as Markdown."""
        recent_memories = self.memory_manager.get_recent_memories()

        if not recent_memories:
            return "No memories stored yet."

        memory_text = "## Recent Memories\n\n"
        for memory in recent_memories:
            # timestamp[:19] trims an ISO timestamp to second precision —
            # assumes ISO-8601 format from the memory store; TODO confirm.
            memory_text += f"**{memory['type'].title()}** ({memory['timestamp'][:19]}):\n"
            memory_text += f"{memory['content']}\n\n"

        return memory_text

    def clear_all_memories(self) -> str:
        """Clear all memories and return a confirmation message."""
        self.memory_manager.clear_memories()
        return "All memories have been cleared."
def get_model_info(self) -> str: """Get information about the AI model.""" info = self.chat_interface.get_model_info() return f""" ## Model Information **App Version:** {APP_VERSION} **Model:** {info['model_name']} **Device:** {info['device']} **Available:** {'Yes' if info['available'] else 'No'} *If the model is not available, responses will be limited.* """ def run_gradio_interface(self): """Run the Gradio interface.""" with gr.Blocks(title="Memory Chat") as demo: gr.Markdown(f"# 🤖 Memory Chat with Hugging Face") gr.Markdown(f"**Version: {APP_VERSION}**") with gr.Tab("Chat"): chatbot = gr.Chatbot() msg = gr.Textbox(label="Message", placeholder="Type your message here...") clear = gr.Button("Clear Conversation") def user(user_message, history): # Get AI response ai_response = self.chat_with_memory(user_message) # Update conversation history if history is None: history = [] history.append({"role": "user", "content": user_message}) history.append({"role": "assistant", "content": ai_response}) return "", history def clear_history(): self.conversation_history = [] return None msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False) clear.click(clear_history, None, chatbot, queue=False) with gr.Tab("Memories"): memories_summary = gr.Markdown(value=self.get_memories_summary()) recent_memories = gr.Markdown(value=self.get_recent_memories()) clear_memories_btn = gr.Button("Clear All Memories") model_info = gr.Markdown(value=self.get_model_info()) def refresh_memories(): return self.get_memories_summary(), self.get_recent_memories() refresh_btn = gr.Button("Refresh Memories") refresh_btn.click(refresh_memories, outputs=[memories_summary, recent_memories]) clear_memories_btn.click(self.clear_all_memories, outputs=[]) with gr.Tab("About"): gr.Markdown(""" ## About This Application This application combines Hugging Face AI models with a memory system that records important information from your conversations. 
### Features: - 🤖 Chat with Hugging Face models - 💾 Automatic memory recording - 📚 View and manage your memories - 🔍 Search through your memories ### How it works: 1. Have a conversation with the AI 2. The system automatically detects important information 3. Important memories are stored and can be recalled in future conversations 4. View your memory timeline and statistics ### Memory Types: - **General**: General information and facts - **Conversation**: Important details from chats - **Preferences**: Likes, dislikes, favorites - **Important**: Critical information marked as important """) return demo def main(): """Main entry point.""" console.print("[green]🚀 Starting Memory Chat Application...[/green]") # Create and run the application app = MemoryChatApp() # Run Gradio interface demo = app.run_gradio_interface() demo.launch(server_name="0.0.0.0", server_port=7860, share=False) if __name__ == "__main__": main()