import gradio as gr
from typing import Generator, List
import json
import os
from pathlib import Path

from src.agents.manager_agent import manager_agent

# Custom streaming implementation for better compatibility


def get_cache_file_path() -> str:
    """Return the path for the bookmark cache file.

    Creates the ``data`` directory on first use so callers can write
    the cache without checking for the directory themselves.
    """
    data_dir = Path("data")
    data_dir.mkdir(exist_ok=True)
    return str(data_dir / "ai_bookmarks_cache.json")


def load_cache() -> dict:
    """Load the bookmark cache from its JSON file.

    Returns:
        The parsed cache dict, or an empty cache structure
        (``{"bookmarks": [], "last_updated": None}``) when the file is
        missing or unreadable — callers never see an I/O error.
    """
    cache_file = get_cache_file_path()
    if os.path.exists(cache_file):
        try:
            with open(cache_file, "r", encoding="utf-8") as f:
                return json.load(f)
        except Exception as e:
            # Best-effort: a corrupt/unreadable cache degrades to an empty one.
            print(f"Error loading cache: {e}")
    return {"bookmarks": [], "last_updated": None}


def get_categories_data():
    """Get categorized bookmarks data for display.

    Returns:
        A 3-tuple ``(sorted_categories, category_counts, total_bookmarks)``:
        - ``sorted_categories``: list of ``(key, category_dict)`` pairs sorted
          by bookmark count, descending; each ``category_dict`` carries
          ``name``, ``description``, ``icon`` and its ``bookmarks`` list.
        - ``category_counts``: dict mapping category key -> bookmark count.
        - ``total_bookmarks``: total number of bookmarks in the cache
          (including ones whose category is not in the known set).
    """
    # The 10 fixed AI categories the assistant files bookmarks under.
    AI_CATEGORIES = {
        "research_breakthroughs": {
            "name": "Research & Breakthroughs",
            "description": "Novel papers, theoretical advances, new architectures, state-of-the-art results.",
            "icon": "🔬",
        },
        "model_releases": {
            "name": "Model Releases & Updates",
            "description": "Launches of new large-language or vision models, version upgrades, open-source checkpoints.",
            "icon": "🚀",
        },
        "tools_frameworks": {
            "name": "Tools, Frameworks & Platforms",
            "description": "SDKs, libraries, cloud services, developer toolkits, hosting/serving solutions.",
            "icon": "🛠️",
        },
        "applications_industry": {
            "name": "Applications & Industry Use Cases",
            "description": "AI in healthcare, finance, manufacturing, marketing, robotics—real-world deployments.",
            "icon": "🏭",
        },
        "regulation_ethics": {
            "name": "Regulation, Ethics & Policy",
            "description": "Government guidelines, ethical debates, bias/fairness studies, compliance news.",
            "icon": "⚖️",
        },
        "investment_funding": {
            "name": "Investment, Funding & M&A",
            "description": "Venture rounds, strategic investments, acquisitions, startup valuations.",
            "icon": "💰",
        },
        "benchmarks_leaderboards": {
            "name": "Benchmarks & Leaderboards",
            "description": "Performance comparisons, academic/industry challenges, leaderboard standings.",
            "icon": "🏆",
        },
        "community_events": {
            "name": "Community, Events & Education",
            "description": "Conferences, workshops, hackathons, courses, tutorials, webinars.",
            "icon": "🎓",
        },
        "security_privacy": {
            "name": "Security, Privacy & Safety",
            "description": "Adversarial attacks, defensive techniques, data-privacy breakthroughs, AI safety research.",
            "icon": "🔒",
        },
        "market_trends": {
            "name": "Market Trends & Analysis",
            "description": "Adoption rates, market forecasts, analyst reports, surveys on AI usage.",
            "icon": "📈",
        },
    }

    cache = load_cache()
    bookmarks = cache.get("bookmarks", [])

    categories_with_content = {}
    category_counts = {}

    # Initialize categories
    for key, data in AI_CATEGORIES.items():
        categories_with_content[key] = {
            "name": data["name"],
            "description": data["description"],
            "icon": data["icon"],
            "bookmarks": [],
        }
        category_counts[key] = 0

    # Categorize bookmarks; entries with an unknown category are counted in
    # the total but not shown under any category.
    for bookmark in bookmarks:
        category = bookmark.get("category", "uncategorized")
        if category in categories_with_content:
            categories_with_content[category]["bookmarks"].append(bookmark)
            category_counts[category] += 1

    # Sort categories by bookmark count (descending)
    sorted_categories = sorted(categories_with_content.items(), key=lambda x: len(x[1]["bookmarks"]), reverse=True)

    return sorted_categories, category_counts, len(bookmarks)


def create_categories_interface():
    """Create the categories dashboard tab (a ``gr.Blocks`` instance)."""

    def refresh_categories() -> str:
        """Build the markdown report of all categories and their bookmarks."""
        try:
            sorted_categories, category_counts, total_bookmarks = get_categories_data()

            if total_bookmarks == 0:
                return "## No bookmarks found in cache\n\nPlease use the Chat tab to load and categorize your bookmarks first."

            # Create the display content
            content = "# 🏷️ AI Bookmarks Categories\n\n"
            content += f"**Total Bookmarks:** {total_bookmarks}\n\n"

            # Display top 10 categories
            for i, (category_key, category_data) in enumerate(sorted_categories[:10], 1):
                icon = category_data["icon"]
                name = category_data["name"]
                description = category_data["description"]
                bookmark_count = len(category_data["bookmarks"])

                content += f"## {i}. {icon} {name}\n"
                content += f"**Count:** {bookmark_count} bookmarks\n"
                content += f"**Description:** {description}\n\n"

                # Show first 5 bookmarks for each category
                if bookmark_count > 0:
                    content += "**Recent Bookmarks:**\n"
                    for j, bookmark in enumerate(category_data["bookmarks"][:5], 1):
                        title = bookmark.get("title", "Untitled")
                        url = bookmark.get("url", "")
                        content += f"{j}. [{title}]({url})\n"
                    if bookmark_count > 5:
                        content += f" ... and {bookmark_count - 5} more\n"
                    content += "\n"
                else:
                    content += "*No bookmarks in this category yet.*\n\n"

                content += "---\n\n"

            return content
        except Exception as e:
            return f"## Error loading categories\n\n{str(e)}"

    with gr.Blocks() as categories_tab:
        gr.Markdown("# 🏷️ AI Bookmark Categories Dashboard")
        gr.Markdown("View your AI bookmarks organized by the main 10 categories.")

        refresh_btn = gr.Button("🔄 Refresh Categories", variant="primary")
        categories_display = gr.Markdown(refresh_categories())

        refresh_btn.click(fn=refresh_categories, outputs=categories_display)

    return categories_tab


def create_about_interface():
    """Create the about page tab (a ``gr.Blocks`` instance with static markdown)."""
    about_content = """
# 🧠 About ReMind

## Bring your past to mind.

**ReMind** is your intelligent digital memory assistant that helps you rediscover, organize, and make sense of your accumulated digital knowledge. In our information-rich world, we often bookmark valuable resources only to forget about them later. ReMind solves this problem by intelligently categorizing and surfacing your digital discoveries when you need them most.

---

## 🎯 What ReMind Does

### 🔖 **Smart Bookmark Management**
- Automatically imports and manages your Chrome bookmarks
- Provides intelligent search and filtering capabilities
- Tracks bookmark statistics and usage patterns
- Focuses specifically on AI and technology resources

### 🏷️ **Intelligent Categorization**
ReMind automatically organizes your bookmarks into **10 key AI categories**:

1. **🔬 Research & Breakthroughs** - Latest papers and theoretical advances
2. **🚀 Model Releases & Updates** - New AI models and version updates
3. **🛠️ Tools, Frameworks & Platforms** - Developer tools and SDKs
4. **🏭 Applications & Industry Use Cases** - Real-world AI implementations
5. **⚖️ Regulation, Ethics & Policy** - AI governance and ethical considerations
6. **💰 Investment, Funding & M&A** - Market movements and startup funding
7. **🏆 Benchmarks & Leaderboards** - Performance comparisons and competitions
8. **🎓 Community, Events & Education** - Learning resources and conferences
9. **🔒 Security, Privacy & Safety** - AI safety and security research
10. **📈 Market Trends & Analysis** - Industry insights and forecasts

### 💬 **Conversational Interface**
- Chat naturally with your AI assistant about your bookmarks
- Ask questions like "Show me my latest AI tools" or "Find research about transformers"
- Get contextual recommendations based on your interests
- Real-time thinking process visualization

### 📧 **Email Integration**
- Browse and search through your important emails
- Focus on AI newsletters and updates from trusted sources
- Extract insights from your email-based learning resources

---

## 🔧 How It Works

**ReMind** is powered by **Smolagents**, a modern AI agent framework that enables:

- **🤖 Multi-tool orchestration** - Seamlessly combines bookmark management, email access, and web search
- **🧠 Real-time reasoning** - Watch the AI think through problems step-by-step
- **🔄 Dynamic categorization** - Continuously learns and improves bookmark organization
- **🔍 Semantic search** - Find resources based on meaning, not just keywords

---

## 🚀 Getting Started

1. **Load Your Bookmarks**: Use the chat interface to import your Chrome bookmarks
2. **Categorize Content**: Ask ReMind to automatically categorize your AI resources
3. **Explore Categories**: Browse organized categories in the Categories Dashboard
4. **Search & Discover**: Use natural language to find specific resources
5. **Stay Updated**: Let ReMind help you track new developments in AI

---

## 🔒 Privacy & Security

- **Local Processing**: Your bookmarks are processed and stored locally
- **Selective Email Access**: Only accesses specified trusted email sources
- **No Data Sharing**: Your personal information stays on your device
- **Transparent Operations**: All AI operations are visible and explainable

---

## 💡 Why ReMind?

In the fast-moving world of AI and technology, staying informed while managing information overload is challenging. ReMind transforms your passive bookmark collection into an active, intelligent knowledge base that:

- **Surfaces forgotten gems** from your browsing history
- **Identifies patterns** in your learning journey
- **Suggests connections** between different resources
- **Keeps you organized** without manual effort
- **Learns your interests** and adapts over time

---

*"The palest ink is better than the best memory, but the smartest AI makes both ink and memory work together."*

**Welcome to ReMind - where your digital past becomes your future advantage.**
    """

    with gr.Blocks() as about_tab:
        gr.Markdown(about_content)

    return about_tab


def validate_message_history(history):
    """Validate and return properly formatted message history.

    Keeps only dict messages with both ``role`` and ``content`` keys and
    coerces non-string content to ``str``. Does NOT mutate the caller's
    dicts — a coerced message is a shallow copy.
    """
    validated = []
    for msg in history:
        if isinstance(msg, dict) and "role" in msg and "content" in msg:
            # Ensure content is a string; copy so the input dict is untouched.
            if not isinstance(msg["content"], str):
                msg = {**msg, "content": str(msg["content"])}
            validated.append(msg)
        else:
            print(f"Warning: Invalid message format detected: {msg}")
    return validated


def chat_with_agent(message: str, history: List) -> Generator[List, None, None]:
    """
    Chat with the agent using custom streaming functionality for real-time thinking display.

    Yields successive snapshots of the chat history (``messages`` format) so the
    UI shows the agent's planning, per-step execution, and final answer live.
    Falls back to a non-streaming run when ``stream=True`` fails.
    """
    try:
        # Convert history to proper format if needed
        if history is None:
            history = []

        # Normalize every supported history shape into {"role", "content"} dicts.
        formatted_history = []
        for item in history:
            if isinstance(item, dict):
                # Already a dict, check if it has required keys
                if "role" in item and "content" in item:
                    formatted_history.append(item)
                else:
                    print(f"Warning: Skipping malformed history item: {item}")
                    continue
            elif hasattr(item, "role") and hasattr(item, "content"):
                # ChatMessage object - convert to dict
                formatted_history.append({"role": item.role, "content": item.content})
            elif isinstance(item, (list, tuple)) and len(item) == 2:
                # Legacy format: [user_message, assistant_message] or (user, assistant)
                if isinstance(item[0], str) and isinstance(item[1], str):
                    formatted_history.append({"role": "user", "content": item[0]})
                    formatted_history.append({"role": "assistant", "content": item[1]})
                else:
                    print(f"Warning: Skipping malformed history item: {item}")
                    continue
            else:
                # Unknown format, skip it
                print(f"Warning: Skipping unknown history format: {type(item)} - {item}")
                continue

        # Reset memory for long conversations to prevent token overflow
        reset_memory = len(formatted_history) > 10  # Reset after 5 user-assistant exchanges

        # Start with user message in history
        new_history = formatted_history.copy()

        # Show initial thinking message
        thinking_message = {
            "role": "assistant",
            "content": "🧠 **Agent Planning**\n\nAnalyzing your request and creating execution plan...",
        }
        new_history.append(thinking_message)
        yield validate_message_history(new_history)

        # Run agent with streaming enabled
        try:
            # Use agent.run with stream=True to get step-by-step execution
            agent_stream = manager_agent.run(
                message,
                stream=True,
                reset=reset_memory,
            )

            for step in agent_stream:
                # Update thinking message with current step info
                if hasattr(step, "step_number") and hasattr(step, "action"):
                    step_content = "🧠 **Agent Planning & Execution**\n\n"
                    step_content += f"**Step {step.step_number}:**\n"

                    if hasattr(step, "thought") and step.thought:
                        step_content += f"💭 **Thought:** {step.thought}\n\n"
                    if hasattr(step, "action") and step.action:
                        step_content += f"🛠️ **Action:** {step.action}\n\n"
                    if hasattr(step, "observations") and step.observations:
                        # Truncate long observations so the panel stays readable.
                        obs_text = str(step.observations)[:300]
                        if len(str(step.observations)) > 300:
                            obs_text += "..."
                        step_content += f"👁️ **Observation:** {obs_text}\n\n"

                    thinking_message["content"] = step_content
                    new_history[-1] = thinking_message
                    yield validate_message_history(new_history)

        except Exception as stream_error:
            # If streaming fails, fall back to regular execution
            print(f"Streaming failed: {stream_error}, falling back to regular execution")

            thinking_message["content"] = "🧠 **Agent Working**\n\nProcessing your request using available tools..."
            new_history[-1] = thinking_message
            yield validate_message_history(new_history)

            # Execute without streaming
            result = manager_agent.run(
                message,
                stream=False,
                reset=reset_memory,
            )

            # Show tool usage if available
            tool_usage_content = ""
            if (
                hasattr(manager_agent, "memory")
                and hasattr(manager_agent.memory, "steps")
                and manager_agent.memory.steps
            ):
                try:
                    # Get recent action steps (at most the last 3)
                    action_steps = [step for step in manager_agent.memory.steps if hasattr(step, "step_number")]
                    recent_steps = action_steps[-3:] if len(action_steps) > 3 else action_steps

                    if recent_steps:
                        tool_details = []
                        for step in recent_steps:
                            if hasattr(step, "step_number"):
                                step_info = f"**Step {step.step_number}**"
                                if hasattr(step, "duration") and step.duration:
                                    step_info += f" ({step.duration:.1f}s)"
                                if hasattr(step, "observations") and step.observations:
                                    obs_text = str(step.observations)[:150]
                                    if len(str(step.observations)) > 150:
                                        obs_text += "..."
                                    step_info += f"\n✅ {obs_text}"
                                if hasattr(step, "error") and step.error:
                                    error_text = str(step.error)[:100]
                                    if len(str(step.error)) > 100:
                                        error_text += "..."
                                    step_info += f"\n❌ {error_text}"
                                tool_details.append(step_info)

                        if tool_details:
                            tool_usage_content = "\n\n".join(tool_details)
                except Exception as e:
                    print(f"Error processing agent steps: {e}")
                    tool_usage_content = "Agent executed actions successfully"

            # Update thinking to show completion
            thinking_message["content"] = (
                "🧠 **Agent Complete**\n\n✅ Request processed successfully\n✅ Response prepared"
            )
            new_history[-1] = thinking_message
            yield validate_message_history(new_history)

            # Add tool usage message if there were tools used
            if tool_usage_content:
                tool_message = {"role": "assistant", "content": f"🛠️ **Tools & Actions Used**\n\n{tool_usage_content}"}
                new_history.append(tool_message)
                yield validate_message_history(new_history)

            # Add final response
            final_response = str(result) if result else "I couldn't process your request."
            final_message = {"role": "assistant", "content": final_response}
            new_history.append(final_message)
            yield validate_message_history(new_history)
            return

        # If we get here, streaming worked, so get the final result.
        # The streaming should have shown all the steps, now get final answer.
        thinking_message["content"] = "🧠 **Agent Complete**\n\n✅ All steps executed\n✅ Preparing final response"
        new_history[-1] = thinking_message
        yield validate_message_history(new_history)

        # Get the final result from the agent memory
        final_response = "Task completed successfully!"
        if hasattr(manager_agent, "memory") and hasattr(manager_agent.memory, "steps") and manager_agent.memory.steps:
            # Get the last step's observations as the final answer
            last_step = manager_agent.memory.steps[-1]
            if hasattr(last_step, "observations") and last_step.observations:
                final_response = str(last_step.observations)

        final_message = {"role": "assistant", "content": final_response}
        new_history.append(final_message)
        yield validate_message_history(new_history)

    except Exception as e:
        # Fallback error handling
        error_message = {
            "role": "assistant",
            "content": f"❌ **System Error:** {str(e)}\n\nPlease try again with a different approach.",
        }
        if "new_history" in locals():
            new_history.append(error_message)
            yield validate_message_history(new_history)
        else:
            # If new_history wasn't initialized, create a minimal valid history
            yield validate_message_history([error_message])


# Create the main chat interface
chat_interface = gr.ChatInterface(
    fn=chat_with_agent,
    type="messages",
    title="🔖 Digital Assistant - Powered by Smolagents",
    description="""
## Your Comprehensive AI Assistant! 🤖

I can help you with:

### 🔖 **Chrome Bookmarks Management**
- Search and filter AI resources bookmarks
- Get bookmark statistics and information
- Filter bookmarks by domain
- Cache and manage Chrome bookmarks data

### 🏷️ **AI News Categorization**
- Categorize AI bookmarks into 10 predefined categories
- Get categorization statistics and insights
- Search bookmarks by specific categories
- Manually recategorize bookmarks when needed

### 📧 **Email Management**
- Browse recent emails from trusted senders
- Search emails by keywords (AI, newsletters, updates, etc.)
- Read full email content when you need details

### 🌐 **Web Search**
- Perform web searches for current information
- Research topics and gather up-to-date data

---

**🔒 Security Note:** Email read access is limited to `habib.adoum01@gmail.com` and `news@alphasignal.ai`

**💡 Watch the agent think in real-time** - You'll see my reasoning process, tool selection, and execution steps in collapsible sections!
    """,
    examples=[
        "🔖 Search my AI bookmarks",
        "📧 Show me my latest 5 emails",
        "🤖 Find emails about AI",
        "🌐 Search for latest AI news",
        "💎 What AI resources do I have?",
        "🐙 Filter bookmarks by GitHub domain",
        "📰 Search for newsletter emails",
        "🏷️ Categorize all my AI bookmarks",
        "📊 Show me categorization statistics",
        "🔬 Get research & breakthrough bookmarks",
        "🚀 Show model releases bookmarks",
        "🛠️ Find tools and frameworks bookmarks",
    ],
    show_progress="hidden",
)

# Create categories and about interfaces
categories_interface = create_categories_interface()
about_interface = create_about_interface()

# Create tabbed interface
demo = gr.TabbedInterface(
    [about_interface, chat_interface, categories_interface],
    ["ℹ️ About", "💬 Chat Assistant", "🏷️ Categories Dashboard"],
    title="ReMind - Bring your past to mind.",
)