File size: 26,350 Bytes
368277b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
da3a984
 
368277b
9685fdc
 
 
 
 
 
 
 
da3a984
368277b
da3a984
 
368277b
da3a984
368277b
 
 
da3a984
 
368277b
da3a984
 
 
 
 
 
 
 
 
 
 
 
 
368277b
 
da3a984
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
368277b
 
 
 
 
da3a984
368277b
da3a984
 
 
 
 
368277b
 
 
 
 
da3a984
 
 
 
 
368277b
 
 
 
 
da3a984
 
 
 
 
368277b
 
 
 
 
da3a984
368277b
da3a984
 
 
 
 
368277b
 
 
da3a984
368277b
da3a984
 
 
 
 
 
 
368277b
 
 
da3a984
 
 
 
 
 
 
 
 
 
 
 
 
 
368277b
 
 
 
b58981e
 
 
 
 
 
 
 
 
 
 
 
 
 
54df027
 
 
 
 
b58981e
 
 
 
 
 
54df027
 
 
 
 
67a3f70
368277b
 
 
 
67a3f70
 
 
 
 
 
 
 
 
 
b58981e
 
 
67a3f70
54df027
 
 
67a3f70
b58981e
 
 
54df027
 
 
 
b58981e
 
54df027
 
 
 
 
 
 
67a3f70
368277b
67a3f70
368277b
 
67a3f70
368277b
da3a984
67a3f70
 
da3a984
67a3f70
368277b
54df027
368277b
 
 
 
 
 
 
 
 
 
 
 
 
 
da3a984
368277b
da3a984
 
368277b
 
b58981e
368277b
 
b58981e
368277b
 
 
 
 
 
 
da3a984
b58981e
 
 
368277b
54df027
368277b
 
 
 
 
b58981e
 
 
 
368277b
54df027
368277b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b58981e
 
 
 
368277b
54df027
368277b
 
 
b58981e
 
 
 
368277b
54df027
368277b
 
 
b58981e
368277b
54df027
368277b
 
 
 
b58981e
 
 
 
368277b
54df027
368277b
 
 
 
 
 
 
 
 
b58981e
368277b
54df027
368277b
 
 
67a3f70
 
 
 
54df027
 
 
 
 
 
368277b
 
 
 
 
 
da3a984
368277b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8940e31
368277b
 
633d1a6
368277b
 
 
 
 
 
 
 
 
 
 
 
633d1a6
 
368277b
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
import gradio as gr
from typing import Generator, List
import json
import os
from pathlib import Path
from src.agents.manager_agent import manager_agent

# Custom streaming implementation for better compatibility


def get_cache_file_path():
    """Return the path of the bookmark cache JSON file.

    Side effect: ensures the ``data`` directory exists so callers can
    open the returned path for writing immediately.
    """
    cache_dir = "data"
    os.makedirs(cache_dir, exist_ok=True)
    return os.path.join(cache_dir, "ai_bookmarks_cache.json")


def load_cache():
    """Load the bookmark cache from its JSON file.

    Returns:
        dict: The parsed cache contents, or the empty default
        ``{"bookmarks": [], "last_updated": None}`` when the cache file is
        missing, unreadable, or contains invalid JSON.
    """
    cache_file = get_cache_file_path()
    try:
        with open(cache_file, "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        # No cache written yet -- silently fall through to the empty default.
        pass
    except (OSError, json.JSONDecodeError) as e:
        # Corrupt or unreadable cache: report it and fall back to an empty cache
        # instead of crashing the UI.
        print(f"Error loading cache: {e}")
    return {"bookmarks": [], "last_updated": None}


def get_categories_data():
    """Group cached bookmarks into the 10 predefined AI categories.

    Returns:
        tuple: ``(sorted_categories, category_counts, total_bookmarks)`` where
        ``sorted_categories`` is a list of ``(category_key, category_data)``
        pairs ordered by bookmark count (descending), ``category_counts`` maps
        each category key to its bookmark count, and ``total_bookmarks`` is the
        total number of cached bookmarks (including ones whose category is not
        in the predefined set).
    """
    AI_CATEGORIES = {
        "research_breakthroughs": {
            "name": "Research & Breakthroughs",
            "description": "Novel papers, theoretical advances, new architectures, state-of-the-art results.",
            "icon": "🔬",
        },
        "model_releases": {
            "name": "Model Releases & Updates",
            "description": "Launches of new large-language or vision models, version upgrades, open-source checkpoints.",
            "icon": "🚀",
        },
        "tools_frameworks": {
            "name": "Tools, Frameworks & Platforms",
            "description": "SDKs, libraries, cloud services, developer toolkits, hosting/serving solutions.",
            "icon": "🛠️",
        },
        "applications_industry": {
            "name": "Applications & Industry Use Cases",
            "description": "AI in healthcare, finance, manufacturing, marketing, robotics—real-world deployments.",
            "icon": "🏭",
        },
        "regulation_ethics": {
            "name": "Regulation, Ethics & Policy",
            "description": "Government guidelines, ethical debates, bias/fairness studies, compliance news.",
            "icon": "⚖️",
        },
        "investment_funding": {
            "name": "Investment, Funding & M&A",
            "description": "Venture rounds, strategic investments, acquisitions, startup valuations.",
            "icon": "💰",
        },
        "benchmarks_leaderboards": {
            "name": "Benchmarks & Leaderboards",
            "description": "Performance comparisons, academic/industry challenges, leaderboard standings.",
            "icon": "🏆",
        },
        "community_events": {
            "name": "Community, Events & Education",
            "description": "Conferences, workshops, hackathons, courses, tutorials, webinars.",
            "icon": "🎓",
        },
        "security_privacy": {
            "name": "Security, Privacy & Safety",
            "description": "Adversarial attacks, defensive techniques, data-privacy breakthroughs, AI safety research.",
            "icon": "🔒",
        },
        "market_trends": {
            "name": "Market Trends & Analysis",
            "description": "Adoption rates, market forecasts, analyst reports, surveys on AI usage.",
            "icon": "📈",
        },
    }

    cache = load_cache()
    bookmarks = cache.get("bookmarks", [])

    # Seed every known category with display metadata and an empty list.
    categories_with_content = {
        key: {
            "name": data["name"],
            "description": data["description"],
            "icon": data["icon"],
            "bookmarks": [],
        }
        for key, data in AI_CATEGORIES.items()
    }

    # Assign bookmarks to their category; bookmarks with an unknown category
    # are skipped here but still counted in the returned total.
    for bookmark in bookmarks:
        category = bookmark.get("category", "uncategorized")
        if category in categories_with_content:
            categories_with_content[category]["bookmarks"].append(bookmark)

    # Counts are derived from the lists themselves, so there is a single
    # source of truth (the original kept a parallel counter dict in sync).
    category_counts = {key: len(data["bookmarks"]) for key, data in categories_with_content.items()}

    # Sort categories by bookmark count (descending)
    sorted_categories = sorted(
        categories_with_content.items(),
        key=lambda item: len(item[1]["bookmarks"]),
        reverse=True,
    )

    return sorted_categories, category_counts, len(bookmarks)


def create_categories_interface():
    """Build the Gradio tab that shows bookmarks grouped by category."""

    def refresh_categories():
        """Render the current category breakdown as one Markdown string."""
        try:
            sorted_categories, category_counts, total_bookmarks = get_categories_data()

            if total_bookmarks == 0:
                return "## No bookmarks found in cache\n\nPlease use the Chat tab to load and categorize your bookmarks first."

            # Assemble the page as a list of fragments and join once at the end.
            parts = ["# 🏷️ AI Bookmarks Categories\n\n"]
            parts.append(f"**Total Bookmarks:** {total_bookmarks}\n\n")

            # Only the ten most populated categories are rendered.
            for i, (category_key, category_data) in enumerate(sorted_categories[:10], 1):
                bookmark_count = len(category_data["bookmarks"])

                parts.append(f"## {i}. {category_data['icon']} {category_data['name']}\n")
                parts.append(f"**Count:** {bookmark_count} bookmarks\n")
                parts.append(f"**Description:** {category_data['description']}\n\n")

                if bookmark_count > 0:
                    parts.append("**Recent Bookmarks:**\n")
                    # Preview at most five bookmarks per category.
                    for j, bookmark in enumerate(category_data["bookmarks"][:5], 1):
                        title = bookmark.get("title", "Untitled")
                        url = bookmark.get("url", "")
                        parts.append(f"{j}. [{title}]({url})\n")

                    if bookmark_count > 5:
                        parts.append(f"   ... and {bookmark_count - 5} more\n")
                    parts.append("\n")
                else:
                    parts.append("*No bookmarks in this category yet.*\n\n")

                parts.append("---\n\n")

            return "".join(parts)

        except Exception as e:
            return f"## Error loading categories\n\n{str(e)}"

    with gr.Blocks() as categories_tab:
        gr.Markdown("# 🏷️ AI Bookmark Categories Dashboard")
        gr.Markdown("View your AI bookmarks organized by the main 10 categories.")

        refresh_btn = gr.Button("🔄 Refresh Categories", variant="primary")
        # Populate the display once at build time; the button re-renders on demand.
        categories_display = gr.Markdown(refresh_categories())

        refresh_btn.click(fn=refresh_categories, outputs=categories_display)

    return categories_tab


def create_about_interface():
    """Create the about page interface.

    Returns a ``gr.Blocks`` layout containing the project introduction,
    the system architecture image, and the detailed feature overview.
    """

    # Markdown shown at the top of the tab: project pitch plus demo-video link.
    intro_content = """
    # 🧠 About ReMind - AI and Tech News Assistant

    ## 🎥 Project Demo Video

    [![ReMind Demo Video](https://img.youtube.com/vi/CD0j2dGVycs/maxresdefault.jpg)](https://youtu.be/CD0j2dGVycs)

    **Watch the full project demonstration:** [https://youtu.be/CD0j2dGVycs](https://youtu.be/CD0j2dGVycs)

    ---

    ## Bring your AI knowledge to mind.

    **ReMind** is your intelligent digital memory assistant that helps you rediscover, organize, and make sense of your accumulated AI and technology knowledge. In our information-rich world, we often bookmark valuable resources and receive important newsletters only to forget about them later. This system solves this problem by intelligently categorizing and surfacing your digital discoveries when you need them most.
    """

    # Detailed feature overview rendered below the architecture diagram.
    about_content = """
## 🎯 What ReMind Does

### 🔖 **Smart Bookmark Management**
- Automatically extracts and caches AI bookmarks from your Chrome "AI ressources" folder
- Provides intelligent search and filtering capabilities by domain, keywords, and categories
- Tracks bookmark statistics and usage patterns
- Focuses specifically on AI and technology resources with comprehensive caching system

### 📧 **Newsletter Email Integration**
- Securely accesses emails from trusted AI news sources (news@alphasignal.ai)
- Searches through AI newsletters and updates with intelligent filtering
- Extracts insights from email-based learning resources
- Provides recent email browsing and content reading capabilities

### 🌐 **Real-time Web Search**
- Performs live web searches for the latest AI and tech developments
- Combines cached knowledge with current information
- Supports up to 6-step search processes for comprehensive research
- Delivers real-time results and analysis

### 🏷️ **Intelligent Categorization**
The system automatically organizes your content into **10 key AI categories**:

1. **🔬 Research & Breakthroughs** - Latest papers, theoretical advances, and novel architectures
2. **🚀 Model Releases & Updates** - New AI models, version upgrades, and open-source checkpoints  
3. **🛠️ Tools, Frameworks & Platforms** - Developer SDKs, libraries, cloud services, and deployment solutions
4. **🏭 Applications & Industry Use Cases** - Real-world AI implementations across healthcare, finance, manufacturing
5. **⚖️ Regulation, Ethics & Policy** - AI governance, ethical considerations, and compliance guidelines
6. **💰 Investment, Funding & M&A** - Market movements, startup funding, and acquisition news
7. **🏆 Benchmarks & Leaderboards** - Performance comparisons, academic challenges, and ranking systems
8. **🎓 Community, Events & Education** - Learning resources, conferences, workshops, and tutorials
9. **🔒 Security, Privacy & Safety** - AI safety research, adversarial attacks, and privacy protection
10. **📈 Market Trends & Analysis** - Industry insights, adoption rates, and market forecasts

### 💬 **Multi-Agent Conversational Interface**
- **Manager Agent**: Routes queries to specialized agents based on context
- **Bookmarks Agent**: Handles Chrome bookmark extraction, caching, and management
- **Gmail Agent**: Manages email access, search, and content reading
- **Categorizer Agent**: Provides intelligent classification with manual override capabilities
- **Web Search Agent**: Performs real-time web research and analysis

---

## 🔧 How It Works

**ReMind** is powered by HuggingFace's **Smolagents**. Here is how it works:

- **🤖 Multi-agent orchestration** - Specialized agents work together seamlessly
- **🧠 Real-time reasoning** - Watch AI agents think through problems step-by-step
- **🔄 Dynamic categorization** - Keyword-based classification with manual recategorization
- **🔍 Semantic search** - Find resources across bookmarks, emails, and web results
- **💾 Local caching** - Efficient JSON-based storage for offline access

---

## 🚀 Getting Started

1. **Update Bookmark Cache**: Import your Chrome "AI ressources" bookmarks into the local cache
2. **Categorize Content**: Automatically classify your AI resources into organized categories
3. **Access Email Updates**: Browse recent newsletters and AI news from trusted sources
4. **Search Across Sources**: Use natural language to find specific resources across all data sources
5. **Stay Current**: Combine cached knowledge with real-time web search for latest developments

---

## 🔒 Privacy & Security

- **Local JSON Caching**: Bookmarks processed and stored in local cache files
- **Selective Email Access**: Only accesses specified trusted email sources with OAuth security
- **Controlled Web Access**: Real-time search with responsible usage limits
- **Transparent Operations**: All agent operations are visible and explainable
- **No Data Sharing**: Personal information processed locally with secure authentication

---

## 💡 Why ReMind?

In the fast-moving world of AI and technology, staying informed while managing information overload is challenging. This system transforms your passive bookmark collection and newsletter subscriptions into an active, intelligent knowledge base that:

- **Surfaces forgotten resources** from your Chrome bookmarks
- **Organizes email newsletters** into actionable intelligence  
- **Combines multiple sources** for comprehensive AI knowledge management
- **Provides real-time updates** through web search integration
- **Learns and adapts** through intelligent categorization and recategorization

---

## 🙏 Acknowledgments

Thanks to **Modal Labs**, **Hugging Face**, **Nebius**, **Anthropic**, **OpenAI**, **Hyperbolic Labs**, **MistralAI**, and **Sambanova.AI** for providing credits that power this demo.

---

*"The smartest AI system is one that helps you manage AI knowledge itself."*

**Welcome to ReMind - where your digital AI knowledge becomes your strategic advantage.**
    """

    with gr.Blocks() as about_tab:
        gr.Markdown(intro_content)

        # Add the AI architecture diagram (static local asset).
        gr.Markdown("## 🏗️ System Architecture")
        gr.Image(
            value="data/ai_diagram.png",
            label="ReMind AI System Architecture",
            show_label=True,
            show_download_button=True,
            height=400,
            width=None,
            interactive=False,
        )
        gr.Markdown("*System architecture showing the multi-agent orchestration and data flow in ReMind*")
        gr.Markdown(about_content)
    return about_tab


def sanitize_content(content):
    """Coerce a chat message payload to a plain string.

    Gradio message histories must carry string content; dicts, ChatMessage-like
    objects, and any other values are converted via ``str``.

    Args:
        content: Any value attached to a message's ``content`` field.

    Returns:
        str: ``content`` unchanged if it is already a string, otherwise its
        ``str()`` representation.
    """
    # The original dict / object-with-__dict__ / fallback branches all did
    # str(content); a single conversion covers every non-string case.
    if isinstance(content, str):
        return content
    return str(content)


def validate_message_history(history):
    """Return only the well-formed messages from *history*, normalized.

    A well-formed message is a dict carrying both ``role`` and ``content``
    keys; anything else is dropped with a console warning. Each kept message
    is rebuilt from scratch with string role and sanitized string content so
    no nested objects or extra keys leak into the Gradio chat history.
    """
    clean_messages = []
    for entry in history:
        well_formed = isinstance(entry, dict) and "role" in entry and "content" in entry
        if not well_formed:
            print(f"Warning: Invalid message format detected: {entry}")
            continue
        # Fresh dict avoids any nesting/aliasing issues with the input object.
        clean_messages.append(
            {"role": str(entry["role"]), "content": sanitize_content(entry["content"])}
        )
    return clean_messages


def chat_with_agent(message: str, history: List) -> Generator[List, None, None]:
    """
    Chat with the agent using custom streaming functionality for real-time thinking display.

    Args:
        message: The user's latest chat message.
        history: Prior conversation in any of the formats Gradio may supply
            (message dicts, ChatMessage-like objects, or legacy
            [user, assistant] pairs).

    Yields:
        The full, validated message history after each update, so the UI can
        render intermediate "thinking" states as well as the final answer.
    """
    try:
        # Convert history to proper format if needed
        if history is None:
            history = []

        # Ensure all history items are properly formatted
        formatted_history = []
        for item in history:
            if isinstance(item, dict):
                # Already a dict, check if it has required keys
                if "role" in item and "content" in item:
                    # Ensure content is a simple string
                    content = sanitize_content(item["content"])
                    formatted_history.append({"role": str(item["role"]), "content": content})
                else:
                    # Skip malformed dict items
                    print(f"Warning: Skipping malformed history item: {item}")
                    continue
            elif hasattr(item, "role") and hasattr(item, "content"):
                # ChatMessage object - convert to dict with string content
                content = sanitize_content(item.content)
                formatted_history.append({"role": str(item.role), "content": content})
            elif isinstance(item, (list, tuple)) and len(item) == 2:
                # Legacy format: [user_message, assistant_message] or (user, assistant)
                # Convert to proper message format
                if isinstance(item[0], str) and isinstance(item[1], str):
                    formatted_history.append({"role": "user", "content": str(item[0])})
                    formatted_history.append({"role": "assistant", "content": str(item[1])})
                else:
                    print(f"Warning: Skipping malformed history item: {item}")
                    continue
            else:
                # Unknown format, skip it
                print(f"Warning: Skipping unknown history format: {type(item)} - {item}")
                continue

        # Reset memory for long conversations to prevent token overflow
        reset_memory = len(formatted_history) > 10  # Reset after 5 user-assistant exchanges

        # Work on a copy so the history object Gradio passed in is never mutated.
        new_history = formatted_history.copy()

        # Show initial thinking message with spinner
        thinking_message = {
            "role": "assistant",
            "content": "🔄 **Agent Planning** ⏳\n\n💫 Analyzing your request and creating execution plan...\n\n*Please wait while I process your request...*",
        }
        new_history.append(thinking_message)
        yield validate_message_history(new_history)

        # Run agent with streaming enabled
        try:
            # Use agent.run with stream=True to get step-by-step execution
            agent_stream = manager_agent.run(
                message,
                stream=True,
                reset=reset_memory,
            )

            step_count = 0
            for step in agent_stream:
                step_count += 1

                # Update thinking message with current step info and spinner.
                # Only steps exposing both step_number and action are rendered;
                # other stream events are silently skipped.
                if hasattr(step, "step_number") and hasattr(step, "action"):
                    step_content = "⚡ **Agent Working** 🔄\n\n"
                    step_content += f"🔍 **Step {step.step_number}:** *In Progress...*\n\n"

                    if hasattr(step, "thought") and step.thought:
                        step_content += f"💭 **Thought:** {str(step.thought)}\n\n"

                    if hasattr(step, "action") and step.action:
                        step_content += f"🛠️ **Action:** {str(step.action)}\n\n"

                    if hasattr(step, "observations") and step.observations:
                        # Truncate long observations to keep the UI readable.
                        obs_text = str(step.observations)[:300]
                        if len(str(step.observations)) > 300:
                            obs_text += "..."
                        step_content += f"👁️ **Observation:** {obs_text}\n\n"

                    step_content += "⏳ *Processing next step...*"

                    # Ensure the content is a clean string; replace (not append)
                    # the trailing thinking message so the UI shows one live status.
                    thinking_message = {"role": "assistant", "content": str(step_content)}
                    new_history[-1] = thinking_message
                    yield validate_message_history(new_history)

        except Exception as stream_error:
            # If streaming fails, fall back to regular execution
            print(f"Streaming failed: {stream_error}, falling back to regular execution")

            thinking_message = {
                "role": "assistant",
                "content": "⚡ **Agent Working** 🔄\n\n💫 Processing your request using available tools...\n\n⏳ *Please wait...*",
            }
            new_history[-1] = thinking_message
            yield validate_message_history(new_history)

            # Execute without streaming
            result = manager_agent.run(
                message,
                stream=False,
                reset=reset_memory,
            )

            # Show tool usage if available by inspecting the agent's memory.
            # NOTE(review): relies on smolagents memory internals (steps with
            # step_number/duration/observations/error) -- verify on upgrades.
            tool_usage_content = ""
            if (
                hasattr(manager_agent, "memory")
                and hasattr(manager_agent.memory, "steps")
                and manager_agent.memory.steps
            ):
                try:
                    # Get recent action steps (last three at most)
                    action_steps = [step for step in manager_agent.memory.steps if hasattr(step, "step_number")]
                    recent_steps = action_steps[-3:] if len(action_steps) > 3 else action_steps

                    if recent_steps:
                        tool_details = []
                        for step in recent_steps:
                            if hasattr(step, "step_number"):
                                step_info = f"**Step {step.step_number}**"

                                if hasattr(step, "duration") and step.duration:
                                    step_info += f" ({step.duration:.1f}s)"

                                if hasattr(step, "observations") and step.observations:
                                    obs_text = str(step.observations)[:150]
                                    if len(str(step.observations)) > 150:
                                        obs_text += "..."
                                    step_info += f"\n✅ {obs_text}"

                                if hasattr(step, "error") and step.error:
                                    error_text = str(step.error)[:100]
                                    if len(str(step.error)) > 100:
                                        error_text += "..."
                                    step_info += f"\n❌ {error_text}"

                                tool_details.append(step_info)

                        if tool_details:
                            tool_usage_content = "\n\n".join(tool_details)

                except Exception as e:
                    # Memory inspection is best-effort; never let it break the reply.
                    print(f"Error processing agent steps: {e}")
                    tool_usage_content = "Agent executed actions successfully"

            # Update thinking to show completion
            thinking_message = {
                "role": "assistant",
                "content": "✅ **Agent Complete** 🎉\n\n✅ Request processed successfully\n✅ Response prepared",
            }
            new_history[-1] = thinking_message
            yield validate_message_history(new_history)

            # Add tool usage message if there were tools used
            if tool_usage_content:
                tool_message = {
                    "role": "assistant",
                    "content": f"🛠️ **Tools & Actions Used**\n\n{str(tool_usage_content)}",
                }
                new_history.append(tool_message)
                yield validate_message_history(new_history)

            # Add final response
            final_response = str(result) if result else "I couldn't process your request."
            final_message = {"role": "assistant", "content": str(final_response)}
            new_history.append(final_message)
            yield validate_message_history(new_history)
            return

        # If we get here, streaming worked, so get the final result
        # The streaming should have shown all the steps, now get final answer
        thinking_message = {
            "role": "assistant",
            "content": "✅ **Agent Complete** 🎉\n\n✅ All steps executed\n✅ Preparing final response",
        }
        new_history[-1] = thinking_message
        yield validate_message_history(new_history)

        # Get the final result from the agent memory
        final_response = "Task completed successfully!"
        if hasattr(manager_agent, "memory") and hasattr(manager_agent.memory, "steps") and manager_agent.memory.steps:
            # Get the last step's observations as the final answer
            last_step = manager_agent.memory.steps[-1]
            if hasattr(last_step, "observations") and last_step.observations:
                final_response = str(last_step.observations)

        final_message = {"role": "assistant", "content": str(final_response)}
        new_history.append(final_message)
        yield validate_message_history(new_history)

    except Exception as e:
        # Fallback error handling: surface the error in-chat rather than crashing.
        error_message = {
            "role": "assistant",
            "content": f"❌ **System Error:** {str(e)}\n\nPlease try again with a different approach.",
        }
        if "new_history" in locals():
            new_history.append(error_message)
            yield validate_message_history(new_history)
        else:
            # If new_history wasn't initialized, create a minimal valid history
            yield validate_message_history([error_message])


# Create the main chat interface.
# chat_with_agent is a generator, so the UI streams intermediate "thinking"
# updates; type="messages" matches the dict-based history format it yields.
chat_interface = gr.ChatInterface(
    fn=chat_with_agent,
    type="messages",
    title="🔖 AI and Tech News Assistant - Powered by Smolagents",
    description="""
    ## Your Comprehensive AI Assistant! 🤖
    
    I can help you with:
    
    ### 🔖 **Chrome Bookmarks Management**
    - Search and filter AI resources bookmarks
    - Get bookmark statistics and information  
    - Filter bookmarks by domain
    - Cache and manage Chrome bookmarks data
    
    ### 🏷️ **AI News Categorization**
    - Categorize AI bookmarks into 10 predefined categories
    - Get categorization statistics and insights
    - Search bookmarks by specific categories
    - Manually recategorize bookmarks when needed
    
    ### 📧 **Email Management** 
    - Browse recent emails from trusted senders
    - Search emails by keywords (AI, newsletters, updates, etc.)
    - Read full email content when you need details
    
    ### 🌐 **Web Search**
    - Perform web searches for current information
    - Research topics and gather up-to-date data
    
    ---
    **⏱️ Processing Time Note:** Depending on the type of query, processing can take several seconds or minutes to complete.
    """,
    examples=[
        "📧 Show me my latest 5 emails",
        "🔖 Search my AI bookmarks",
        "🤖 Find emails about AI",
        "🌐 Search for latest AI news",
        "💎 What AI resources do I have?",
        "🐙 Filter bookmarks by GitHub domain",
        "📰 Search for newsletter emails",
        "🏷️ Categorize all my AI bookmarks",
        "📊 Show me categorization statistics",
        "🔬 Get research & breakthrough bookmarks",
        "🚀 Show model releases bookmarks",
        "🛠️ Find tools and frameworks bookmarks",
    ],
    show_progress="hidden",
    cache_examples=False,
)

# Create categories and about interfaces (each returns a gr.Blocks layout).
categories_interface = create_categories_interface()
about_interface = create_about_interface()

# Create tabbed interface; tab order here determines the on-screen order,
# with the About tab shown first.
demo = gr.TabbedInterface(
    [about_interface, chat_interface, categories_interface],
    ["ℹ️ About", "💬 Chat Assistant", "🏷️ Categories Dashboard"],
    title="ReMind - Bring your past to mind.",
)