File size: 5,062 Bytes
6350667
 
57ac9fb
 
 
 
6350667
83b02d0
fbc8645
 
799053e
9f94d04
8ea41f1
3fb9411
c53d545
6350667
 
f481211
 
6350667
57ac9fb
 
0c163b8
 
 
 
 
 
57ac9fb
6350667
35642cb
26da217
 
 
 
35642cb
059e12c
 
 
278cfbf
059e12c
 
35642cb
 
 
 
 
5f31ada
 
 
 
 
35642cb
 
 
 
 
 
 
 
 
 
 
26da217
 
 
 
 
 
35642cb
26da217
 
 
 
 
 
 
9188dd0
 
 
 
f481211
 
aa4dc59
83b02d0
 
f481211
 
 
 
 
 
 
9f94d04
8ea41f1
3fb9411
c53d545
9f94d04
57ac9fb
 
 
 
 
b19cc5f
 
 
 
 
 
57ac9fb
b19cc5f
 
57ac9fb
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
from __future__ import annotations

import os

import gradio as gr

import health
from layout import CELL_CSS, cell
from problem_cell import render_problem_cell
from solution_cell import render_solution_cell
from setup_cell import render_setup_cell
from context_biased_transcription_cell import render_context_biased_transcription_cell
from media_analysis_cell import render_media_analysis_cell
from translation_cell import render_translation_cell
from wrap_up_cell import render_wrap_up_cell


def render_health_panel(gemini_api_key: str | None = None) -> str:
    """Render the health-check notice as an HTML string.

    Thin wrapper around :func:`health.render_health_notice` so the Gradio
    click handler can bind a stable module-level callable.

    Args:
        gemini_api_key: Optional Gemini API key entered by the user; passed
            through unchanged to the health module.

    Returns:
        HTML markup describing the current health status.
    """
    notice_html = health.render_health_notice(gemini_api_key)
    return notice_html


def create_app() -> gr.Blocks:
    """Create the Gradio Blocks application used in the Hugging Face Space.

    The layout is intentionally notebook-like: each conceptual unit
    (problem, health, demos, wrap-up) is encapsulated in its own module
    and rendered as a "cell" to keep the main app glue straightforward.
    """
    with gr.Blocks(title="Aileen3 Demo") as demo:
        gr.HTML(f"<style>{CELL_CSS}</style>")

        with cell("πŸ“˜ Introduction"):
            gr.Markdown(
                """
# Aileen 3 Core
<div style="display: flex; justify-content: center; gap: 10px;">
<a href="https://ndurner.de/links/aileen3-linkedin">
  <img alt="LinkedIn post" src="https://img.shields.io/badge/πŸ”— LinkedIn-Post-blue?logo=linkedin">
</a>
<a href="https://youtu.be/r56najKVS4I">
  <img alt="Demo video" src="https://img.shields.io/badge/YouTube-MCP%20demo%20video-red?logo=youtube">
</a>
</div>

Large Language Models (LLMs) rely on tools – sometimes provided by MCP servers – to interact with the outside world.
Aileen 3 Core is an MCP server focused on **Information Foraging**: mining high-noise sources for novel insights and turning
them into dense briefings you can consume quickly. Grounded in cognitive science, Aileen 3 models novelty as prediction error against
explicit priors such as user expectations, facts from an AI Memory Bank, or media context.

> **"Information is surprises. You learn something when things don’t turn out the way you expected."** βΈΊ Roger Schank

To that end, the Aileen 3 Core MCP server provides media access and analysis services backed by Google Gemini.

This Space shows a small, notebook-style slice of that system so you can verify the core MCP server is reachable and working.

Today, Aileen 3 Core is a contender in the **[MCP's 1st Birthday – Hosted by Anthropic and Gradio](https://huggingface.co/MCP-1st-Birthday)**
hackathon. It also powers the **[Aileen 3 Agent](https://github.com/ndurner/aileen3-agent)** project, a
[capstone project](https://ndurner.de/links/aileen3-kaggle-writeup) for the
[*AI Agents Intensive Course with Google*](https://www.kaggle.com/learn-guide/5-day-agents). Its predecessor, Aileen 2, explored autonomous
personalized summarization of German parliament proceedings and won Honorable Mention in the
[Generative AI Agents Developer Contest by NVIDIA and LangChain](https://www.nvidia.com/en-in/ai-data-science/generative-ai/developer-contest-with-langchain/).

For more details on setup, usage, and background, see the full project README. Aileen 3 Core has been tested with Claude Desktop and Aileen 3 Agent.
                """
            )

        with cell("🧭 How this demo is organized"):
            gr.Markdown(
                """
Think of this interface as a lightweight Jupyter notebook: instead of code cells, you get a sequence of interactive β€œcells” that walk through key aspects of the Aileen 3 Core MCP server.

- The **Introduction** cell gives you the high-level context from the README.
- The **Health check** cell below connects to the MCP server used by the Space and shows whether it can be started successfully.
- Future cells will demonstrate the problem statement and core information-foraging capabilities.
                """
            )

        render_problem_cell()

        render_solution_cell()

        gemini_key_box = render_setup_cell()

        with cell("πŸ‘©πŸ»β€βš•οΈ Health check"):
            health_panel = gr.HTML(value=health.render_placeholder_notice())
            run_button = gr.Button("Run health check", variant="primary")
            run_button.click(
                fn=render_health_panel,
                inputs=gemini_key_box,
                outputs=health_panel,
                queue=False,
            )

        render_context_biased_transcription_cell(gemini_key_box)
        render_media_analysis_cell(gemini_key_box)
        render_translation_cell(gemini_key_box)
        render_wrap_up_cell()

    return demo


def main() -> None:
    """Build the app and launch it, honoring host/port environment variables.

    Host resolution order: ``GRADIO_SERVER_NAME``, then ``HOST``, then
    ``0.0.0.0``. Port resolution order: ``GRADIO_SERVER_PORT``, then
    ``PORT``, then 7860; a non-numeric port value falls back to 7860.
    """
    host = (
        os.environ.get("GRADIO_SERVER_NAME")
        or os.environ.get("HOST")
        or "0.0.0.0"
    )
    raw_port = (
        os.environ.get("GRADIO_SERVER_PORT")
        or os.environ.get("PORT")
        or "7860"
    )
    try:
        port = int(raw_port)
    except ValueError:
        # Malformed env value: fall back to the Gradio default port.
        port = 7860
    create_app().launch(server_name=host, server_port=port)


# Script entry point: only launch the server when executed directly,
# not when this module is imported (e.g. by tests or the Space runtime).
if __name__ == "__main__":
    main()