Commit: 85842db
Parent(s): d893ab4

feat: Rename to AI Engineer Masterclass + add 3 new modules from AI Engineering Guidebook 2025 (Context Engineering, Agentic Patterns, Agent Protocol Landscape)

Files changed:
- GenAI-AgenticAI/app.js      +376 -8
- GenAI-AgenticAI/index.html  +3 -3
- index.html                  +2 -2
GenAI-AgenticAI/app.js
CHANGED
@@ -1,20 +1,24 @@
-//
+// AI Engineer Masterclass — Module Data (based on AI Engineering Guidebook 2025)
 const modules = [
-  { id: 'llm-fundamentals', icon: '🧠', title: 'LLM Fundamentals', desc: 'Tokenization, attention, pre-training,
-  { id: 'transformers', icon: '⚡', title: 'Transformer Architecture', desc: 'Self-attention math, multi-head attention, positional encoding', category: 'Foundation', catClass: 'cat-foundation' },
+  { id: 'llm-fundamentals', icon: '🧠', title: 'LLM Fundamentals', desc: 'Tokenization, attention, pre-training, 7 generation parameters, text generation strategies', category: 'Foundation', catClass: 'cat-foundation' },
+  { id: 'transformers', icon: '⚡', title: 'Transformer Architecture', desc: 'Self-attention math, multi-head attention, positional encoding, MoE vs dense', category: 'Foundation', catClass: 'cat-foundation' },
   { id: 'huggingface', icon: '🤗', title: 'Hugging Face Ecosystem', desc: 'Transformers library, Model Hub, Datasets, Spaces, PEFT', category: 'Core Tools', catClass: 'cat-core' },
-  { id: 'finetuning', icon: '🎯', title: 'Fine-Tuning & PEFT', desc: 'LoRA, QLoRA,
-  { id: 'rag', icon: '🔍', title: 'RAG Pipelines', desc: 'Chunking, embedding models, vector search, re-ranking', category: 'Core', catClass: 'cat-core' },
+  { id: 'finetuning', icon: '🎯', title: 'Fine-Tuning & PEFT', desc: 'LoRA, QLoRA, SFT vs RFT, GRPO reasoning LLMs, IFT dataset generation', category: 'Core', catClass: 'cat-core' },
+  { id: 'rag', icon: '🔍', title: 'RAG Pipelines', desc: 'Chunking, embedding models, vector search, re-ranking, HyDE, REFRAG, CAG, Agentic RAG', category: 'Core', catClass: 'cat-core' },
   { id: 'vectordb', icon: '🗄️', title: 'Vector Databases', desc: 'FAISS, Pinecone, ChromaDB, HNSW, IVF algorithms', category: 'Core', catClass: 'cat-core' },
-  { id: '
-  { id: '
-  { id: '
+  { id: 'context-engineering', icon: '🧩', title: 'Context Engineering', desc: 'What to put in context, 6 context types for agents, manual RAG vs agentic context', category: 'Core', catClass: 'cat-core' },
+  { id: 'agents', icon: '🤖', title: 'AI Agents & Frameworks', desc: 'ReAct, LangChain, LangGraph, CrewAI, AutoGen, 5 levels of agentic AI, memory types', category: 'Agentic', catClass: 'cat-agent' },
+  { id: 'agentic-patterns', icon: '🔮', title: 'Agentic Design Patterns', desc: '5 design patterns, ReAct from scratch, 4 layers of agentic AI, 30 must-know terms', category: 'Agentic', catClass: 'cat-agent' },
+  { id: 'multiagent', icon: '🕸️', title: 'Multi-Agent Systems', desc: '7 patterns, orchestration, supervisor, peer-to-peer, A2A & AG-UI protocols', category: 'Agentic', catClass: 'cat-agent' },
+  { id: 'agent-protocols', icon: '📡', title: 'Agent Protocol Landscape', desc: 'MCP, A2A, AG-UI, Agent Protocol spec, comparison and when to use each', category: 'Agentic', catClass: 'cat-agent' },
+  { id: 'tools', icon: '🔧', title: 'Function Calling & Tools', desc: 'OpenAI function calling, tool schemas, MCP protocol, JSON prompting', category: 'Agentic', catClass: 'cat-agent' },
   { id: 'evaluation', icon: '📊', title: 'Evaluation & Benchmarks', desc: 'LLM-as-a-judge, RAGAS, BLEU/ROUGE, human eval', category: 'Production', catClass: 'cat-production' },
   { id: 'guardrails', icon: '🛡️', title: 'Guardrails & Safety', desc: 'Hallucination detection, content filtering, red-teaming', category: 'Production', catClass: 'cat-production' },
   { id: 'deployment', icon: '🚀', title: 'Deployment & Serving', desc: 'vLLM, TGI, Ollama, quantization (GPTQ/AWQ/GGUF)', category: 'Production', catClass: 'cat-production' },
   { id: 'production', icon: '⚙️', title: 'Production Patterns', desc: 'Caching, streaming, rate limiting, cost optimization', category: 'Production', catClass: 'cat-production' }
 ];

+
 const MODULE_CONTENT = {
   'llm-fundamentals': {
     concepts: `
@@ -1850,6 +1854,370 @@ router = litellm.Router(
 </div>`
 }
 });
+
+// ─── NEW MODULES FROM AI ENGINEERING GUIDEBOOK 2025 ────────────────────────
+
+MODULE_CONTENT['context-engineering'] = {
+  concepts: `
+<div class="section">
+<h2>🧩 Context Engineering</h2>
+<div class="info-box">
+<div class="box-title">What is Context Engineering?</div>
+<div class="box-content">Context Engineering is the discipline of <strong>deciding what information to include in an LLM's context window</strong> at inference time. It's the evolution beyond simple prompt engineering — instead of crafting the right words, you're architecting the right information flow into the model.</div>
+</div>
+
+<h3>Why Context Engineering Matters</h3>
+<p>LLMs have no persistent memory between calls. Every token in the context window costs compute and money. The quality of an LLM's output is directly bounded by the quality of its context. <strong>Garbage in = garbage out</strong> — no matter how capable the model.</p>
+
+<div class="callout insight">
+<div class="callout-title">📖 Book Insight (AI Engineering Guidebook 2025)</div>
+Context Engineering is described as "the art and science of filling the context window with exactly the right information — not too much, not too little — to enable the model to perform the task."
+</div>
+
+<h3>6 Types of Contexts for AI Agents</h3>
+<table>
+<tr><th>Context Type</th><th>What It Contains</th><th>Example</th></tr>
+<tr><td><strong>Instructions</strong></td><td>System prompt — role, rules, output format</td><td>"You are a helpful assistant. Always respond in JSON."</td></tr>
+<tr><td><strong>Memory</strong></td><td>Long-term facts about the user or world</td><td>User's name, preferences, past decisions</td></tr>
+<tr><td><strong>History</strong></td><td>Recent conversation turns</td><td>Last 10 messages in the chat</td></tr>
+<tr><td><strong>Retrieved Information</strong></td><td>Documents pulled from RAG / search</td><td>Top-3 relevant docs from vector DB</td></tr>
+<tr><td><strong>Tool Results</strong></td><td>Outputs from previous tool calls</td><td>API response from a weather service</td></tr>
+<tr><td><strong>Background Knowledge</strong></td><td>Domain facts injected at system level</td><td>Company product catalog, legal constraints</td></tr>
+</table>
+
+<h3>Manual RAG Pipeline vs Agentic Context Engineering</h3>
+<div class="comparison">
+<div class="comparison-bad">
+<strong>❌ Manual RAG</strong><br><br>
+Static retrieval — always pull top-k docs.<br>
+Fixed chunking strategy.<br>
+No awareness of conversation state.<br>
+Context filled mechanically.
+</div>
+<div class="comparison-good">
+<strong>✅ Agentic Context Engineering</strong><br><br>
+Dynamic — agent decides <em>what</em> to retrieve.<br>
+Adaptive chunking based on query type.<br>
+Tracks conversation history intelligently.<br>
+Context assembled based on task needs.
+</div>
+</div>
+
+<h3>Context Window Management</h3>
+<p>As context grows, three problems arise: <strong>(1) Lost in the middle</strong> — LLMs attend most to the start and end of context, ignoring the middle. <strong>(2) Cost</strong> — every token costs money. <strong>(3) Latency</strong> — processing longer context takes more time. Solutions: summarization of old history, selective retrieval, context compression with smaller models.</p>
+
+<h3>Context Engineering for Claude</h3>
+<p>Claude is built for <strong>long-context recall</strong> (200K tokens), and its "skills" pattern separates tool use from context assembly. Anthropic recommends structuring context into XML-tagged blocks so the model can tell block types apart, and placing the query or key instructions <strong>after</strong> long documents for best recall.</p>
+</div>`,
+  code: `
+<div class="section">
+<h2>💻 Context Engineering — Code Examples</h2>
+<h3>Dynamic Context Assembly Pattern</h3>
+<div class="code-block"><span class="keyword">from</span> openai <span class="keyword">import</span> OpenAI
+<span class="keyword">from</span> dataclasses <span class="keyword">import</span> dataclass
+<span class="keyword">from</span> typing <span class="keyword">import</span> List, Dict
+
+<span class="keyword">@dataclass</span>
+<span class="keyword">class</span> <span class="function">ContextBlock</span>:
+    type: str <span class="comment"># 'instruction', 'memory', 'history', 'retrieved', 'tool_result'</span>
+    content: str
+    priority: int <span class="comment"># Higher = kept if context limit hit</span>
+    token_count: int = 0
+
+<span class="keyword">class</span> <span class="function">ContextEngineer</span>:
+    <span class="keyword">def</span> <span class="function">__init__</span>(self, max_tokens: int = <span class="number">8000</span>):
+        self.max_tokens = max_tokens
+        self.blocks: List[ContextBlock] = []
+
+    <span class="keyword">def</span> <span class="function">add_block</span>(self, block: ContextBlock):
+        self.blocks.append(block)
+
+    <span class="keyword">def</span> <span class="function">build_context</span>(self) -> List[Dict]:
+        <span class="comment"># Sort by priority, trim to fit token budget</span>
+        sorted_blocks = sorted(self.blocks, key=<span class="keyword">lambda</span> b: -b.priority)
+        used_tokens = <span class="number">0</span>
+        selected = []
+        <span class="keyword">for</span> block <span class="keyword">in</span> sorted_blocks:
+            <span class="keyword">if</span> used_tokens + block.token_count <= self.max_tokens:
+                selected.append(block)
+                used_tokens += block.token_count
+
+        <span class="comment"># Re-order: instructions first, history last</span>
+        instructions = [b <span class="keyword">for</span> b <span class="keyword">in</span> selected <span class="keyword">if</span> b.type == <span class="string">'instruction'</span>]
+        memory = [b <span class="keyword">for</span> b <span class="keyword">in</span> selected <span class="keyword">if</span> b.type == <span class="string">'memory'</span>]
+        retrieved = [b <span class="keyword">for</span> b <span class="keyword">in</span> selected <span class="keyword">if</span> b.type == <span class="string">'retrieved'</span>]
+        tool_res = [b <span class="keyword">for</span> b <span class="keyword">in</span> selected <span class="keyword">if</span> b.type == <span class="string">'tool_result'</span>]
+        history = [b <span class="keyword">for</span> b <span class="keyword">in</span> selected <span class="keyword">if</span> b.type == <span class="string">'history'</span>]
+
+        ordered = instructions + memory + retrieved + tool_res + history
+        messages = []
+        <span class="keyword">for</span> b <span class="keyword">in</span> ordered:
+            role = <span class="string">'system'</span> <span class="keyword">if</span> b.type == <span class="string">'instruction'</span> <span class="keyword">else</span> <span class="string">'user'</span>
+            messages.append({<span class="string">'role'</span>: role, <span class="string">'content'</span>: b.content})
+        <span class="keyword">return</span> messages
+
+<span class="comment"># Usage</span>
+ctx = ContextEngineer(max_tokens=<span class="number">6000</span>)
+ctx.add_block(ContextBlock(<span class="string">'instruction'</span>, <span class="string">'You are a helpful AI.'</span>, priority=<span class="number">100</span>, token_count=<span class="number">10</span>))
+ctx.add_block(ContextBlock(<span class="string">'retrieved'</span>, retrieved_docs, priority=<span class="number">80</span>, token_count=<span class="number">500</span>))
+ctx.add_block(ContextBlock(<span class="string">'history'</span>, chat_history, priority=<span class="number">60</span>, token_count=<span class="number">200</span>))
+messages = ctx.build_context()
+</div>
+</div>`,
+  interview: `
+<div class="section">
+<h2>🎯 Context Engineering — Interview Questions</h2>
+<div class="interview-box"><strong>Q1: What is context engineering and how does it differ from prompt engineering?</strong><p><strong>Answer:</strong> Prompt engineering focuses on crafting the right instructions/wording. Context engineering is broader — it's about deciding what information (memories, retrieved docs, tool results, history) to put into the context window, how to prioritize it when space is limited, and in what order. It's the systems-level discipline of managing an LLM's information diet at runtime.</p></div>
+<div class="interview-box"><strong>Q2: What is the "lost in the middle" problem?</strong><p><strong>Answer:</strong> LLMs tend to attend most strongly to content at the very beginning and very end of their context window, while information in the middle gets relatively less attention. This means placing critical information in the middle of a long context leads to degraded performance. Mitigation: put the most important facts last, or use retrieval to place relevant chunks near the query.</p></div>
+<div class="interview-box"><strong>Q3: Name the 6 types of context for AI agents.</strong><p><strong>Answer:</strong> (1) Instructions — system prompt with role and rules. (2) Memory — long-term facts about the user/world. (3) History — recent conversation. (4) Retrieved Information — documents from RAG. (5) Tool Results — outputs from function calls. (6) Background Knowledge — domain-specific injected facts. Each plays a different role in enabling the agent to reason correctly.</p></div>
+<div class="interview-box"><strong>Q4: How do you handle context window limits in production agents?</strong><p><strong>Answer:</strong> (1) Summarize older history instead of including raw messages. (2) Use a context prioritization system — rank blocks by importance and trim lowest-priority first. (3) Compress retrieved documents with a small "compressor LLM" before adding to context. (4) Use structured memory stores that selectively load relevant facts, not everything. (5) Use streaming to process very long documents in chunks.</p></div>
+</div>`
+};
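The "Context Window Management" section above names summarization of old history as the first mitigation. A minimal sketch of that rolling-summary idea, assuming the OpenAI Python SDK; the model name, the keep_recent cutoff, and the prompt wording are illustrative choices, not part of this commit:

from openai import OpenAI

client = OpenAI()

def compress_history(messages: list[dict], keep_recent: int = 6) -> list[dict]:
    """Fold everything except the last few turns into one summary message."""
    if len(messages) <= keep_recent:
        return messages
    old, recent = messages[:-keep_recent], messages[-keep_recent:]
    transcript = "\n".join(f"{m['role']}: {m['content']}" for m in old)
    summary = client.chat.completions.create(
        model="gpt-4o-mini",  # a small, cheap model is enough for compression
        messages=[{"role": "user",
                   "content": "Summarize this conversation in under 150 words, "
                              "keeping decisions, names, and open questions:\n" + transcript}],
    ).choices[0].message.content
    # One short system note replaces many old turns, freeing the token budget.
    return [{"role": "system", "content": f"Summary of earlier conversation: {summary}"}] + recent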
+
+MODULE_CONTENT['agentic-patterns'] = {
+  concepts: `
+<div class="section">
+<h2>🔮 Agentic Design Patterns</h2>
+<div class="info-box">
+<div class="box-title">What are Agentic Design Patterns?</div>
+<div class="box-content">Agentic design patterns are <strong>reusable architectural blueprints</strong> for building reliable AI agents. Just as software design patterns (Strategy, Observer, etc.) solved recurring OOP problems, agentic patterns solve recurring agent problems like reliability, delegation, and parallelism.</div>
+</div>
+
+<h3>5 Core Agentic AI Design Patterns</h3>
+<table>
+<tr><th>#</th><th>Pattern</th><th>Description</th><th>When to Use</th></tr>
+<tr><td>1</td><td><strong>Reflection</strong></td><td>Agent critiques its own output, then revises it</td><td>When accuracy matters more than speed</td></tr>
+<tr><td>2</td><td><strong>Tool Use</strong></td><td>Agent calls external tools (search, code, APIs)</td><td>When LLM alone can't solve the task</td></tr>
+<tr><td>3</td><td><strong>Planning</strong></td><td>Agent generates a multi-step plan before acting</td><td>Complex tasks with many sub-steps</td></tr>
+<tr><td>4</td><td><strong>Multi-Agent</strong></td><td>Multiple specialized agents collaborate via messages</td><td>Tasks requiring different expertise</td></tr>
+<tr><td>5</td><td><strong>Memory</strong></td><td>Agent stores and retrieves past interactions</td><td>Long-running or personalized applications</td></tr>
+</table>
+
+<h3>5 Levels of Agentic AI Systems</h3>
+<table>
+<tr><th>Level</th><th>Name</th><th>Capability</th></tr>
+<tr><td>0</td><td><strong>No AI</strong></td><td>Purely rule-based or human-driven</td></tr>
+<tr><td>1</td><td><strong>AI-Assisted</strong></td><td>LLM suggests, human decides and executes</td></tr>
+<tr><td>2</td><td><strong>AI Co-pilot</strong></td><td>LLM acts, but human approves each action</td></tr>
+<tr><td>3</td><td><strong>AI Agent</strong></td><td>Autonomous execution within defined scope</td></tr>
+<tr><td>4</td><td><strong>Agentic AI</strong></td><td>Multi-step autonomous operation, self-correcting</td></tr>
+<tr><td>5</td><td><strong>AI Workforce</strong></td><td>Multiple agents, full autonomy over long horizons</td></tr>
+</table>
+
+<h3>4 Layers of Agentic AI Architecture</h3>
+<p><strong>Layer 1 — Perception:</strong> Receiving inputs (text, images, tool results, sensor data). <strong>Layer 2 — Reasoning:</strong> The LLM processing context and deciding on actions. <strong>Layer 3 — Action:</strong> Executing tool calls, writing code, sending messages. <strong>Layer 4 — Memory:</strong> Storing results, updating knowledge, retrieving past context. This layered view clarifies where failures occur.</p>
+
+<h3>ReAct Pattern — The Foundation of Modern Agents</h3>
+<p>ReAct (Reasoning + Acting) is the dominant agentic loop: <strong>Thought → Action → Observation</strong>. The model writes a thought explaining its reasoning, selects an action (tool call), observes the result, then loops until done. This interleaving of reasoning and acting proved more reliable than pure chain-of-thought or pure action-only approaches.</p>
+<div class="formula">Loop: [Thought] → [Action: tool(args)] → [Observation: result] → [Thought] → ... → [Final Answer]</div>
+
+<h3>30 Must-Know Agentic AI Terms</h3>
+<p>Key vocabulary every AI Engineer must know: <strong>Tool</strong> (callable function), <strong>Handoff</strong> (passing control between agents), <strong>Orchestrator</strong> (agent that delegates), <strong>Sub-agent</strong> (executes delegated tasks), <strong>Scratchpad</strong> (agent's working memory mid-task), <strong>Guardrails</strong> (safety constraints), <strong>Grounding</strong> (connecting outputs to real-world facts), <strong>Hallucination</strong> (false confident output), <strong>Context window</strong> (max tokens the model sees), <strong>Token budget</strong> (allocated tokens for a task), <strong>System prompt</strong> (persistent instructions), <strong>Function schema</strong> (JSON spec for tools), <strong>Streaming</strong> (incremental output), <strong>Interrupts</strong> (human-in-the-loop breakpoints)...</p>
+</div>`,
+  code: `
+<div class="section">
+<h2>💻 Agentic Design Patterns — Code Examples</h2>
+<h3>Reflection Pattern</h3>
+<div class="code-block"><span class="keyword">from</span> openai <span class="keyword">import</span> OpenAI
+client = OpenAI()
+
+<span class="keyword">def</span> <span class="function">agent_with_reflection</span>(task: str, max_rounds: int = <span class="number">3</span>) -> str:
+    <span class="comment"># Step 1: Generate initial response</span>
+    response = client.chat.completions.create(
+        model=<span class="string">"gpt-4o"</span>,
+        messages=[{<span class="string">"role"</span>: <span class="string">"user"</span>, <span class="string">"content"</span>: task}]
+    ).choices[<span class="number">0</span>].message.content
+
+    <span class="keyword">for</span> _ <span class="keyword">in</span> range(max_rounds):
+        <span class="comment"># Step 2: Self-critique</span>
+        critique = client.chat.completions.create(
+            model=<span class="string">"gpt-4o"</span>,
+            messages=[
+                {<span class="string">"role"</span>: <span class="string">"user"</span>, <span class="string">"content"</span>: task},
+                {<span class="string">"role"</span>: <span class="string">"assistant"</span>, <span class="string">"content"</span>: response},
+                {<span class="string">"role"</span>: <span class="string">"user"</span>, <span class="string">"content"</span>: <span class="string">"Review your answer. What's wrong or missing? Be specific."</span>}
+            ]
+        ).choices[<span class="number">0</span>].message.content
+
+        <span class="keyword">if</span> <span class="string">"looks correct"</span> <span class="keyword">in</span> critique.lower() <span class="keyword">or</span> <span class="string">"no issues"</span> <span class="keyword">in</span> critique.lower():
+            <span class="keyword">break</span>
+
+        <span class="comment"># Step 3: Revise</span>
+        response = client.chat.completions.create(
+            model=<span class="string">"gpt-4o"</span>,
+            messages=[
+                {<span class="string">"role"</span>: <span class="string">"user"</span>, <span class="string">"content"</span>: task},
+                {<span class="string">"role"</span>: <span class="string">"assistant"</span>, <span class="string">"content"</span>: response},
+                {<span class="string">"role"</span>: <span class="string">"user"</span>, <span class="string">"content"</span>: f<span class="string">"Fix these issues: {critique}"</span>}
+            ]
+        ).choices[<span class="number">0</span>].message.content
+
+    <span class="keyword">return</span> response
+</div>
+<h3>ReAct Pattern from Scratch</h3>
+<div class="code-block"><span class="keyword">import</span> json, re
+<span class="keyword">from</span> openai <span class="keyword">import</span> OpenAI
+
+client = OpenAI()
+
+TOOLS = {
+    <span class="string">"search"</span>: <span class="keyword">lambda</span> q: f<span class="string">"[Search result for '{q}': Paris is the capital of France]"</span>,
+    <span class="string">"calculate"</span>: <span class="keyword">lambda</span> expr: str(eval(expr))
+}
+
+SYSTEM = <span class="string">"""You are a ReAct agent. At each step output ONLY:
+Thought: your reasoning
+Action: {"tool": "search"|"calculate", "input": "..."}
+Or when done:
+Thought: I have the answer
+Final Answer: <your answer>"""</span>
+
+<span class="keyword">def</span> <span class="function">react_agent</span>(question: str, max_steps: int = <span class="number">5</span>) -> str:
+    messages = [
+        {<span class="string">"role"</span>: <span class="string">"system"</span>, <span class="string">"content"</span>: SYSTEM},
+        {<span class="string">"role"</span>: <span class="string">"user"</span>, <span class="string">"content"</span>: question}
+    ]
+    <span class="keyword">for</span> step <span class="keyword">in</span> range(max_steps):
+        response = client.chat.completions.create(
+            model=<span class="string">"gpt-4o"</span>, messages=messages
+        ).choices[<span class="number">0</span>].message.content
+        messages.append({<span class="string">"role"</span>: <span class="string">"assistant"</span>, <span class="string">"content"</span>: response})
+
+        <span class="keyword">if</span> <span class="string">"Final Answer:"</span> <span class="keyword">in</span> response:
+            <span class="keyword">return</span> response.split(<span class="string">"Final Answer:"</span>)[<span class="number">1</span>].strip()
+
+        action_match = re.search(r<span class="string">'Action:\s*(\{.*?\})'</span>, response, re.DOTALL)
+        <span class="keyword">if</span> action_match:
+            action = json.loads(action_match.group(<span class="number">1</span>))
+            observation = TOOLS[action[<span class="string">"tool"</span>]](action[<span class="string">"input"</span>])
+            messages.append({<span class="string">"role"</span>: <span class="string">"user"</span>, <span class="string">"content"</span>: f<span class="string">"Observation: {observation}"</span>})
+    <span class="keyword">return</span> <span class="string">"Max steps reached"</span>
+
+print(react_agent(<span class="string">"What is the capital of France and what is 15 * 23?"</span>))
+</div>
+</div>`,
+  interview: `
+<div class="section">
+<h2>🎯 Agentic Design Patterns — Interview Questions</h2>
+<div class="interview-box"><strong>Q1: What are the 5 agentic AI design patterns?</strong><p><strong>Answer:</strong> (1) Reflection — agent critiques and revises its own output. (2) Tool Use — agent calls external functions/APIs. (3) Planning — agent creates a multi-step plan before execution. (4) Multi-Agent — multiple agents collaborate, each specializing in different sub-tasks. (5) Memory — agent persists and retrieves information across interactions. These patterns are often combined: e.g., a planning agent that uses tools and reflects on results.</p></div>
+<div class="interview-box"><strong>Q2: Explain the ReAct loop.</strong><p><strong>Answer:</strong> ReAct (Reasoning + Acting) interleaves thinking with tool use: (1) Thought — model reasons about what to do next. (2) Action — model calls a tool with specific arguments. (3) Observation — tool result is appended to context. This loop repeats until the model outputs "Final Answer." The key insight is that grounding reasoning in real tool observations prevents hallucination chains and enables multi-step problem solving.</p></div>
+<div class="interview-box"><strong>Q3: What are the 5 levels of agentic AI systems?</strong><p><strong>Answer:</strong> Level 0: No AI. Level 1: AI-Assisted (suggests, human acts). Level 2: Co-pilot (AI acts, human approves). Level 3: AI Agent (autonomous in defined scope). Level 4: Agentic AI (multi-step autonomous, self-correcting). Level 5: AI Workforce (swarm of agents, long-horizon autonomy). Most production systems today are Level 2-3; Level 4-5 is emerging.</p></div>
+</div>`
+};
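Of the five patterns above, Reflection and ReAct get worked examples in the module; Planning does not. A minimal plan-and-execute sketch under the same assumptions as the module's code (OpenAI Python SDK, gpt-4o); the plan format and executor prompts are illustrative, not part of the committed file:

from openai import OpenAI

client = OpenAI()

def plan_and_execute(task: str) -> str:
    # Planning step: draft the full step list before doing any work.
    plan_text = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user",
                   "content": "List 3-5 short numbered steps to accomplish this task, "
                              "one per line, no extra text.\nTask: " + task}],
    ).choices[0].message.content
    steps = [line.strip() for line in plan_text.splitlines() if line.strip()]

    # Execution loop: feed earlier results forward so later steps build on them.
    results = []
    for step in steps:
        result = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user",
                       "content": f"Task: {task}\nCompleted so far:\n" + "\n".join(results)
                                  + f"\nNow do this step and report only the result: {step}"}],
        ).choices[0].message.content
        results.append(f"{step} -> {result}")
    return results[-1] if results else plan_text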
+
+MODULE_CONTENT['agent-protocols'] = {
+  concepts: `
+<div class="section">
+<h2>📡 Agent Protocol Landscape</h2>
+<div class="info-box">
+<div class="box-title">Why Do Agent Protocols Matter?</div>
+<div class="box-content">As AI agents move from single-LLM apps to interconnected systems, they need <strong>standardized communication protocols</strong>. Just like HTTP standardized web communication, agent protocols standardize how agents discover capabilities, delegate tasks, share context, and return results — enabling interoperability across vendors and frameworks.</div>
+</div>
+
+<h3>The 4 Key Protocols You Must Know</h3>
+<table>
+<tr><th>Protocol</th><th>Created By</th><th>Purpose</th><th>Layer</th></tr>
+<tr><td><strong>MCP</strong> (Model Context Protocol)</td><td>Anthropic</td><td>Standardize how LLMs connect to tools & data sources</td><td>Tool/Data Layer</td></tr>
+<tr><td><strong>A2A</strong> (Agent-to-Agent)</td><td>Google</td><td>Standardize how agents discover and delegate to other agents</td><td>Agent Communication Layer</td></tr>
+<tr><td><strong>AG-UI</strong> (Agent-User Interaction)</td><td>CopilotKit</td><td>Standardize how agents communicate state to frontends</td><td>Presentation Layer</td></tr>
+<tr><td><strong>Agent Protocol</strong></td><td>AI Engineer Foundation</td><td>OpenAPI spec for agent task management</td><td>Task Management Layer</td></tr>
+</table>
+
+<h3>MCP — Model Context Protocol (Deep Dive)</h3>
+<p>MCP solves the N×M integration problem. Before MCP: each LLM app needed custom integrations to each tool/database. With MCP: each tool builds one MCP Server; each LLM app builds one MCP Client. They interoperate automatically.</p>
+<table>
+<tr><th>MCP Component</th><th>Role</th><th>Analogy</th></tr>
+<tr><td><strong>MCP Host</strong></td><td>App running the LLM (Claude Desktop, Cursor, your app)</td><td>Web Browser</td></tr>
+<tr><td><strong>MCP Client</strong></td><td>Protocol client inside the host</td><td>HTTP Client</td></tr>
+<tr><td><strong>MCP Server</strong></td><td>Exposes tools, resources, prompts</td><td>Web Server</td></tr>
+</table>
+<p>MCP Servers expose 3 primitives: <strong>Tools</strong> (executable functions), <strong>Resources</strong> (read-only data), <strong>Prompts</strong> (reusable templates). Transport: stdio (local) or HTTP+SSE (remote).</p>
+
+<h3>A2A — Agent-to-Agent Protocol (Google)</h3>
+<p>A2A allows agents built by different vendors to collaborate. An agent publishes an <strong>Agent Card</strong> (JSON) describing its capabilities. Other agents discover it and delegate tasks using standardized request/response schemas. Key concepts: <strong>Task</strong> (unit of work), <strong>Artifact</strong> (output produced), <strong>Part</strong> (typed content: text, file, data). A2A is transport-agnostic (HTTP, gRPC, etc.).</p>
+
+<h3>AG-UI — Agent-User Interaction Protocol</h3>
+<p>AG-UI standardizes the real-time event stream between agents and frontend UIs. Instead of building custom WebSocket logic for every agent app, AG-UI defines: <strong>streaming text events</strong>, <strong>tool call events</strong>, <strong>state sync events</strong>, and <strong>lifecycle events</strong> (run_start, run_end). This means any AG-UI-compatible agent can power any AG-UI-compatible frontend.</p>
+
+<h3>When to Use Which Protocol?</h3>
+<div class="comparison">
+<div class="comparison-good">
+<strong>Use MCP when:</strong><br>
+Connecting LLM to external tools, files, APIs, or databases. Building reusable tool servers that work with multiple LLM apps.
+</div>
+<div class="comparison-good">
+<strong>Use A2A when:</strong><br>
+Orchestrating multiple specialized agents from different teams/vendors. Delegating sub-tasks from one agent to another.
+</div>
+</div>
+</div>`,
+  code: `
+<div class="section">
+<h2>💻 Agent Protocols — Code Examples</h2>
+<h3>Building a Simple MCP Server (Python SDK)</h3>
+<div class="code-block"><span class="preprocessor"># pip install mcp</span>
+<span class="keyword">from</span> mcp.server.fastmcp <span class="keyword">import</span> FastMCP
+<span class="keyword">import</span> httpx
+
+<span class="comment"># Create an MCP server</span>
+mcp = FastMCP(<span class="string">"Weather Server"</span>)
+
+<span class="keyword">@mcp.tool()</span>
+<span class="keyword">async def</span> <span class="function">get_weather</span>(city: str) -> str:
+    <span class="string">"""Get current weather for a city."""</span>
+    <span class="keyword">async with</span> httpx.AsyncClient() <span class="keyword">as</span> client:
+        resp = <span class="keyword">await</span> client.get(
+            f<span class="string">"https://wttr.in/{city}?format=3"</span>
+        )
+        <span class="keyword">return</span> resp.text
+
+<span class="keyword">@mcp.resource(</span><span class="string">"weather://cities"</span><span class="keyword">)</span>
+<span class="keyword">def</span> <span class="function">list_cities</span>() -> str:
+    <span class="string">"""List of supported cities."""</span>
+    <span class="keyword">return</span> <span class="string">"London, Paris, Tokyo, New York, Sydney"</span>
+
+<span class="keyword">@mcp.prompt()</span>
+<span class="keyword">def</span> <span class="function">weather_report_prompt</span>(city: str) -> str:
+    <span class="string">"""Generate a professional weather report."""</span>
+    <span class="keyword">return</span> f<span class="string">"Write a professional weather report for {city} in 3 sentences."</span>
+
+<span class="comment"># Run the server (stdio transport for local use)</span>
+<span class="keyword">if</span> __name__ == <span class="string">"__main__"</span>:
+    mcp.run() <span class="comment"># Connects with Claude Desktop, Cursor, etc.</span>
+</div>
+<h3>A2A Agent Card Example</h3>
+<div class="code-block"><span class="comment"># Agent Card: JSON descriptor other agents discover</span>
+agent_card = {
+    <span class="string">"name"</span>: <span class="string">"CodeReviewAgent"</span>,
+    <span class="string">"description"</span>: <span class="string">"Reviews Python code for bugs, style, and security"</span>,
+    <span class="string">"version"</span>: <span class="string">"1.0"</span>,
+    <span class="string">"capabilities"</span>: {
+        <span class="string">"streaming"</span>: <span class="keyword">True</span>,
+        <span class="string">"pushNotifications"</span>: <span class="keyword">False</span>,
+    },
+    <span class="string">"skills"</span>: [
+        {
+            <span class="string">"id"</span>: <span class="string">"review_code"</span>,
+            <span class="string">"name"</span>: <span class="string">"Review Code"</span>,
+            <span class="string">"description"</span>: <span class="string">"Review Python code and return issues"</span>,
+            <span class="string">"inputModes"</span>: [<span class="string">"text"</span>],
+            <span class="string">"outputModes"</span>: [<span class="string">"text"</span>]
+        }
+    ],
+    <span class="string">"url"</span>: <span class="string">"https://myagent.example.com/a2a"</span>
+}
+</div>
+</div>`,
+  interview: `
+<div class="section">
+<h2>🎯 Agent Protocol Landscape — Interview Questions</h2>
+<div class="interview-box"><strong>Q1: What problem does MCP solve?</strong><p><strong>Answer:</strong> MCP solves the N×M integration problem. Before MCP, if you had N LLM applications and M tools/data sources, you needed N×M custom integrations. With MCP, each tool builds one MCP Server and each app builds one MCP Client — reducing to N+M integrations. It standardizes what tools expose (functions, data, templates) and how LLMs request them.</p></div>
+<div class="interview-box"><strong>Q2: How does A2A differ from MCP?</strong><p><strong>Answer:</strong> MCP is about an LLM connecting to tools and data sources — it's a vertical protocol (LLM → tools). A2A is about agents communicating with other agents — it's a horizontal protocol (agent ↔ agent). You'd use MCP to give your agent access to a database, and A2A to have your orchestrator agent delegate a coding task to a specialized code-writing agent.</p></div>
+<div class="interview-box"><strong>Q3: What is AG-UI and why does it matter?</strong><p><strong>Answer:</strong> AG-UI (Agent-User Interaction protocol) standardizes the streaming event protocol between AI agents and their frontend UIs. Without AG-UI, every agent app needs custom WebSocket or SSE logic for showing streaming responses, tool call status, and agent state in the UI. AG-UI defines a standard event schema so any compliant agent can power any compliant frontend — enabling a plugin-like ecosystem of agent UIs.</p></div>
+<div class="interview-box"><strong>Q4: What are the 3 primitives exposed by an MCP Server?</strong><p><strong>Answer:</strong> (1) <strong>Tools</strong> — executable functions the LLM can call (like "search_web", "run_sql"). (2) <strong>Resources</strong> — read-only data the LLM can reference for context (like database schemas, file contents). (3) <strong>Prompts</strong> — reusable prompt templates the host app can offer to users. Each primitive serves a different role in the LLM's workflow.</p></div>
+</div>`
+};
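The AG-UI section above describes a typed event stream (lifecycle, streaming text, tool calls, state sync) between agent and frontend. A schematic sketch of that idea over SSE using Flask; the event names and payload fields are illustrative stand-ins, not the official AG-UI event schema, so consult the protocol spec before relying on them:

import json
from flask import Flask, Response

app = Flask(__name__)

def agent_run():
    """Yield typed events an AG-UI-style backend emits (names illustrative, not the official schema)."""
    yield {"type": "run_start", "run_id": "demo-1"}                            # lifecycle
    for chunk in ["Checking ", "the ", "weather..."]:
        yield {"type": "text_delta", "content": chunk}                         # streaming text
    yield {"type": "tool_call", "tool": "get_weather", "args": {"city": "Paris"}}
    yield {"type": "tool_result", "tool": "get_weather", "result": "18°C, clear"}
    yield {"type": "state_sync", "state": {"last_city": "Paris"}}              # shared state
    yield {"type": "run_end", "run_id": "demo-1"}                              # lifecycle

@app.route("/agent/stream")
def stream():
    # One SSE frame per event; any frontend that understands the schema can render it.
    return Response((f"data: {json.dumps(event)}\n\n" for event in agent_run()),
                    mimetype="text/event-stream")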
+
 // ─── Dashboard Render ───────────────────────────────────────────────────────
 function renderDashboard() {
   const grid = document.getElementById('modulesGrid');
GenAI-AgenticAI/index.html
CHANGED
@@ -3,7 +3,7 @@
 <head>
 <meta charset="UTF-8">
 <meta name="viewport" content="width=device-width, initial-scale=1.0">
-<title>
+<title>AI Engineer Masterclass | DataScience</title>
 <link rel="preconnect" href="https://fonts.googleapis.com">
 <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;500&display=swap" rel="stylesheet">
 <style>

@@ -350,8 +350,8 @@

 <div id="dashboard" class="active">
 <div class="hero">
-<h1>🤖
-<p>From LLM fundamentals to production-ready agentic systems —
+<h1>🤖 AI Engineer Masterclass</h1>
+<p>From LLM fundamentals to production-ready agentic systems — context engineering, RAG, agents, MCP, A2A, AG-UI and more. Based on the AI Engineering Guidebook 2025.</p>
 <div class="hero-stats">
 <div class="hero-stat"><div class="num" id="statModules">13</div><div class="lbl">Modules</div></div>
 <div class="hero-stat"><div class="num">3</div><div class="lbl">Tabs / Module</div></div>
index.html
CHANGED
@@ -240,8 +240,8 @@
 <a href="GenAI-AgenticAI/index.html" class="card genai-card">
 <div>
 <span class="badge">🤖 New</span>
-<h2>
-<p>LLMs, RAG, Vector DBs, AI Agents, LangChain, MCP, A2A
+<h2>AI Engineer Masterclass</h2>
+<p>LLMs, Context Engineering, RAG, Vector DBs, AI Agents, LangChain, MCP, A2A, AG-UI and Production deployment. Based on the AI Engineering Guidebook 2025.</p>
 </div>
 <div class="card-footer">
 <span style="color: #58a6ff">Explore GenAI</span>