/**
 * Exercise 4: Composition and Pipelines
*
* Goal: Learn to compose LLM with other Runnables
*
* In this exercise, you'll:
* 1. Create helper Runnables to work with LLM
* 2. Build a pipeline by chaining operations
* 3. Create a reusable agent pipeline
* 4. See the power of composition
*
* This is where the Runnable pattern really shines!
*/
import {HumanMessage, SystemMessage, LlamaCppLLM, Runnable} from '../../../../src/index.js';
// Part 1 - PromptFormatter Runnable
// Takes a string input (the user question) and wraps it in a chat-message
// array: a fixed system prompt followed by the question as a human message.
class PromptFormatter extends Runnable {
  /**
   * Format a raw user question into an LLM-ready message array.
   * @param {string} input - The user's question.
   * @param {object} [config] - Optional runtime config (unused here).
   * @returns {Promise<Array>} [SystemMessage, HumanMessage] pair.
   */
  async _call(input, config) {
    return [
      new SystemMessage('You are a helpful assistant. Be concise.'),
      new HumanMessage(input)
    ];
  }
}
// Part 2 - ResponseParser Runnable
// Takes an AIMessage and reduces it to its plain-text content.
class ResponseParser extends Runnable {
  /**
   * Extract the text content of a model response.
   * @param {object} input - An AIMessage (must expose a string `.content`).
   * @param {object} [config] - Optional runtime config (unused here).
   * @returns {Promise<string>} The message content with surrounding whitespace trimmed.
   */
  async _call(input, config) {
    return input.content.trim();
  }
}
// Part 3 - AnswerValidator Runnable
// Takes the parsed response string and rejects answers that are too short.
class AnswerValidator extends Runnable {
  /**
   * Validate that a response is longer than 10 characters.
   * @param {string} input - The parsed response text.
   * @param {object} [config] - Optional runtime config (unused here).
   * @returns {Promise<string>} The original response, or an error string when too short.
   */
  async _call(input, config) {
    // Guard against non-string input as well as short strings so the
    // pipeline fails loudly-but-gracefully instead of throwing on `.length`.
    if (typeof input !== 'string' || input.length <= 10) {
      return 'Error: Response too short';
    }
    return input;
  }
}
/**
 * Exercise 4: compose the helper Runnables and the LLM into pipelines.
 *
 * Demonstrates: testing components in isolation, chaining them with
 * `.pipe()`, building specialized pipelines (creative vs factual), and
 * batch processing. All LLM instances are disposed in `finally`.
 */
async function exercise4() {
  console.log('=== Exercise 4: Composition and Pipelines ===\n');
  const modelPath = './models/Meta-Llama-3.1-8B-Instruct-Q5_K_S.gguf';
  const llm = new LlamaCppLLM({
    modelPath,
    temperature: 0.7,
    maxTokens: 100
  });
  // Declared outside `try` so `finally` can dispose them if created.
  let creativeLlm = null;
  let factualLlm = null;
  try {
    // Part 1: Test individual components
    console.log('Part 1: Testing individual components');
    const formatter = new PromptFormatter();
    const parser = new ResponseParser();
    const validator = new AnswerValidator();

    console.log('Testing formatter:');
    const formatted = await formatter.invoke('What is AI?');
    console.log(formatted);
    console.log();

    console.log('Testing LLM + parser:');
    const llmResponse = await llm.invoke(formatted);
    const parsed = await parser.invoke(llmResponse);
    console.log('Parsed:', parsed);
    console.log();

    console.log('Testing validator with short input:');
    const shortResult = await validator.invoke('Hi');
    console.log(shortResult);
    console.log();

    // Part 2: Build a complete pipeline
    // formatter -> llm -> parser -> validator; the result is itself a Runnable.
    console.log('Part 2: Complete pipeline');
    const pipeline = formatter.pipe(llm).pipe(parser).pipe(validator);
    const result1 = await pipeline.invoke('What is machine learning?');
    console.log('Result:', result1);
    console.log();

    // Part 3: Reusable agent pipelines specialized by task
    console.log('Part 3: Reusable agent pipeline');
    // Creative pipeline: high temperature, no validator (creative answers
    // should not be rejected for brevity).
    creativeLlm = new LlamaCppLLM({ modelPath, temperature: 0.9, maxTokens: 100 });
    const creativePipeline = formatter.pipe(creativeLlm).pipe(parser);
    // Factual pipeline: low temperature, with validator.
    factualLlm = new LlamaCppLLM({ modelPath, temperature: 0.1, maxTokens: 100 });
    const factualPipeline = formatter.pipe(factualLlm).pipe(parser).pipe(validator);

    console.log('Creative (temp=0.9):');
    const creative = await creativePipeline.invoke('Describe a sunset');
    console.log(creative);
    console.log();
    console.log('Factual (temp=0.1):');
    const factual = await factualPipeline.invoke('What is the capital of France?');
    console.log(factual);
    console.log();

    // Part 4: Batch processing with pipelines
    console.log('Part 4: Batch processing with pipeline');
    const questions = [
      "What is Python?",
      "What is JavaScript?",
      "What is Rust?"
    ];
    const answers = await pipeline.batch(questions);
    questions.forEach((q, i) => {
      console.log(`Q: ${q}`);
      console.log(`A: ${answers[i]}`);
      console.log();
    });
  } finally {
    // Release every model we created, even if an earlier step threw.
    await llm.dispose();
    if (creativeLlm) await creativeLlm.dispose();
    if (factualLlm) await factualLlm.dispose();
  }
  console.log('\n✓ Exercise 4 complete!');
}
// Entry point: run the exercise, reporting any unhandled failure on stderr.
exercise4().catch((err) => console.error(err));
/**
* Expected Output:
* - Part 1: Each component works independently
* - Part 2: Full pipeline processes input -> output
* - Part 3: Different pipelines for different tasks
* - Part 4: Pipeline works with batch processing
*
* Learning Points:
* 1. Runnables are composable building blocks
* 2. .pipe() chains operations together
* 3. Pipelines are themselves Runnables
* 4. Easy to create specialized pipelines
* 5. Composition makes testing and reuse easy
 */