repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/langchain_adapter_test.go | memory/langchain_adapter_test.go | package memory
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tmc/langchaingo/llms"
langchainmemory "github.com/tmc/langchaingo/memory"
)
// TestLangChainMemory_ConversationBuffer exercises the ConversationBufferMemory
// adapter end to end: saving two input/output turns, loading memory variables
// under the default "history" key, reading raw messages back, and clearing.
func TestLangChainMemory_ConversationBuffer(t *testing.T) {
	ctx := context.Background()
	// Create a conversation buffer memory with return messages enabled
	mem := NewConversationBufferMemory(
		langchainmemory.WithReturnMessages(true),
	)
	// Test SaveContext: each call stores one user/AI turn.
	err := mem.SaveContext(ctx, map[string]any{
		"input": "Hello, my name is Alice",
	}, map[string]any{
		"output": "Hi Alice! Nice to meet you.",
	})
	require.NoError(t, err)
	err = mem.SaveContext(ctx, map[string]any{
		"input": "What's my name?",
	}, map[string]any{
		"output": "Your name is Alice.",
	})
	require.NoError(t, err)
	// Test LoadMemoryVariables: default memory key is "history".
	memVars, err := mem.LoadMemoryVariables(ctx, map[string]any{})
	require.NoError(t, err)
	assert.Contains(t, memVars, "history")
	// Test GetMessages
	messages, err := mem.GetMessages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 4) // 2 user messages + 2 AI messages
	// Verify message content and ordering (human message first, AI reply second).
	assert.Equal(t, llms.ChatMessageTypeHuman, messages[0].GetType())
	assert.Equal(t, "Hello, my name is Alice", messages[0].GetContent())
	assert.Equal(t, llms.ChatMessageTypeAI, messages[1].GetType())
	assert.Equal(t, "Hi Alice! Nice to meet you.", messages[1].GetContent())
	// Test Clear: the buffer must be empty afterwards.
	err = mem.Clear(ctx)
	require.NoError(t, err)
	messages, err = mem.GetMessages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 0)
}
// TestLangChainMemory_ConversationWindowBuffer verifies that a window buffer
// with a size of 2 turns retains only the 4 most recent messages (2 turns)
// after a third turn is saved.
func TestLangChainMemory_ConversationWindowBuffer(t *testing.T) {
	ctx := context.Background()
	// Create a conversation window buffer that keeps only the last 2 turns (4 messages)
	mem := NewConversationWindowBufferMemory(2,
		langchainmemory.WithReturnMessages(true),
	)
	// Add 3 conversation turns
	err := mem.SaveContext(ctx, map[string]any{
		"input": "First message",
	}, map[string]any{
		"output": "First response",
	})
	require.NoError(t, err)
	err = mem.SaveContext(ctx, map[string]any{
		"input": "Second message",
	}, map[string]any{
		"output": "Second response",
	})
	require.NoError(t, err)
	err = mem.SaveContext(ctx, map[string]any{
		"input": "Third message",
	}, map[string]any{
		"output": "Third response",
	})
	require.NoError(t, err)
	// Should only keep the last 2 turns (4 messages)
	messages, err := mem.GetMessages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 4)
	// Verify it kept the last 2 turns: oldest retained message is from turn 2,
	// newest is the AI response from turn 3.
	assert.Equal(t, "Second message", messages[0].GetContent())
	assert.Equal(t, "Third response", messages[3].GetContent())
}
// TestChatMessageHistory verifies the ChatMessageHistory wrapper: adding
// user/AI/custom messages, reading them back in insertion order with the
// expected types, and clearing the history.
func TestChatMessageHistory(t *testing.T) {
	ctx := context.Background()
	// Create a new chat message history
	history := NewChatMessageHistory()
	// Test AddUserMessage
	err := history.AddUserMessage(ctx, "Hello!")
	require.NoError(t, err)
	// Test AddAIMessage
	err = history.AddAIMessage(ctx, "Hi there!")
	require.NoError(t, err)
	// Test AddMessage with custom message (system message in this case)
	err = history.AddMessage(ctx, llms.SystemChatMessage{
		Content: "You are a helpful assistant.",
	})
	require.NoError(t, err)
	// Test Messages: all three come back in insertion order.
	messages, err := history.Messages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 3)
	assert.Equal(t, llms.ChatMessageTypeHuman, messages[0].GetType())
	assert.Equal(t, "Hello!", messages[0].GetContent())
	assert.Equal(t, llms.ChatMessageTypeAI, messages[1].GetType())
	assert.Equal(t, "Hi there!", messages[1].GetContent())
	assert.Equal(t, llms.ChatMessageTypeSystem, messages[2].GetType())
	assert.Equal(t, "You are a helpful assistant.", messages[2].GetContent())
	// Test Clear: history must be empty afterwards.
	err = history.Clear(ctx)
	require.NoError(t, err)
	messages, err = history.Messages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 0)
}
// TestChatMessageHistory_WithPreviousMessages verifies that a history seeded
// via WithPreviousMessages exposes the seed messages and accepts new ones
// appended after them.
func TestChatMessageHistory_WithPreviousMessages(t *testing.T) {
	ctx := context.Background()
	// Create history with previous messages
	previousMessages := []llms.ChatMessage{
		llms.HumanChatMessage{Content: "Previous message 1"},
		llms.AIChatMessage{Content: "Previous response 1"},
	}
	history := NewChatMessageHistory(
		langchainmemory.WithPreviousMessages(previousMessages),
	)
	// Verify previous messages are loaded in their original order.
	messages, err := history.Messages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 2)
	assert.Equal(t, "Previous message 1", messages[0].GetContent())
	assert.Equal(t, "Previous response 1", messages[1].GetContent())
	// Add new message; it should append after the seeded ones.
	err = history.AddUserMessage(ctx, "New message")
	require.NoError(t, err)
	messages, err = history.Messages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 3)
}
// TestLangChainMemory_CustomKeys verifies that custom input/output/memory key
// names are honored by SaveContext and LoadMemoryVariables.
func TestLangChainMemory_CustomKeys(t *testing.T) {
	ctx := context.Background()
	// Create memory with custom input/output keys
	mem := NewConversationBufferMemory(
		langchainmemory.WithInputKey("user_input"),
		langchainmemory.WithOutputKey("ai_output"),
		langchainmemory.WithMemoryKey("chat_history"),
		langchainmemory.WithReturnMessages(true),
	)
	// Save context with custom keys
	err := mem.SaveContext(ctx, map[string]any{
		"user_input": "What's the weather?",
	}, map[string]any{
		"ai_output": "It's sunny today!",
	})
	require.NoError(t, err)
	// Load memory variables: the custom memory key should be present.
	memVars, err := mem.LoadMemoryVariables(ctx, map[string]any{})
	require.NoError(t, err)
	assert.Contains(t, memVars, "chat_history")
	// Verify messages (one human + one AI message for the single turn).
	messages, err := mem.GetMessages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 2)
	assert.Equal(t, "What's the weather?", messages[0].GetContent())
	assert.Equal(t, "It's sunny today!", messages[1].GetContent())
}
// TestLangChainMemory_WithChatHistory verifies that a buffer memory built on
// an externally-seeded chat history sees the pre-existing message and appends
// new turns after it.
func TestLangChainMemory_WithChatHistory(t *testing.T) {
	ctx := context.Background()
	// Create a custom chat history seeded with one user message.
	chatHistory := NewChatMessageHistory()
	err := chatHistory.AddUserMessage(ctx, "Initial message")
	require.NoError(t, err)
	// Create memory with the custom chat history
	mem := NewConversationBufferMemory(
		langchainmemory.WithChatHistory(chatHistory.GetHistory()),
		langchainmemory.WithReturnMessages(true),
	)
	// Verify initial message is present
	messages, err := mem.GetMessages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 1)
	assert.Equal(t, "Initial message", messages[0].GetContent())
	// Add more messages: one turn adds a human and an AI message.
	err = mem.SaveContext(ctx, map[string]any{
		"input": "Follow-up message",
	}, map[string]any{
		"output": "Follow-up response",
	})
	require.NoError(t, err)
	messages, err = mem.GetMessages(ctx)
	require.NoError(t, err)
	assert.Len(t, messages, 3)
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/memory_test.go | memory/memory_test.go | package memory
import (
"context"
"testing"
)
// TestSequentialMemory verifies add/get/stats/clear on SequentialMemory.
// Fix: the GetContext error after Clear was previously discarded; it is now
// checked like every other error in the test.
func TestSequentialMemory(t *testing.T) {
	ctx := context.Background()
	mem := NewSequentialMemory()

	// Add three messages in conversation order.
	msgs := []*Message{
		NewMessage("user", "Hello"),
		NewMessage("assistant", "Hi there!"),
		NewMessage("user", "How are you?"),
	}
	for _, msg := range msgs {
		if err := mem.AddMessage(ctx, msg); err != nil {
			t.Fatalf("Failed to add message: %v", err)
		}
	}

	// Get context: all messages should come back.
	messages, err := mem.GetContext(ctx, "")
	if err != nil {
		t.Fatalf("Failed to get context: %v", err)
	}
	if len(messages) != 3 {
		t.Errorf("Expected 3 messages, got %d", len(messages))
	}

	// Check stats
	stats, err := mem.GetStats(ctx)
	if err != nil {
		t.Fatalf("Failed to get stats: %v", err)
	}
	if stats.TotalMessages != 3 {
		t.Errorf("Expected 3 total messages, got %d", stats.TotalMessages)
	}

	// Clear and confirm the memory is empty.
	if err := mem.Clear(ctx); err != nil {
		t.Fatalf("Failed to clear: %v", err)
	}
	messages, err = mem.GetContext(ctx, "")
	if err != nil {
		t.Fatalf("Failed to get context after clear: %v", err)
	}
	if len(messages) != 0 {
		t.Errorf("Expected 0 messages after clear, got %d", len(messages))
	}
}
// TestSlidingWindowMemory verifies that a window of size 2 evicts the oldest
// message. Fixes: the three AddMessage errors were previously ignored, and a
// wrong window length now aborts with Fatalf before indexing (which could
// otherwise panic on a short slice).
func TestSlidingWindowMemory(t *testing.T) {
	ctx := context.Background()
	mem := NewSlidingWindowMemory(2) // Window size of 2

	// Add 3 messages, checking errors.
	for _, content := range []string{"Message 1", "Message 2", "Message 3"} {
		if err := mem.AddMessage(ctx, NewMessage("user", content)); err != nil {
			t.Fatalf("Failed to add message: %v", err)
		}
	}

	// Should only keep last 2
	messages, err := mem.GetContext(ctx, "")
	if err != nil {
		t.Fatalf("Failed to get context: %v", err)
	}
	if len(messages) != 2 {
		t.Fatalf("Expected 2 messages in window, got %d", len(messages))
	}
	// Should have message 2 and 3
	if messages[0].Content != "Message 2" || messages[1].Content != "Message 3" {
		t.Errorf("Window contains wrong messages")
	}
}
// TestBufferMemory verifies the MaxMessages limit, GetMessages, and
// LoadMessages. Fix: AddMessage and GetContext errors were previously
// ignored; they are now checked.
func TestBufferMemory(t *testing.T) {
	ctx := context.Background()
	// Test with message limit
	mem := NewBufferMemory(&BufferConfig{
		MaxMessages: 2,
	})

	// Add 3 messages, checking errors.
	for _, content := range []string{"Message 1", "Message 2", "Message 3"} {
		if err := mem.AddMessage(ctx, NewMessage("user", content)); err != nil {
			t.Fatalf("Failed to add message: %v", err)
		}
	}

	messages, err := mem.GetContext(ctx, "")
	if err != nil {
		t.Fatalf("Failed to get context: %v", err)
	}
	if len(messages) != 2 {
		t.Errorf("Expected 2 messages with limit, got %d", len(messages))
	}

	// Test GetMessages
	msgs := mem.GetMessages()
	if len(msgs) != 2 {
		t.Errorf("Expected 2 messages from GetMessages, got %d", len(msgs))
	}

	// Test LoadMessages: replaces the buffer contents.
	newMessages := []*Message{
		NewMessage("user", "Loaded 1"),
		NewMessage("user", "Loaded 2"),
	}
	mem.LoadMessages(newMessages)
	messages, err = mem.GetContext(ctx, "")
	if err != nil {
		t.Fatalf("Failed to get context after load: %v", err)
	}
	if len(messages) != 2 {
		t.Fatalf("Expected 2 messages after load, got %d", len(messages))
	}
	if messages[0].Content != "Loaded 1" {
		t.Errorf("Loaded messages incorrect")
	}
}
// TestSummarizationMemory verifies that after exceeding SummarizeAfter the
// memory returns a system-role summary followed by the recent window.
func TestSummarizationMemory(t *testing.T) {
	ctx := context.Background()
	mem := NewSummarizationMemory(&SummarizationConfig{
		RecentWindowSize: 2,
		SummarizeAfter:   3, // the 4 messages added below cross this threshold
	})
	// Add messages
	for i := 1; i <= 4; i++ {
		msg := NewMessage("user", "Message content")
		if err := mem.AddMessage(ctx, msg); err != nil {
			t.Fatalf("Failed to add message %d: %v", i, err)
		}
	}
	messages, err := mem.GetContext(ctx, "")
	if err != nil {
		t.Fatalf("Failed to get context: %v", err)
	}
	// Should have summary + recent messages
	if len(messages) < 2 {
		t.Errorf("Expected at least 2 messages (summary + recent), got %d", len(messages))
	}
	// First message should be a summary, which is emitted with the "system" role.
	if messages[0].Role != "system" {
		t.Errorf("Expected first message to be system (summary), got %s", messages[0].Role)
	}
}
// TestRetrievalMemory verifies that GetContext returns the TopK most relevant
// messages for a query. Fix: the AddMessage errors were previously ignored;
// they are now checked.
func TestRetrievalMemory(t *testing.T) {
	ctx := context.Background()
	mem := NewRetrievalMemory(&RetrievalConfig{
		TopK: 2,
	})

	// Add messages, checking errors.
	for _, content := range []string{"Hello world", "Goodbye world", "Python programming"} {
		if err := mem.AddMessage(ctx, NewMessage("user", content)); err != nil {
			t.Fatalf("Failed to add message: %v", err)
		}
	}

	// Query similar to "Hello"
	messages, err := mem.GetContext(ctx, "Hello")
	if err != nil {
		t.Fatalf("Failed to get context: %v", err)
	}
	if len(messages) != 2 {
		t.Errorf("Expected top 2 messages, got %d", len(messages))
	}
}
// TestHierarchicalMemory verifies retrieval across the recent/important
// tiers and the stats counter. Fix: the AddMessage errors were previously
// ignored; they are now checked.
func TestHierarchicalMemory(t *testing.T) {
	ctx := context.Background()
	mem := NewHierarchicalMemory(&HierarchicalConfig{
		RecentLimit:    2,
		ImportantLimit: 2,
	})

	// Add messages with varying importance, checking errors.
	msg1 := NewMessage("user", "Regular message")
	msg2 := NewMessage("user", "Important message")
	msg2.Metadata["importance"] = 0.9
	msg3 := NewMessage("user", "Another regular")
	for _, msg := range []*Message{msg1, msg2, msg3} {
		if err := mem.AddMessage(ctx, msg); err != nil {
			t.Fatalf("Failed to add message: %v", err)
		}
	}

	messages, err := mem.GetContext(ctx, "")
	if err != nil {
		t.Fatalf("Failed to get context: %v", err)
	}
	// Should include important and recent messages
	if len(messages) == 0 {
		t.Error("Expected some messages from hierarchical memory")
	}

	stats, err := mem.GetStats(ctx)
	if err != nil {
		t.Fatalf("Failed to get stats: %v", err)
	}
	if stats.TotalMessages == 0 {
		t.Error("Expected non-zero total messages")
	}
}
// TestMessageCreation checks the fields populated by NewMessage: role,
// content, a generated ID, a token count, and an initialized metadata map.
func TestMessageCreation(t *testing.T) {
	m := NewMessage("user", "Test content")

	if got := m.Role; got != "user" {
		t.Errorf("Expected role 'user', got %s", got)
	}
	if got := m.Content; got != "Test content" {
		t.Errorf("Expected content 'Test content', got %s", got)
	}
	if m.ID == "" {
		t.Error("Expected non-empty ID")
	}
	if m.TokenCount == 0 {
		t.Error("Expected non-zero token count")
	}
	if m.Metadata == nil {
		t.Error("Expected non-nil metadata")
	}
}
// TestGraphBasedMemory verifies graph retrieval, stats, and relationship
// tracking. Fix: the four AddMessage errors were previously ignored; they
// are now checked.
func TestGraphBasedMemory(t *testing.T) {
	ctx := context.Background()
	mem := NewGraphBasedMemory(&GraphConfig{
		TopK: 5,
	})

	// Add related messages, checking errors.
	turns := []struct {
		role, content string
	}{
		{"user", "What's the price of the product?"},
		{"assistant", "The price is $99"},
		{"user", "Tell me about features"},
		{"user", "What's the price again?"},
	}
	for _, turn := range turns {
		if err := mem.AddMessage(ctx, NewMessage(turn.role, turn.content)); err != nil {
			t.Fatalf("Failed to add message: %v", err)
		}
	}

	// Query should retrieve related messages
	messages, err := mem.GetContext(ctx, "price information")
	if err != nil {
		t.Fatalf("Failed to get context: %v", err)
	}
	if len(messages) == 0 {
		t.Error("Expected some messages from graph memory")
	}

	// Check stats
	stats, err := mem.GetStats(ctx)
	if err != nil {
		t.Fatalf("Failed to get stats: %v", err)
	}
	if stats.TotalMessages != 4 {
		t.Errorf("Expected 4 total messages, got %d", stats.TotalMessages)
	}

	// Check relationships
	relations := mem.GetRelationships()
	if len(relations) == 0 {
		t.Error("Expected some relationships in graph")
	}
}
// TestCompressionMemory verifies that adding messages past the
// CompressionTrigger still yields retrievable context and stats.
func TestCompressionMemory(t *testing.T) {
	ctx := context.Background()
	mem := NewCompressionMemory(&CompressionConfig{
		CompressionTrigger: 3, // Compress after 3 messages
	})
	// Add messages to trigger compression (5 > trigger of 3).
	for range 5 {
		msg := NewMessage("user", "Message content for compression")
		if err := mem.AddMessage(ctx, msg); err != nil {
			t.Fatalf("Failed to add message: %v", err)
		}
	}
	messages, err := mem.GetContext(ctx, "")
	if err != nil {
		t.Fatalf("Failed to get context: %v", err)
	}
	// Should have compressed block(s) plus remaining messages
	if len(messages) == 0 {
		t.Error("Expected some messages from compression memory")
	}
	stats, err := mem.GetStats(ctx)
	if err != nil {
		t.Fatalf("Failed to get stats: %v", err)
	}
	// NOTE(review): this branch only logs — it never fails the test — so an
	// absent compression (rate >= 1.0) goes unnoticed; consider t.Errorf.
	if stats.CompressionRate >= 1.0 {
		t.Logf("Compression rate: %.2f (expected < 1.0 for compression)", stats.CompressionRate)
	}
}
// TestOSLikeMemory verifies the paged (active/cache) memory: adding more
// messages than both limits combined, reading context and stats, and
// inspecting the memory-info map.
func TestOSLikeMemory(t *testing.T) {
	ctx := context.Background()
	mem := NewOSLikeMemory(&OSLikeConfig{
		ActiveLimit: 2,
		CacheLimit:  3,
	})
	// Add more messages (10) than active+cache capacity (5) to force paging.
	for range 10 {
		msg := NewMessage("user", "Message content")
		if err := mem.AddMessage(ctx, msg); err != nil {
			t.Fatalf("Failed to add message: %v", err)
		}
	}
	messages, err := mem.GetContext(ctx, "")
	if err != nil {
		t.Fatalf("Failed to get context: %v", err)
	}
	if len(messages) == 0 {
		t.Error("Expected some messages from OS-like memory")
	}
	stats, err := mem.GetStats(ctx)
	if err != nil {
		t.Fatalf("Failed to get stats: %v", err)
	}
	if stats.TotalMessages == 0 {
		t.Error("Expected non-zero total messages")
	}
	// Check memory info; "active_pages" is informational, so it is only logged.
	info := mem.GetMemoryInfo()
	if info == nil {
		t.Error("Expected memory info")
	}
	if activePages, ok := info["active_pages"].(int); ok {
		t.Logf("Active pages: %d", activePages)
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/engine.go | rag/engine.go | package rag
import (
"context"
"fmt"
"math"
"strings"
)
// BaseEngine provides common functionality for RAG engines.
type BaseEngine struct {
	retriever Retriever // performs document retrieval for queries
	embedder  Embedder  // embedding provider; not used by the methods visible here — presumably for concrete engines, confirm
	config    *Config   // engine configuration; NewBaseEngine guarantees non-nil
	metrics   *Metrics  // metrics accumulator returned by GetMetrics
}
// NewBaseEngine creates a new base RAG engine. When config is nil a default
// configuration (K=4, score threshold 0.5, similarity search) is installed
// so the engine is usable out of the box.
func NewBaseEngine(retriever Retriever, embedder Embedder, config *Config) *BaseEngine {
	cfg := config
	if cfg == nil {
		cfg = &Config{
			VectorRAG: &VectorRAGConfig{
				RetrieverConfig: RetrievalConfig{
					K:              4,
					ScoreThreshold: 0.5,
					SearchType:     "similarity",
				},
			},
		}
	}
	engine := &BaseEngine{
		retriever: retriever,
		embedder:  embedder,
		config:    cfg,
		metrics:   &Metrics{},
	}
	return engine
}
// Query performs a RAG query using the engine's configured retrieval
// defaults (K, score threshold, search type), falling back to built-in
// defaults when no configuration is present.
func (e *BaseEngine) Query(ctx context.Context, query string) (*QueryResult, error) {
	config := &RetrievalConfig{
		K:              4,
		ScoreThreshold: 0.5,
		SearchType:     "similarity",
		IncludeScores:  true,
	}
	// Fix: guard against a zero-value BaseEngine — e.config is nil when the
	// engine was not built via NewBaseEngine, and dereferencing it panicked.
	if e.config != nil && e.config.VectorRAG != nil {
		config.K = e.config.VectorRAG.RetrieverConfig.K
		config.ScoreThreshold = e.config.VectorRAG.RetrieverConfig.ScoreThreshold
		config.SearchType = e.config.VectorRAG.RetrieverConfig.SearchType
	}
	return e.QueryWithConfig(ctx, query, config)
}
// QueryWithConfig performs a RAG query with custom configuration. A nil
// config falls back to the engine-level defaults (or built-in defaults when
// the engine has no configuration). On zero retrieval hits it returns an
// empty, zero-confidence result rather than an error.
func (e *BaseEngine) QueryWithConfig(ctx context.Context, query string, config *RetrievalConfig) (*QueryResult, error) {
	if config == nil {
		config = &RetrievalConfig{
			K:              4,
			ScoreThreshold: 0.5,
			SearchType:     "similarity",
		}
		// Fix: guard against a zero-value BaseEngine — e.config is nil when
		// the engine was not built via NewBaseEngine, and dereferencing it
		// panicked.
		if e.config != nil && e.config.VectorRAG != nil {
			config.K = e.config.VectorRAG.RetrieverConfig.K
			config.ScoreThreshold = e.config.VectorRAG.RetrieverConfig.ScoreThreshold
			config.SearchType = e.config.VectorRAG.RetrieverConfig.SearchType
		}
	}
	// Perform retrieval
	searchResults, err := e.retriever.RetrieveWithConfig(ctx, query, config)
	if err != nil {
		return nil, fmt.Errorf("retrieval failed: %w", err)
	}
	if len(searchResults) == 0 {
		return &QueryResult{
			Query:      query,
			Answer:     "No relevant information found.",
			Sources:    []Document{},
			Context:    "",
			Confidence: 0.0,
		}, nil
	}
	// Extract documents from search results
	docs := make([]Document, len(searchResults))
	for i, result := range searchResults {
		docs[i] = result.Document
	}
	// Build context from retrieved documents
	context := e.buildContext(searchResults, config.IncludeScores)
	// Confidence is the mean absolute score of the retrieved documents.
	confidence := e.calculateConfidence(searchResults)
	return &QueryResult{
		Query:   query,
		Sources: docs,
		Context: context,
		Metadata: map[string]any{
			"retrieval_config": config,
			"num_documents":    len(docs),
			"avg_score":        confidence,
		},
		Confidence: confidence,
	}, nil
}
// AddDocuments adds documents to the base engine.
// The base implementation intentionally stores nothing and always returns an
// error; concrete engines are expected to override it.
func (e *BaseEngine) AddDocuments(ctx context.Context, docs []Document) error {
	// This base implementation doesn't store documents directly
	// Subclasses should override this method to implement actual storage
	return fmt.Errorf("AddDocuments not implemented for base engine")
}
// DeleteDocument removes a document from the base engine.
// Always returns an error: the base engine has no storage; override in
// concrete engines.
func (e *BaseEngine) DeleteDocument(ctx context.Context, docID string) error {
	return fmt.Errorf("DeleteDocument not implemented for base engine")
}
// UpdateDocument updates an existing document in the base engine.
// Always returns an error: the base engine has no storage; override in
// concrete engines.
func (e *BaseEngine) UpdateDocument(ctx context.Context, doc Document) error {
	return fmt.Errorf("UpdateDocument not implemented for base engine")
}
// SimilaritySearch performs similarity search without generation, returning
// up to k documents that clear the configured score threshold.
func (e *BaseEngine) SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error) {
	scoreThreshold := 0.5
	// Fix: guard against a zero-value BaseEngine — e.config may be nil when
	// the engine was not built via NewBaseEngine.
	if e.config != nil && e.config.VectorRAG != nil {
		scoreThreshold = e.config.VectorRAG.RetrieverConfig.ScoreThreshold
	}
	config := &RetrievalConfig{
		K:              k,
		ScoreThreshold: scoreThreshold,
		SearchType:     "similarity",
		IncludeScores:  false,
	}
	searchResults, err := e.retriever.RetrieveWithConfig(ctx, query, config)
	if err != nil {
		return nil, err
	}
	docs := make([]Document, len(searchResults))
	for i, result := range searchResults {
		docs[i] = result.Document
	}
	return docs, nil
}
// SimilaritySearchWithScores performs similarity search and returns the
// matched documents together with their scores.
func (e *BaseEngine) SimilaritySearchWithScores(ctx context.Context, query string, k int) ([]DocumentSearchResult, error) {
	scoreThreshold := 0.5
	// Fix: guard against a zero-value BaseEngine — e.config may be nil when
	// the engine was not built via NewBaseEngine.
	if e.config != nil && e.config.VectorRAG != nil {
		scoreThreshold = e.config.VectorRAG.RetrieverConfig.ScoreThreshold
	}
	config := &RetrievalConfig{
		K:              k,
		ScoreThreshold: scoreThreshold,
		SearchType:     "similarity",
		IncludeScores:  true,
	}
	return e.retriever.RetrieveWithConfig(ctx, query, config)
}
// buildContext renders search results into a plain-text context block: for
// each document an index header, optionally its score, optional title/source
// metadata, then its content, separated by blank lines.
func (e *BaseEngine) buildContext(results []DocumentSearchResult, includeScores bool) string {
	if len(results) == 0 {
		return ""
	}
	// Fix: the builder was previously named "context", shadowing the imported
	// context package within this function.
	var sb strings.Builder
	for i, result := range results {
		doc := result.Document
		sb.WriteString(fmt.Sprintf("Document %d:\n", i+1))
		if includeScores {
			sb.WriteString(fmt.Sprintf("Score: %.4f\n", result.Score))
		}
		// Add key metadata if available
		if doc.Metadata != nil {
			if title, ok := doc.Metadata["title"]; ok {
				sb.WriteString(fmt.Sprintf("Title: %v\n", title))
			}
			if source, ok := doc.Metadata["source"]; ok {
				sb.WriteString(fmt.Sprintf("Source: %v\n", source))
			}
		}
		sb.WriteString(fmt.Sprintf("Content: %s\n\n", doc.Content))
	}
	return sb.String()
}
// calculateConfidence returns the mean of the absolute scores across the
// given results, or 0 when there are none.
func (e *BaseEngine) calculateConfidence(results []DocumentSearchResult) float64 {
	count := len(results)
	if count == 0 {
		return 0.0
	}
	sum := 0.0
	for i := range results {
		sum += math.Abs(results[i].Score)
	}
	return sum / float64(count)
}
// GetMetrics returns the current metrics accumulator. The returned pointer is
// shared with the engine, not a copy.
func (e *BaseEngine) GetMetrics() *Metrics {
	return e.metrics
}
// ResetMetrics resets all metrics by installing a fresh accumulator. Callers
// holding the previous pointer from GetMetrics keep the old values.
func (e *BaseEngine) ResetMetrics() {
	e.metrics = &Metrics{}
}
// CompositeEngine combines multiple RAG engines, merging their per-engine
// results through an aggregator function.
type CompositeEngine struct {
	engines    []Engine                                  // engines queried in order
	aggregator func(results []*QueryResult) *QueryResult // merges per-engine results into one
	config     *Config                                   // set by the constructor; not read by the visible methods — confirm
}
// NewCompositeEngine creates a composite RAG engine over the given engines.
// A nil aggregator defaults to DefaultAggregator.
func NewCompositeEngine(engines []Engine, aggregator func([]*QueryResult) *QueryResult) *CompositeEngine {
	agg := aggregator
	if agg == nil {
		agg = DefaultAggregator
	}
	composite := &CompositeEngine{
		engines:    engines,
		aggregator: agg,
		config:     &Config{},
	}
	return composite
}
// Query runs the query against every composite engine and aggregates the
// results. A per-engine failure is converted into a zero-confidence
// placeholder result so the aggregator still sees one entry per engine.
// Fixes: the old comment claimed parallel execution (the loop is
// sequential), and the error-path result left Metadata nil, which made the
// aggregators panic when writing to it.
func (c *CompositeEngine) Query(ctx context.Context, query string) (*QueryResult, error) {
	results := make([]*QueryResult, len(c.engines))
	// Engines are queried sequentially, in order.
	for i, engine := range c.engines {
		result, err := engine.Query(ctx, query)
		if err != nil {
			result = &QueryResult{
				Query:      query,
				Answer:     fmt.Sprintf("Engine %d failed: %v", i, err),
				Confidence: 0.0,
				Metadata:   map[string]any{},
			}
		}
		results[i] = result
	}
	return c.aggregator(results), nil
}
// QueryWithConfig runs the query with a custom retrieval configuration
// against every composite engine and aggregates the results. A per-engine
// failure becomes a zero-confidence placeholder result.
// Fixes: the old comment claimed parallel execution (the loop is
// sequential), and the error-path result left Metadata nil, which made the
// aggregators panic when writing to it.
func (c *CompositeEngine) QueryWithConfig(ctx context.Context, query string, config *RetrievalConfig) (*QueryResult, error) {
	results := make([]*QueryResult, len(c.engines))
	// Engines are queried sequentially, in order.
	for i, engine := range c.engines {
		result, err := engine.QueryWithConfig(ctx, query, config)
		if err != nil {
			result = &QueryResult{
				Query:      query,
				Answer:     fmt.Sprintf("Engine %d failed: %v", i, err),
				Confidence: 0.0,
				Metadata:   map[string]any{},
			}
		}
		results[i] = result
	}
	return c.aggregator(results), nil
}
// AddDocuments adds documents to every composite engine, collecting the
// individual failures and reporting them together.
func (c *CompositeEngine) AddDocuments(ctx context.Context, docs []Document) error {
	var errs []error
	for _, engine := range c.engines {
		err := engine.AddDocuments(ctx, docs)
		if err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("multiple engines failed: %v", errs)
}
// DeleteDocument removes the document from every composite engine,
// collecting the individual failures and reporting them together.
func (c *CompositeEngine) DeleteDocument(ctx context.Context, docID string) error {
	var errs []error
	for _, engine := range c.engines {
		err := engine.DeleteDocument(ctx, docID)
		if err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("multiple engines failed: %v", errs)
}
// UpdateDocument updates the document in every composite engine, collecting
// the individual failures and reporting them together.
func (c *CompositeEngine) UpdateDocument(ctx context.Context, doc Document) error {
	var errs []error
	for _, engine := range c.engines {
		err := engine.UpdateDocument(ctx, doc)
		if err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("multiple engines failed: %v", errs)
}
// SimilaritySearch tries each engine in order and returns the first
// successful result. Fix: the underlying engine errors were previously
// discarded; the last one is now wrapped into the returned error so callers
// can see why everything failed.
func (c *CompositeEngine) SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error) {
	var lastErr error
	for _, engine := range c.engines {
		docs, err := engine.SimilaritySearch(ctx, query, k)
		if err == nil {
			return docs, nil
		}
		lastErr = err
	}
	if lastErr != nil {
		return nil, fmt.Errorf("all engines failed similarity search: %w", lastErr)
	}
	// No engines configured at all.
	return nil, fmt.Errorf("all engines failed similarity search")
}
// SimilaritySearchWithScores tries each engine in order and returns the
// first successful scored result. Fix: the underlying engine errors were
// previously discarded; the last one is now wrapped into the returned error.
func (c *CompositeEngine) SimilaritySearchWithScores(ctx context.Context, query string, k int) ([]DocumentSearchResult, error) {
	var lastErr error
	for _, engine := range c.engines {
		results, err := engine.SimilaritySearchWithScores(ctx, query, k)
		if err == nil {
			return results, nil
		}
		lastErr = err
	}
	if lastErr != nil {
		return nil, fmt.Errorf("all engines failed similarity search: %w", lastErr)
	}
	// No engines configured at all.
	return nil, fmt.Errorf("all engines failed similarity search")
}
// DefaultAggregator picks the highest-confidence result and merges the
// deduplicated sources of all results into it. Returns nil for no results
// and the single result unchanged for exactly one.
// Fix: the winning result's Metadata map may be nil (e.g. a placeholder
// built on an engine error path); writing to it panicked. It is now
// initialized before use.
func DefaultAggregator(results []*QueryResult) *QueryResult {
	if len(results) == 0 {
		return nil
	}
	if len(results) == 1 {
		return results[0]
	}
	// Find the result with highest confidence
	bestResult := results[0]
	for _, result := range results[1:] {
		if result.Confidence > bestResult.Confidence {
			bestResult = result
		}
	}
	// Combine sources from all results, deduplicating by document ID.
	allSources := make([]Document, 0)
	seenIDs := make(map[string]bool)
	for _, result := range results {
		for _, doc := range result.Sources {
			if !seenIDs[doc.ID] {
				allSources = append(allSources, doc)
				seenIDs[doc.ID] = true
			}
		}
	}
	// Update the best result with combined sources
	bestResult.Sources = allSources
	if bestResult.Metadata == nil {
		bestResult.Metadata = make(map[string]any)
	}
	bestResult.Metadata["engines_used"] = len(results)
	bestResult.Metadata["total_sources"] = len(allSources)
	return bestResult
}
// WeightedAggregator returns an aggregator that scales each result's
// confidence by the corresponding weight and returns the highest weighted
// result. A weight/result count mismatch falls back to equal weights.
// Fix: the winning result's Metadata map may be nil (e.g. a placeholder
// built on an engine error path); writing to it panicked. It is now
// initialized before use.
func WeightedAggregator(weights []float64) func([]*QueryResult) *QueryResult {
	return func(results []*QueryResult) *QueryResult {
		if len(results) == 0 {
			return nil
		}
		if len(weights) != len(results) {
			// Use equal weights if length mismatch
			weights = make([]float64, len(results))
			for i := range weights {
				weights[i] = 1.0
			}
		}
		// Calculate weighted score for each result
		weightedScores := make([]float64, len(results))
		for i, result := range results {
			weightedScores[i] = result.Confidence * weights[i]
		}
		// Find result with highest weighted score
		bestIndex := 0
		for i := 1; i < len(weightedScores); i++ {
			if weightedScores[i] > weightedScores[bestIndex] {
				bestIndex = i
			}
		}
		result := results[bestIndex]
		if result.Metadata == nil {
			result.Metadata = make(map[string]any)
		}
		result.Metadata["weighted_score"] = weightedScores[bestIndex]
		result.Metadata["engines_used"] = len(results)
		return result
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/langgraph_adapter_test.go | rag/langgraph_adapter_test.go | package rag
import (
"context"
"errors"
"testing"
"time"
)
// mockEngine is a mock implementation of Engine for testing. It records
// whether (and with what input) Query was called, and returns a canned
// result or error.
type mockEngine struct {
	queryResult *QueryResult // result returned by Query; nil means a default is synthesized
	queryError  error        // error returned by Query; takes precedence over queryResult
	queryCalled bool         // set to true once Query has been invoked
	queryInput  string       // the last query string passed to Query
}
// Query records the call and returns the configured error or result, or a
// canned default when neither is set. It honors context cancellation before
// doing anything else.
func (m *mockEngine) Query(ctx context.Context, query string) (*QueryResult, error) {
	// Check for context cancellation (non-blocking).
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	m.queryCalled = true
	m.queryInput = query
	switch {
	case m.queryError != nil:
		return nil, m.queryError
	case m.queryResult != nil:
		return m.queryResult, nil
	}
	// Default result
	return &QueryResult{
		Query:      query,
		Context:    "Default context for: " + query,
		Answer:     "Default answer",
		Confidence: 0.9,
	}, nil
}
// QueryWithConfig delegates to Query; the mock ignores the supplied config.
func (m *mockEngine) QueryWithConfig(ctx context.Context, query string, config *RetrievalConfig) (*QueryResult, error) {
	return m.Query(ctx, query)
}

// AddDocuments is a no-op stub that always succeeds.
func (m *mockEngine) AddDocuments(ctx context.Context, docs []Document) error {
	return nil
}

// DeleteDocument is a no-op stub that always succeeds.
func (m *mockEngine) DeleteDocument(ctx context.Context, docID string) error {
	return nil
}

// UpdateDocument is a no-op stub that always succeeds.
func (m *mockEngine) UpdateDocument(ctx context.Context, doc Document) error {
	return nil
}

// SimilaritySearch is a stub that always returns an empty slice.
func (m *mockEngine) SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error) {
	return []Document{}, nil
}

// SimilaritySearchWithScores is a stub that always returns an empty slice.
func (m *mockEngine) SimilaritySearchWithScores(ctx context.Context, query string, k int) ([]DocumentSearchResult, error) {
	return []DocumentSearchResult{}, nil
}
// ========================================
// NewRetrievalNode Tests
// ========================================
// TestNewRetrievalNode checks only that the constructor returns a non-nil
// node function; behavior is covered by the _Success/_Errors tests below.
func TestNewRetrievalNode(t *testing.T) {
	engine := &mockEngine{
		queryResult: &QueryResult{
			Query:   "test query",
			Context: "retrieved context",
		},
	}
	node := NewRetrievalNode(engine, "question", "context")
	if node == nil {
		t.Fatal("NewRetrievalNode returned nil")
	}
}
// TestNewRetrievalNode_Success is a table-driven test of the happy path: the
// node reads the query from inputKey in the state map, runs it through the
// engine, and writes the result's Context string under outputKey.
func TestNewRetrievalNode_Success(t *testing.T) {
	tests := []struct {
		name           string         // sub-test name
		inputState     map[string]any // state passed to the node
		inputKey       string         // key the node reads the query from
		outputKey      string         // key the node writes the context to
		queryResult    *QueryResult   // canned engine result
		expectedOutput map[string]any // key/value pairs expected in the returned state
	}{
		{
			name: "successful retrieval",
			inputState: map[string]any{
				"question": "What is the capital of France?",
			},
			inputKey:  "question",
			outputKey: "context",
			queryResult: &QueryResult{
				Query:   "What is the capital of France?",
				Context: "Paris is the capital of France.",
				Answer:  "Paris",
			},
			expectedOutput: map[string]any{
				"context": "Paris is the capital of France.",
			},
		},
		{
			name: "custom input and output keys",
			inputState: map[string]any{
				"user_query": "How does RAG work?",
			},
			inputKey:  "user_query",
			outputKey: "retrieved_docs",
			queryResult: &QueryResult{
				Query:   "How does RAG work?",
				Context: "RAG combines retrieval and generation.",
			},
			expectedOutput: map[string]any{
				"retrieved_docs": "RAG combines retrieval and generation.",
			},
		},
		{
			name: "empty query",
			inputState: map[string]any{
				"question": "",
			},
			inputKey:  "question",
			outputKey: "context",
			queryResult: &QueryResult{
				Query:   "",
				Context: "empty result",
			},
			expectedOutput: map[string]any{
				"context": "empty result",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			engine := &mockEngine{queryResult: tt.queryResult}
			node := NewRetrievalNode(engine, tt.inputKey, tt.outputKey)
			ctx := context.Background()
			result, err := node(ctx, tt.inputState)
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			// The node returns the state as map[string]any.
			resultMap, ok := result.(map[string]any)
			if !ok {
				t.Fatalf("expected map[string]any, got %T", result)
			}
			// Only the expected keys are checked; other state keys are ignored.
			for key, expectedValue := range tt.expectedOutput {
				actualValue, ok := resultMap[key]
				if !ok {
					t.Errorf("missing key %q in result", key)
					continue
				}
				if actualValue != expectedValue {
					t.Errorf("for key %q: expected %v, got %v", key, expectedValue, actualValue)
				}
			}
		})
	}
}
// TestNewRetrievalNode_Errors is a table-driven test of the failure paths:
// non-map state, missing or non-string input key, and a propagated engine
// error. Each case pins the exact error message.
func TestNewRetrievalNode_Errors(t *testing.T) {
	tests := []struct {
		name        string // sub-test name
		inputState  any    // state passed to the node (intentionally `any` to test non-map input)
		inputKey    string // key the node reads the query from
		outputKey   string // key the node would write the context to
		queryError  error  // error the mock engine should return, if any
		expectedErr string // exact expected error message
	}{
		{
			name:        "state is not a map",
			inputState:  "not a map",
			inputKey:    "question",
			outputKey:   "context",
			expectedErr: "state is not a map[string]any, got string",
		},
		{
			name:        "input key not found",
			inputState:  map[string]any{"other_key": "value"},
			inputKey:    "question",
			outputKey:   "context",
			expectedErr: "input key 'question' not found or not a string",
		},
		{
			name:        "input key is not a string",
			inputState:  map[string]any{"question": 123},
			inputKey:    "question",
			outputKey:   "context",
			expectedErr: "input key 'question' not found or not a string",
		},
		{
			name:        "engine query error",
			inputState:  map[string]any{"question": "test"},
			inputKey:    "question",
			outputKey:   "context",
			queryError:  errors.New("query failed"),
			expectedErr: "query failed",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			engine := &mockEngine{queryError: tt.queryError}
			node := NewRetrievalNode(engine, tt.inputKey, tt.outputKey)
			ctx := context.Background()
			_, err := node(ctx, tt.inputState)
			if err == nil {
				t.Error("expected error but got nil")
			}
			// Compare messages exactly; an empty expectedErr skips the comparison.
			if err != nil && tt.expectedErr != "" && err.Error() != tt.expectedErr {
				t.Errorf("expected error %q, got %q", tt.expectedErr, err.Error())
			}
		})
	}
}
func TestNewRetrievalNode_WithContext(t *testing.T) {
engine := &mockEngine{
queryResult: &QueryResult{
Query: "test query",
Context: "test context",
},
}
node := NewRetrievalNode(engine, "question", "context")
// Test with context cancellation
ctx, cancel := context.WithCancel(context.Background())
cancel()
_, err := node(ctx, map[string]any{"question": "test"})
if err == nil {
t.Error("expected error due to context cancellation")
}
}
// ========================================
// RetrieverTool Tests
// ========================================

func TestNewRetrieverTool(t *testing.T) {
	// Constructor defaults: empty name/description fall back to the built-in
	// knowledge-base values, explicit values are kept verbatim.
	const defaultDesc = "A knowledge base tool. Use this to search for information to answer questions."
	engine := &mockEngine{}

	t.Run("with all parameters", func(t *testing.T) {
		tool := NewRetrieverTool(engine, "my_tool", "My custom tool description")
		if tool == nil {
			t.Fatal("NewRetrieverTool returned nil")
		}
		if got := tool.NameVal; got != "my_tool" {
			t.Errorf("expected name 'my_tool', got %q", got)
		}
		if got := tool.DescVal; got != "My custom tool description" {
			t.Errorf("expected description 'My custom tool description', got %q", got)
		}
	})
	t.Run("with empty name", func(t *testing.T) {
		tool := NewRetrieverTool(engine, "", "")
		if got := tool.NameVal; got != "knowledge_base" {
			t.Errorf("expected default name 'knowledge_base', got %q", got)
		}
		if got := tool.DescVal; got != defaultDesc {
			t.Errorf("expected default description, got %q", got)
		}
	})
	t.Run("with empty description", func(t *testing.T) {
		tool := NewRetrieverTool(engine, "custom_name", "")
		if got := tool.NameVal; got != "custom_name" {
			t.Errorf("expected name 'custom_name', got %q", got)
		}
		if got := tool.DescVal; got != defaultDesc {
			t.Errorf("expected default description, got %q", got)
		}
	})
}
func TestRetrieverTool_Name(t *testing.T) {
engine := &mockEngine{}
tool := NewRetrieverTool(engine, "test_tool", "test description")
if tool.Name() != "test_tool" {
t.Errorf("expected 'test_tool', got %q", tool.Name())
}
}
func TestRetrieverTool_Description(t *testing.T) {
engine := &mockEngine{}
tool := NewRetrieverTool(engine, "test_tool", "test description")
if tool.Description() != "test description" {
t.Errorf("expected 'test description', got %q", tool.Description())
}
}
// TestRetrieverTool_Call exercises RetrieverTool.Call: on success the tool
// returns the retrieved context string; engine failures propagate as errors.
// Fix over the previous version: the result value is only meaningful when no
// error is expected, so it is no longer asserted in the error cases.
func TestRetrieverTool_Call(t *testing.T) {
	tests := []struct {
		name           string
		input          string
		queryResult    *QueryResult
		queryError     error
		expectedResult string
		expectedErr    bool
	}{
		{
			name:  "successful call",
			input: "What is RAG?",
			queryResult: &QueryResult{
				Query:   "What is RAG?",
				Context: "RAG stands for Retrieval Augmented Generation.",
				Answer:  "It's a technique...",
			},
			expectedResult: "RAG stands for Retrieval Augmented Generation.",
			expectedErr:    false,
		},
		{
			name:           "empty input",
			input:          "",
			queryResult:    &QueryResult{Context: "empty result"},
			expectedResult: "empty result",
			expectedErr:    false,
		},
		{
			name:        "engine error",
			input:       "test",
			queryError:  errors.New("engine failed"),
			expectedErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			engine := &mockEngine{queryResult: tt.queryResult, queryError: tt.queryError}
			tool := NewRetrieverTool(engine, "", "")
			result, err := tool.Call(context.Background(), tt.input)
			if tt.expectedErr {
				if err == nil {
					t.Error("expected error but got nil")
				}
				// The result is undefined on error; do not assert on it.
				return
			}
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if result != tt.expectedResult {
				t.Errorf("expected %q, got %q", tt.expectedResult, result)
			}
		})
	}
}
func TestRetrieverTool_WithEngine(t *testing.T) {
	// End-to-end: Call must forward the input verbatim to the engine and
	// return the engine's context string.
	engine := &mockEngine{
		queryResult: &QueryResult{
			Query:   "test query",
			Context: "test context",
		},
	}
	tool := NewRetrieverTool(engine, "test", "test tool")
	answer, err := tool.Call(context.Background(), "test query")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if !engine.queryCalled {
		t.Error("engine.Query was not called")
	}
	if got := engine.queryInput; got != "test query" {
		t.Errorf("expected query input 'test query', got %q", got)
	}
	if answer != "test context" {
		t.Errorf("expected 'test context', got %q", answer)
	}
}
func TestRetrieverTool_CallWithContextCancellation(t *testing.T) {
engine := &mockEngine{
queryResult: &QueryResult{Context: "result"},
}
tool := NewRetrieverTool(engine, "", "")
ctx, cancel := context.WithCancel(context.Background())
cancel()
_, err := tool.Call(ctx, "test")
if err == nil {
t.Error("expected error due to context cancellation")
}
}
// ========================================
// Interface Compliance Tests
// ========================================
func TestRetrieverTool_InterfaceCompliance(t *testing.T) {
// Ensure RetrieverTool implements the expected interface
engine := &mockEngine{}
tool := NewRetrieverTool(engine, "", "")
// Check that Name and Description methods exist
if tool.Name() == "" {
t.Error("Name() should not return empty string")
}
if tool.Description() == "" {
t.Error("Description() should not return empty string")
}
}
// ========================================
// Edge Cases and Integration Tests
// ========================================

func TestNewRetrievalNode_PreservesOtherState(t *testing.T) {
	// The node must return a map containing only the output key, regardless
	// of what else was present in the input state.
	engine := &mockEngine{
		queryResult: &QueryResult{Context: "retrieved context"},
	}
	node := NewRetrievalNode(engine, "question", "context")
	state := map[string]any{
		"question":  "test query",
		"other_key": "other_value",
		"number":    42,
	}
	result, err := node(context.Background(), state)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	resultMap := result.(map[string]any)
	if got := len(resultMap); got != 1 {
		t.Errorf("expected result map with 1 key, got %d keys", got)
	}
	if got := resultMap["context"]; got != "retrieved context" {
		t.Errorf("expected context 'retrieved context', got %v", got)
	}
}
func TestRetrieverTool_WithComplexQueryResult(t *testing.T) {
	// Call must return the context string verbatim even when the engine's
	// result carries newlines and rich metadata alongside it.
	engine := &mockEngine{
		queryResult: &QueryResult{
			Query:        "complex query",
			Context:      "complex context with newlines\nand multiple\nlines",
			Answer:       "complex answer",
			Confidence:   0.95,
			ResponseTime: 100 * time.Millisecond,
			Metadata: map[string]any{
				"source": "test",
				"count":  5,
			},
		},
	}
	tool := NewRetrieverTool(engine, "", "")
	got, err := tool.Call(context.Background(), "complex query")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	want := "complex context with newlines\nand multiple\nlines"
	if got != want {
		t.Errorf("expected %q, got %q", want, got)
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/adapters_test.go | rag/adapters_test.go | package rag
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tmc/langchaingo/schema"
"github.com/tmc/langchaingo/textsplitter"
)
// mockLCEmbedder is a stub langchaingo embedder that returns a fixed
// two-dimensional vector {0.1, 0.2} for every input.
type mockLCEmbedder struct{}

func (m *mockLCEmbedder) EmbedDocuments(ctx context.Context, texts []string) ([][]float32, error) {
	out := make([][]float32, len(texts))
	for i := range out {
		out[i] = []float32{0.1, 0.2}
	}
	return out, nil
}

func (m *mockLCEmbedder) EmbedQuery(ctx context.Context, text string) ([]float32, error) {
	return []float32{0.1, 0.2}, nil
}
// mockLCLoader is a stub langchaingo loader returning a single fixed document;
// LoadAndSplit ignores the splitter and simply delegates to Load.
type mockLCLoader struct{}

func (m *mockLCLoader) Load(ctx context.Context) ([]schema.Document, error) {
	doc := schema.Document{PageContent: "lc content", Metadata: map[string]any{"source": "lc"}}
	return []schema.Document{doc}, nil
}

func (m *mockLCLoader) LoadAndSplit(ctx context.Context, s textsplitter.TextSplitter) ([]schema.Document, error) {
	return m.Load(ctx)
}
// TestLangChainAdapters exercises each langchaingo adapter with the local
// mocks: document loader, embedder, schema conversion, text splitter,
// retriever construction and vector store construction.
func TestLangChainAdapters(t *testing.T) {
	ctx := context.Background()
	// Loader adapter: Load, LoadWithMetadata (extra metadata merged into each
	// document) and LoadAndSplit with a nil splitter.
	t.Run("LangChainDocumentLoader", func(t *testing.T) {
		lcLoader := &mockLCLoader{}
		adapter := NewLangChainDocumentLoader(lcLoader)
		docs, err := adapter.Load(ctx)
		assert.NoError(t, err)
		assert.Len(t, docs, 1)
		assert.Equal(t, "lc content", docs[0].Content)
		docs2, _ := adapter.LoadWithMetadata(ctx, map[string]any{"a": "b"})
		assert.Equal(t, "b", docs2[0].Metadata["a"])
		docs3, _ := adapter.LoadAndSplit(ctx, nil)
		assert.NotEmpty(t, docs3)
	})
	// Embedder adapter: single and batch embedding plus dimension probing
	// (mock always returns a 2-dimensional vector).
	t.Run("LangChainEmbedder", func(t *testing.T) {
		lcEmb := &mockLCEmbedder{}
		adapter := NewLangChainEmbedder(lcEmb)
		emb, err := adapter.EmbedDocument(ctx, "test")
		assert.NoError(t, err)
		assert.Equal(t, []float32{0.1, 0.2}, emb)
		embs, err := adapter.EmbedDocuments(ctx, []string{"test"})
		assert.NoError(t, err)
		assert.Equal(t, [][]float32{{0.1, 0.2}}, embs)
		assert.Equal(t, 2, adapter.GetDimension())
	})
	// schema.Document -> Document conversion; ID is taken from "source".
	t.Run("Conversion functions", func(t *testing.T) {
		schemaDocs := []schema.Document{
			{PageContent: "content", Metadata: map[string]any{"source": "src1"}},
		}
		docs := convertSchemaDocuments(schemaDocs)
		assert.Len(t, docs, 1)
		assert.Equal(t, "content", docs[0].Content)
		assert.Equal(t, "src1", docs[0].ID)
	})
	// Splitter adapter: paragraph split of text and documents, space-join.
	t.Run("LangChainTextSplitter", func(t *testing.T) {
		lcSplitter := NewLangChainTextSplitter(nil)
		text := "para1\n\npara2"
		splits := lcSplitter.SplitText(text)
		assert.Len(t, splits, 2)
		docs := lcSplitter.SplitDocuments([]Document{{Content: text}})
		assert.Len(t, docs, 2)
		joined := lcSplitter.JoinText([]string{"a", "b"})
		assert.Equal(t, "a b", joined)
	})
	// Dimension probing is repeatable across adapter instances.
	t.Run("LangChainEmbedder Dimension", func(t *testing.T) {
		lcEmb := &mockLCEmbedder{}
		adapter := NewLangChainEmbedder(lcEmb)
		assert.Equal(t, 2, adapter.GetDimension())
	})
	t.Run("LangChainRetriever", func(t *testing.T) {
		// We can't easily mock vectorstores.VectorStore due to its complexity and
		// generic return types, but we can test the adapter logic where possible.
		adapter := NewLangChainRetriever(nil, 3)
		assert.NotNil(t, adapter)
		assert.Equal(t, 3, adapter.topK)
	})
	// Construction with a nil store must still yield a non-nil stats object.
	t.Run("LangChainVectorStore", func(t *testing.T) {
		adapter := NewLangChainVectorStore(nil)
		assert.NotNil(t, adapter)
		stats, _ := adapter.GetStats(ctx)
		assert.NotNil(t, stats)
	})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/adapters.go | rag/adapters.go | package rag
import (
"context"
"fmt"
"maps"
"strings"
"time"
"github.com/tmc/langchaingo/documentloaders"
"github.com/tmc/langchaingo/embeddings"
"github.com/tmc/langchaingo/schema"
"github.com/tmc/langchaingo/textsplitter"
"github.com/tmc/langchaingo/vectorstores"
)
// LangChainDocumentLoader wraps a langchaingo documentloaders.Loader so it
// satisfies this package's DocumentLoader interface.
type LangChainDocumentLoader struct {
	loader documentloaders.Loader
}

// NewLangChainDocumentLoader wraps the given langchaingo loader.
func NewLangChainDocumentLoader(loader documentloaders.Loader) *LangChainDocumentLoader {
	return &LangChainDocumentLoader{loader: loader}
}
// Load fetches documents via the wrapped loader and converts them to the
// package-local Document type.
func (l *LangChainDocumentLoader) Load(ctx context.Context) ([]Document, error) {
	loaded, err := l.loader.Load(ctx)
	if err != nil {
		return nil, err
	}
	return convertSchemaDocuments(loaded), nil
}
// LoadWithMetadata loads documents and merges the given metadata into each
// one, overwriting keys that already exist on a document. A nil metadata map
// leaves the documents untouched.
func (l *LangChainDocumentLoader) LoadWithMetadata(ctx context.Context, metadata map[string]any) ([]Document, error) {
	docs, err := l.Load(ctx)
	if err != nil {
		return nil, err
	}
	if metadata == nil {
		return docs, nil
	}
	for i := range docs {
		if docs[i].Metadata == nil {
			docs[i].Metadata = make(map[string]any)
		}
		maps.Copy(docs[i].Metadata, metadata)
	}
	return docs, nil
}
// LoadAndSplit loads documents and splits them into chunks.
//
// Fix: the splitter parameter was previously ignored. When a splitter is
// provided it is now applied via langchaingo's textsplitter.SplitDocuments;
// when it is nil the previous behavior is kept — documents are split on blank
// lines, producing one chunk per non-empty, trimmed paragraph, each sharing
// its source document's metadata.
func (l *LangChainDocumentLoader) LoadAndSplit(ctx context.Context, splitter textsplitter.TextSplitter) ([]Document, error) {
	schemaDocs, err := l.loader.Load(ctx)
	if err != nil {
		return nil, err
	}
	if splitter != nil {
		split, err := textsplitter.SplitDocuments(splitter, schemaDocs)
		if err != nil {
			return nil, err
		}
		return convertSchemaDocuments(split), nil
	}
	// Fallback: naive paragraph split.
	var splitDocs []schema.Document
	for _, doc := range schemaDocs {
		for para := range strings.SplitSeq(doc.PageContent, "\n\n") {
			if trimmed := strings.TrimSpace(para); trimmed != "" {
				splitDocs = append(splitDocs, schema.Document{
					PageContent: trimmed,
					Metadata:    doc.Metadata,
				})
			}
		}
	}
	return convertSchemaDocuments(splitDocs), nil
}
// convertSchemaDocuments maps langchaingo schema.Documents onto the local
// Document type. A document's ID comes from its "source" metadata entry when
// present, otherwise a positional "doc_<i>" ID is synthesized.
func convertSchemaDocuments(schemaDocs []schema.Document) []Document {
	docs := make([]Document, len(schemaDocs))
	for i := range schemaDocs {
		src := schemaDocs[i]
		id := fmt.Sprintf("doc_%d", i)
		if source, ok := src.Metadata["source"]; ok {
			id = fmt.Sprintf("%v", source)
		}
		docs[i] = Document{
			ID:       id,
			Content:  src.PageContent,
			Metadata: convertSchemaMetadata(src.Metadata),
		}
	}
	return docs
}
// convertSchemaMetadata shallow-copies langchaingo metadata into a fresh map.
// A nil input yields an empty (non-nil) map, so callers can always write to
// the result.
func convertSchemaMetadata(metadata map[string]any) map[string]any {
	copied := make(map[string]any, len(metadata))
	maps.Copy(copied, metadata)
	return copied
}
// LangChainTextSplitter wraps a langchaingo textsplitter.TextSplitter under
// this package's TextSplitter interface. Note: the current Split* methods use
// simple paragraph splitting and do not consult the wrapped splitter.
type LangChainTextSplitter struct {
	splitter textsplitter.TextSplitter
}

// NewLangChainTextSplitter wraps the given langchaingo text splitter.
func NewLangChainTextSplitter(splitter textsplitter.TextSplitter) *LangChainTextSplitter {
	return &LangChainTextSplitter{splitter: splitter}
}
// SplitText splits text on blank lines, returning the trimmed, non-empty
// paragraphs in order.
func (l *LangChainTextSplitter) SplitText(text string) []string {
	parts := strings.Split(text, "\n\n")
	out := make([]string, 0, len(parts))
	for _, part := range parts {
		if trimmed := strings.TrimSpace(part); trimmed != "" {
			out = append(out, trimmed)
		}
	}
	return out
}
// SplitDocuments splits each document on blank lines, producing one document
// per non-empty paragraph. All chunks of a document share its Metadata map.
func (l *LangChainTextSplitter) SplitDocuments(docs []Document) []Document {
	var out []Document
	for _, doc := range docs {
		for para := range strings.SplitSeq(doc.Content, "\n\n") {
			trimmed := strings.TrimSpace(para)
			if trimmed == "" {
				continue
			}
			out = append(out, Document{
				Content:  trimmed,
				Metadata: doc.Metadata,
			})
		}
	}
	return out
}
// JoinText joins text chunks back together with single spaces.
// Uses strings.Join instead of the previous hand-rolled strings.Builder loop;
// the output is identical.
func (l *LangChainTextSplitter) JoinText(chunks []string) string {
	return strings.Join(chunks, " ")
}
// LangChainEmbedder exposes a langchaingo embeddings.Embedder through this
// package's Embedder interface.
type LangChainEmbedder struct {
	embedder embeddings.Embedder
}

// NewLangChainEmbedder wraps the given langchaingo embedder.
func NewLangChainEmbedder(embedder embeddings.Embedder) *LangChainEmbedder {
	return &LangChainEmbedder{embedder: embedder}
}
// EmbedDocument embeds a single text via the wrapped embedder's query path.
//
// langchaingo embedders already return []float32 (see the mock embedder in
// the adapter tests), so the previous element-wise "float64 to float32"
// conversion loop was a no-op copy and has been removed; the embedder's
// result is returned as-is.
func (l *LangChainEmbedder) EmbedDocument(ctx context.Context, text string) ([]float32, error) {
	embedding, err := l.embedder.EmbedQuery(ctx, text)
	if err != nil {
		return nil, err
	}
	return embedding, nil
}
// EmbedDocuments embeds multiple texts via the wrapped embedder.
//
// As with EmbedDocument, langchaingo embedders already return [][]float32, so
// the previous nested "float64 to float32" conversion loops were a no-op copy
// and have been removed.
func (l *LangChainEmbedder) EmbedDocuments(ctx context.Context, texts []string) ([][]float32, error) {
	vectors, err := l.embedder.EmbedDocuments(ctx, texts)
	if err != nil {
		return nil, err
	}
	return vectors, nil
}
// GetDimension reports the embedding width by embedding a probe string, since
// langchaingo embedders do not expose their dimension directly. It returns 0
// when the probe embedding fails. Note: each call performs a live embedding.
func (l *LangChainEmbedder) GetDimension() int {
	probe, err := l.embedder.EmbedQuery(context.Background(), "test")
	if err != nil {
		return 0
	}
	return len(probe)
}
// LangChainVectorStore exposes a langchaingo vectorstores.VectorStore through
// this package's VectorStore interface.
type LangChainVectorStore struct {
	store vectorstores.VectorStore
}

// NewLangChainVectorStore wraps the given langchaingo vector store.
func NewLangChainVectorStore(store vectorstores.VectorStore) *LangChainVectorStore {
	return &LangChainVectorStore{store: store}
}
// Add inserts documents into the wrapped vector store. IDs assigned by the
// store are written back into entries of docs that had an empty ID (the
// caller's slice shares the same backing array, so it observes the update).
//
// Fix: the ID write-back is now bounded by len(docs) — the previous version
// indexed docs by the ids index and would panic if the store returned more
// IDs than documents.
func (l *LangChainVectorStore) Add(ctx context.Context, docs []Document) error {
	schemaDocs := make([]schema.Document, len(docs))
	for i, doc := range docs {
		schemaDocs[i] = schema.Document{
			PageContent: doc.Content,
			Metadata:    doc.Metadata,
		}
	}
	ids, err := l.store.AddDocuments(ctx, schemaDocs)
	if err != nil {
		return err
	}
	for i, id := range ids {
		if i >= len(docs) {
			break
		}
		if docs[i].ID == "" {
			docs[i].ID = id
		}
	}
	return nil
}
// Search performs raw-vector similarity search.
//
// Not supported by this generic adapter: the langchaingo vector store
// interface searches by query string, not by embedding vector, so the
// []float32 query cannot be forwarded portably. Always returns an empty
// result set and a nil error.
func (l *LangChainVectorStore) Search(ctx context.Context, query []float32, k int) ([]DocumentSearchResult, error) {
	// Vector search not supported by generic LangChain adapter as the interface differs
	return []DocumentSearchResult{}, nil
}
// LangChainRetriever exposes a langchaingo vector store through this
// package's Retriever interface.
type LangChainRetriever struct {
	store vectorstores.VectorStore
	topK  int
}

// NewLangChainRetriever wraps store as a Retriever. A non-positive topK falls
// back to the default of 4 results.
func NewLangChainRetriever(store vectorstores.VectorStore, topK int) *LangChainRetriever {
	if topK <= 0 {
		topK = 4
	}
	return &LangChainRetriever{store: store, topK: topK}
}
// Retrieve returns the retriever's default number (topK) of documents for
// the query.
func (r *LangChainRetriever) Retrieve(ctx context.Context, query string) ([]Document, error) {
	return r.RetrieveWithK(ctx, query, r.topK)
}

// RetrieveWithK returns up to k documents ranked by similarity to the query.
func (r *LangChainRetriever) RetrieveWithK(ctx context.Context, query string, k int) ([]Document, error) {
	found, err := r.store.SimilaritySearch(ctx, query, k)
	if err != nil {
		return nil, err
	}
	return convertSchemaDocuments(found), nil
}
// RetrieveWithConfig retrieves documents with custom configuration.
//
// K is taken from config when positive, otherwise the retriever's default
// topK. The generic vectorstores interface returns no scores, so a score is
// recovered from document metadata ("_score", then "score") when the backing
// store happens to place one there; otherwise the score stays 0. When
// config.ScoreThreshold > 0 results are post-filtered by score — note this
// drops every result if the store reported no scores at all.
func (r *LangChainRetriever) RetrieveWithConfig(ctx context.Context, query string, config *RetrievalConfig) ([]DocumentSearchResult, error) {
	k := r.topK
	if config != nil && config.K > 0 {
		k = config.K
	}
	// Use SimilaritySearch
	// Note: Generic SimilaritySearch doesn't return scores.
	// If the underlying store supports SimilaritySearchWithScore, we can't access it via the generic interface easily here.
	docs, err := r.store.SimilaritySearch(ctx, query, k)
	if err != nil {
		return nil, err
	}
	results := make([]DocumentSearchResult, len(docs))
	for i, doc := range docs {
		// Try to extract score from metadata if present (some stores put it there)
		score := 0.0
		if s, ok := doc.Metadata["_score"]; ok {
			if f, ok := s.(float64); ok {
				score = f
			}
		} else if s, ok := doc.Metadata["score"]; ok {
			if f, ok := s.(float64); ok {
				score = f
			}
		}
		results[i] = DocumentSearchResult{
			Document: Document{
				Content:  doc.PageContent,
				Metadata: convertSchemaMetadata(doc.Metadata),
			},
			Score: score,
		}
	}
	// Apply threshold if possible (post-filtering)
	if config != nil && config.ScoreThreshold > 0 {
		var filtered []DocumentSearchResult
		for _, res := range results {
			if res.Score >= config.ScoreThreshold {
				filtered = append(filtered, res)
			}
		}
		results = filtered
	}
	return results, nil
}
// SearchWithFilter performs similarity search with filters.
//
// Stub: the generic langchaingo interface offers no portable filtered
// vector search, so this always returns an empty result set and nil error.
// A real implementation would need the concrete store's own methods.
func (l *LangChainVectorStore) SearchWithFilter(ctx context.Context, query []float32, k int, filter map[string]any) ([]DocumentSearchResult, error) {
	// Simple implementation that returns empty results
	// In a real implementation, you'd need to use the specific vector store's methods
	return []DocumentSearchResult{}, nil
}

// Delete removes documents by IDs.
//
// Stub: langchaingo vector stores expose no standard delete, so this is a
// no-op that always succeeds. Callers (e.g. Update) should be aware that
// nothing is actually removed.
func (l *LangChainVectorStore) Delete(ctx context.Context, ids []string) error {
	// Simple implementation - LangChain vector stores may not have a standard Delete method
	// In a real implementation, you'd need to use the specific vector store's methods
	return nil
}
// Update replaces existing documents as delete-then-add, since langchaingo
// vector stores have no direct update method. Note that this adapter's Delete
// is a no-op, so on stores without real deletion this effectively re-inserts
// the documents.
func (l *LangChainVectorStore) Update(ctx context.Context, docs []Document) error {
	ids := make([]string, 0, len(docs))
	for _, doc := range docs {
		ids = append(ids, doc.ID)
	}
	if err := l.Delete(ctx, ids); err != nil {
		return err
	}
	return l.Add(ctx, docs)
}
// GetStats returns vector store statistics. The generic langchaingo interface
// exposes no metrics, so all counters are zero and only LastUpdated is set
// (to the current time).
func (l *LangChainVectorStore) GetStats(ctx context.Context) (*VectorStoreStats, error) {
	stats := &VectorStoreStats{LastUpdated: time.Now()}
	return stats, nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/pipeline.go | rag/pipeline.go | package rag
import (
"context"
"fmt"
"maps"
"strings"
"time"
"github.com/smallnest/langgraphgo/graph"
"github.com/tmc/langchaingo/llms"
)
// RAGState represents the state flowing through a RAG pipeline.
// NOTE(review): the pipeline nodes currently operate on a map[string]any
// whose keys mirror these fields (see ragStateSchema.Init); this struct is a
// typed description of that state.
type RAGState struct {
	// Query is the user question driving the pipeline.
	Query string
	// Documents is the working document set used for generation/citations.
	Documents []RAGDocument
	// RetrievedDocuments holds the raw retriever output, pre-ranking.
	RetrievedDocuments []RAGDocument
	// RankedDocuments holds scored documents after (re)ranking.
	RankedDocuments []DocumentSearchResult
	// Context is the rendered context block fed to the LLM.
	Context string
	// Answer is the generated answer text.
	Answer string
	// Citations holds the formatted per-document citation strings.
	Citations []string
	// Metadata carries free-form auxiliary data (e.g. fallback flags).
	Metadata map[string]any
}
// PipelineConfig configures a RAG pipeline.
// Retriever and LLM are mandatory for every Build* variant; the remaining
// components are optional and enable additional pipeline stages.
type PipelineConfig struct {
	// Retrieval configuration
	TopK           int     // Number of documents to retrieve
	ScoreThreshold float64 // Minimum relevance score
	UseReranking   bool    // Whether to use reranking
	UseFallback    bool    // Whether to use fallback search
	// Generation configuration
	SystemPrompt     string // system message sent with every generation request
	IncludeCitations bool   // when true, a citation-formatting stage is appended
	MaxTokens        int
	Temperature      float64
	// Components
	Loader      RAGDocumentLoader
	Splitter    RAGTextSplitter
	Embedder    Embedder
	VectorStore VectorStore
	Retriever   Retriever // required by BuildBasicRAG/BuildAdvancedRAG/BuildConditionalRAG
	Reranker    Reranker  // used only when UseReranking is true
	LLM         llms.Model // required; generates the final answer
}
// DefaultPipelineConfig returns the baseline settings: top-4 retrieval with a
// 0.7 score threshold, deterministic generation (temperature 0, 1000 max
// tokens), citations enabled, and reranking/fallback disabled.
func DefaultPipelineConfig() *PipelineConfig {
	cfg := &PipelineConfig{
		TopK:             4,
		ScoreThreshold:   0.7,
		SystemPrompt:     "You are a helpful assistant. Answer the question based on the provided context. If you cannot answer based on the context, say so.",
		IncludeCitations: true,
		MaxTokens:        1000,
	}
	// UseReranking, UseFallback and Temperature intentionally stay at their
	// zero values (false, false, 0.0).
	return cfg
}
// RAGPipeline represents a complete RAG pipeline built on a state graph.
type RAGPipeline struct {
	config *PipelineConfig
	graph  *graph.StateGraph[map[string]any]
}

// NewRAGPipeline creates a pipeline around config, falling back to
// DefaultPipelineConfig when config is nil, and prepares an empty state
// graph with the RAG state schema attached.
func NewRAGPipeline(config *PipelineConfig) *RAGPipeline {
	if config == nil {
		config = DefaultPipelineConfig()
	}
	sg := graph.NewStateGraph[map[string]any]()
	sg.SetSchema(&ragStateSchema{})
	return &RAGPipeline{config: config, graph: sg}
}
type ragStateSchema struct{}
func (s *ragStateSchema) Init() map[string]any {
return map[string]any{
"query": "",
"context": "",
"answer": "",
"documents": []Document{},
"retrieved_documents": []Document{},
"ranked_documents": []Document{},
"citations": []string{},
"metadata": make(map[string]any),
}
}
func (s *ragStateSchema) Update(current, new map[string]any) (map[string]any, error) {
result := make(map[string]any)
maps.Copy(result, current)
maps.Copy(result, new)
return result, nil
}
// BuildBasicRAG wires the minimal pipeline: retrieve -> generate -> END.
// It requires both a Retriever and an LLM in the configuration.
func (p *RAGPipeline) BuildBasicRAG() error {
	switch {
	case p.config.Retriever == nil:
		return fmt.Errorf("retriever is required for basic RAG")
	case p.config.LLM == nil:
		return fmt.Errorf("LLM is required for basic RAG")
	}

	p.graph.AddNode("retrieve", "Document retrieval node", p.retrieveNode)
	p.graph.AddNode("generate", "Answer generation node", p.generateNode)

	p.graph.SetEntryPoint("retrieve")
	p.graph.AddEdge("retrieve", "generate")
	p.graph.AddEdge("generate", graph.END)
	return nil
}
// BuildAdvancedRAG wires retrieve -> [rerank] -> generate ->
// [format_citations] -> END. The rerank stage is included only when both
// UseReranking is set and a Reranker is configured; citation formatting is
// included only when IncludeCitations is set.
func (p *RAGPipeline) BuildAdvancedRAG() error {
	if p.config.Retriever == nil {
		return fmt.Errorf("retriever is required for advanced RAG")
	}
	if p.config.LLM == nil {
		return fmt.Errorf("LLM is required for advanced RAG")
	}

	// Evaluate the optional-stage switches once.
	withRerank := p.config.UseReranking && p.config.Reranker != nil
	withCitations := p.config.IncludeCitations

	p.graph.AddNode("retrieve", "Document retrieval node", p.retrieveNode)
	if withRerank {
		p.graph.AddNode("rerank", "Document reranking node", p.rerankNode)
	}
	p.graph.AddNode("generate", "Answer generation node", p.generateNode)
	if withCitations {
		p.graph.AddNode("format_citations", "Citation formatting node", p.formatCitationsNode)
	}

	p.graph.SetEntryPoint("retrieve")
	if withRerank {
		p.graph.AddEdge("retrieve", "rerank")
		p.graph.AddEdge("rerank", "generate")
	} else {
		p.graph.AddEdge("retrieve", "generate")
	}
	if withCitations {
		p.graph.AddEdge("generate", "format_citations")
		p.graph.AddEdge("format_citations", graph.END)
	} else {
		p.graph.AddEdge("generate", graph.END)
	}
	return nil
}
// BuildConditionalRAG wires retrieve -> rerank, then routes conditionally:
// when the top-ranked score meets ScoreThreshold the flow proceeds straight
// to generate; otherwise it detours through fallback_search first (when
// UseFallback is enabled). Citation formatting is appended when
// IncludeCitations is set.
func (p *RAGPipeline) BuildConditionalRAG() error {
	if p.config.Retriever == nil {
		return fmt.Errorf("retriever is required for conditional RAG")
	}
	if p.config.LLM == nil {
		return fmt.Errorf("LLM is required for conditional RAG")
	}

	p.graph.AddNode("retrieve", "Document retrieval node", p.retrieveNode)
	p.graph.AddNode("rerank", "Document reranking node", p.rerankNode)
	if p.config.UseFallback {
		p.graph.AddNode("fallback_search", "Fallback search node", p.fallbackSearchNode)
	}
	p.graph.AddNode("generate", "Answer generation node", p.generateNode)
	if p.config.IncludeCitations {
		p.graph.AddNode("format_citations", "Citation formatting node", p.formatCitationsNode)
	}

	p.graph.SetEntryPoint("retrieve")
	p.graph.AddEdge("retrieve", "rerank")
	// Route on the top-ranked document's relevance score.
	p.graph.AddConditionalEdge("rerank", func(ctx context.Context, state map[string]any) string {
		ranked, _ := state["ranked_documents"].([]DocumentSearchResult)
		if len(ranked) > 0 && ranked[0].Score >= p.config.ScoreThreshold {
			return "generate"
		}
		if p.config.UseFallback {
			return "fallback_search"
		}
		return "generate"
	})
	if p.config.UseFallback {
		p.graph.AddEdge("fallback_search", "generate")
	}
	if p.config.IncludeCitations {
		p.graph.AddEdge("generate", "format_citations")
		p.graph.AddEdge("format_citations", graph.END)
	} else {
		p.graph.AddEdge("generate", graph.END)
	}
	return nil
}
// Compile compiles the RAG pipeline into a runnable graph.
// Call this only after one of the Build* methods has defined the nodes and
// edges; compiling an empty pipeline is delegated to the graph package.
func (p *RAGPipeline) Compile() (*graph.StateRunnable[map[string]any], error) {
	return p.graph.Compile()
}

// GetGraph returns the underlying graph for visualization
// or manual customization before compiling.
func (p *RAGPipeline) GetGraph() *graph.StateGraph[map[string]any] {
	return p.graph
}
// Node implementations

// retrieveNode fetches documents for state["query"] and stores them under
// both "retrieved_documents" and "documents".
//
// Fix: the conversion to []RAGDocument is now done once and the slice shared
// between the two keys (previously it ran twice). Downstream nodes replace
// these slices wholesale rather than mutating elements, so sharing is safe.
func (p *RAGPipeline) retrieveNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	query, _ := state["query"].(string)
	docs, err := p.config.Retriever.Retrieve(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("retrieval failed: %w", err)
	}
	ragDocs := convertToRAGDocuments(docs)
	state["retrieved_documents"] = ragDocs
	state["documents"] = ragDocs
	return state, nil
}
// rerankNode scores the retrieved documents and stores the ranking under
// "ranked_documents". Every candidate first gets a simple position-based
// score (1.0, 0.9, ...); without a Reranker those scores are final, otherwise
// the Reranker re-orders/re-scores them and "documents" is updated to the new
// order as well.
func (p *RAGPipeline) rerankNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	query, _ := state["query"].(string)
	retrieved, _ := state["retrieved_documents"].([]RAGDocument)

	// Seed every candidate with a decreasing positional score.
	candidates := make([]DocumentSearchResult, len(retrieved))
	for i, doc := range retrieved {
		candidates[i] = DocumentSearchResult{
			Document: doc.Document(),
			Score:    1.0 - float64(i)*0.1,
		}
	}

	// Without a reranker the positional scores are the final ranking; note
	// "documents" is intentionally left untouched in this case.
	if p.config.Reranker == nil {
		state["ranked_documents"] = candidates
		return state, nil
	}

	reranked, err := p.config.Reranker.Rerank(ctx, query, candidates)
	if err != nil {
		return nil, fmt.Errorf("reranking failed: %w", err)
	}
	state["ranked_documents"] = reranked

	// Mirror the reranked order into the working document set.
	reordered := make([]RAGDocument, len(reranked))
	for i, res := range reranked {
		reordered[i] = DocumentFromRAGDocument(res.Document)
	}
	state["documents"] = reordered
	return state, nil
}
// fallbackSearchNode is a placeholder for an external fallback search (e.g. a
// web search API call). It records that the fallback path ran.
//
// Fix: the fallback flag is now merged into the existing "metadata" map
// instead of replacing it wholesale, which previously discarded any metadata
// accumulated earlier in the pipeline.
func (p *RAGPipeline) fallbackSearchNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	meta, _ := state["metadata"].(map[string]any)
	if meta == nil {
		meta = make(map[string]any)
	}
	meta["fallback_used"] = true
	state["metadata"] = meta
	return state, nil
}
// generateNode renders the current documents into a numbered,
// source-attributed context block, prompts the LLM with it plus the user
// question, and stores the answer under "answer" and the rendered block under
// "context". When the LLM returns no choices, "answer" is left unset.
func (p *RAGPipeline) generateNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	query, _ := state["query"].(string)
	documents, _ := state["documents"].([]RAGDocument)

	// Render each document as "[n] Source: ...\nContent: ...".
	parts := make([]string, 0, len(documents))
	for i, doc := range documents {
		source := "Unknown"
		if s, ok := doc.Metadata["source"]; ok {
			source = fmt.Sprintf("%v", s)
		}
		parts = append(parts, fmt.Sprintf("[%d] Source: %s\nContent: %s", i+1, source, doc.Content))
	}
	contextStr := strings.Join(parts, "\n\n")

	prompt := fmt.Sprintf("Context:\n%s\n\nQuestion: %s\n\nAnswer:", contextStr, query)
	response, err := p.config.LLM.GenerateContent(ctx, []llms.MessageContent{
		llms.TextParts("system", p.config.SystemPrompt),
		llms.TextParts("human", prompt),
	})
	if err != nil {
		return nil, fmt.Errorf("generation failed: %w", err)
	}
	if len(response.Choices) > 0 {
		state["answer"] = response.Choices[0].Content
	}
	state["context"] = contextStr
	return state, nil
}
// formatCitationsNode builds one "[n] source" citation string per document
// (falling back to "Unknown" when no source metadata exists) and stores the
// list under state["citations"].
func (p *RAGPipeline) formatCitationsNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	documents, _ := state["documents"].([]RAGDocument)
	citations := make([]string, 0, len(documents))
	for i, doc := range documents {
		source := "Unknown"
		if s, ok := doc.Metadata["source"]; ok {
			source = fmt.Sprintf("%v", s)
		}
		citations = append(citations, fmt.Sprintf("[%d] %s", i+1, source))
	}
	state["citations"] = citations
	return state, nil
}
// RAGDocument represents a document with content and metadata (for pipeline compatibility).
// It mirrors Document minus the ID/embedding fields and is the element type
// the pipeline nodes store in the state map.
type RAGDocument struct {
	Content   string         `json:"content"`
	Metadata  map[string]any `json:"metadata"`
	CreatedAt time.Time      `json:"created_at"`
	UpdatedAt time.Time      `json:"updated_at"`
}

// Document converts the RAGDocument to the package Document type.
// Note: the Metadata map is shared, not copied.
func (d RAGDocument) Document() Document {
	return Document{
		Content:   d.Content,
		Metadata:  d.Metadata,
		CreatedAt: d.CreatedAt,
		UpdatedAt: d.UpdatedAt,
	}
}

// DocumentFromRAGDocument converts a Document to a RAGDocument.
// Note: the Metadata map is shared, not copied; the Document's ID is dropped.
func DocumentFromRAGDocument(doc Document) RAGDocument {
	return RAGDocument{
		Content:   doc.Content,
		Metadata:  doc.Metadata,
		CreatedAt: doc.CreatedAt,
		UpdatedAt: doc.UpdatedAt,
	}
}
// RAGDocumentLoader represents a document loader for RAG pipelines.
// Implementations load source material as RAGDocuments.
type RAGDocumentLoader interface {
	Load(ctx context.Context) ([]RAGDocument, error)
}

// RAGTextSplitter represents a text splitter for RAG pipelines.
// Implementations break documents into smaller chunks suitable for embedding.
type RAGTextSplitter interface {
	SplitDocuments(documents []RAGDocument) ([]RAGDocument, error)
}
// convertToRAGDocuments converts Document to RAGDocument
func convertToRAGDocuments(docs []Document) []RAGDocument {
result := make([]RAGDocument, len(docs))
for i, doc := range docs {
result[i] = DocumentFromRAGDocument(doc)
}
return result
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/doc.go | rag/doc.go | // RAG (Retrieval-Augmented Generation) Package
//
// The rag package provides comprehensive RAG (Retrieval-Augmented Generation) capabilities
// for the LangGraph Go framework. It integrates various RAG approaches including traditional
// vector-based retrieval and advanced GraphRAG techniques.
//
// # Features
//
// - Vector-based RAG: Traditional retrieval using vector similarity
// - GraphRAG: Knowledge graph-based retrieval for enhanced context understanding
// - Multiple Embedding Models: Support for OpenAI, local models, and more
// - Flexible Document Processing: Various document loaders and splitters
// - Hybrid Search: Combine vector and graph-based retrieval
// - Integration Ready: Seamless integration with LangGraph agents
//
// # Quick Start
//
// Basic vector RAG:
//
// import (
// "context"
// "github.com/smallnest/langgraphgo/rag/engine"
// "github.com/tmc/langchaingo/embeddings/openai"
// "github.com/tmc/langchaingo/vectorstores/pgvector"
// )
//
// func main() {
// llm := initLLM()
// embedder, _ := openai.NewEmbedder()
// store, _ := pgvector.New(ctx, pgvector.WithEmbedder(embedder))
//
// ragEngine, _ := engine.NewVectorRAGEngine(llm, embedder, store, 5)
//
// result, err := ragEngine.Query(ctx, "What is quantum computing?")
// }
//
// GraphRAG integration:
//
// import (
// "context"
// "github.com/smallnest/langgraphgo/rag/engine"
// )
//
// func main() {
// graphRAG, _ := engine.NewGraphRAGEngine(engine.GraphRAGConfig{
// DatabaseURL: "redis://localhost:6379",
// ModelProvider: "openai",
// EmbeddingModel: "text-embedding-3-small",
// }, llm, embedder, kg)
//
// // Extract and store knowledge graph
// err := graphRAG.AddDocuments(ctx, documents)
//
// // Query using graph-enhanced retrieval
// response, err := graphRAG.Query(ctx, "Who directed the Matrix?")
// }
//
// # Architecture
//
// The rag package consists of several key components:
//
// # Core Components
//
// rag/engine.go
// Main RAG engine interfaces and base implementations
//
// type Engine interface {
// Query(ctx context.Context, query string) (*QueryResult, error)
// AddDocuments(ctx context.Context, docs []Document) error
// SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error)
// }
//
// rag/engine/vector.go
// Traditional vector-based RAG implementation
//
// vectorEngine, _ := engine.NewVectorRAGEngine(llm, embedder, vectorStore, k)
//
// rag/engine/graph.go
// GraphRAG implementation with knowledge graph extraction
//
// graphEngine, _ := engine.NewGraphRAGEngine(config, llm, embedder, kg)
//
// # Document Processing
//
// rag/types.go
// Core document and entity types
//
// type Document struct {
// ID string
// Content string
// Metadata map[string]any
// }
//
// rag/loader/
// Various document loaders (text, static, etc.)
//
// loader := loader.NewTextLoader("document.txt")
// docs, err := loader.Load(ctx)
//
// rag/splitter/
// Text splitting strategies
//
// splitter := splitter.NewRecursiveCharacterTextSplitter(
// splitter.WithChunkSize(1000),
// splitter.WithChunkOverlap(200),
// )
//
// # Retrieval Strategies
//
// rag/retriever/
// Various retrieval implementations
//
// vectorRetriever := retriever.NewVectorRetriever(vectorStore, embedder, 5)
// graphRetriever := retriever.NewGraphRetriever(knowledgeGraph, 5)
// hybridRetriever := retriever.NewHybridRetriever([]Retriever{r1, r2}, weights, config)
//
// # Integration with LangGraph
//
// The rag package integrates seamlessly with LangGraph agents:
//
// // Create a RAG pipeline
// pipeline := rag.NewRAGPipeline(config)
// runnable, _ := pipeline.Compile()
// result, _ := runnable.Invoke(ctx, rag.RAGState{Query: "..."})
//
// # Configuration
//
// The rag package supports various configuration options:
//
// type Config struct {
// VectorRAG *VectorRAGConfig `json:"vector_rag,omitempty"`
// GraphRAG *GraphRAGConfig `json:"graph_rag,omitempty"`
// }
//
// # Supported Data Sources
//
// - Local files (TXT, MD, etc.)
// - Static documents
// - Web pages and websites (via adapters)
//
// # Supported Vector Stores
//
// - pgvector (via adapter)
// - Redis (via adapter)
// - Mock/In-memory store for testing
//
// # GraphRAG Features
//
// - Automatic entity extraction
// - Relationship detection
// - Multi-hop reasoning
// - Context-aware retrieval
package rag // import "github.com/smallnest/langgraphgo/rag"
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/langgraph_adapter.go | rag/langgraph_adapter.go | package rag
import (
"context"
"fmt"
"github.com/tmc/langchaingo/tools"
)
// ========================================
// LangGraph Adapters
// ========================================
// NewRetrievalNode creates a LangGraph node function that retrieves documents using the RAG engine.
// It expects the input state to be a map[string]any.
//
// Parameters:
// - engine: The RAG engine to use for retrieval.
// - inputKey: The key in the state map where the query string is stored.
// - outputKey: The key in the returned map where the retrieved context (string) will be stored.
//
// Usage:
//
// graph.AddNode("retrieve", rag.NewRetrievalNode(myEngine, "question", "context"))
func NewRetrievalNode(engine Engine, inputKey, outputKey string) func(context.Context, any) (any, error) {
return func(ctx context.Context, state any) (any, error) {
m, ok := state.(map[string]any)
if !ok {
return nil, fmt.Errorf("state is not a map[string]any, got %T", state)
}
query, ok := m[inputKey].(string)
if !ok {
return nil, fmt.Errorf("input key '%s' not found or not a string", inputKey)
}
// Perform the query
result, err := engine.Query(ctx, query)
if err != nil {
return nil, err
}
// Return the update
return map[string]any{
outputKey: result.Context,
}, nil
}
}
// RetrieverTool wraps a RAG Engine as a LangChain Tool, allowing agents to query the knowledge base.
type RetrieverTool struct {
Engine Engine
NameVal string
DescVal string
}
// NewRetrieverTool creates a new RetrieverTool.
// If name or description are empty, defaults will be used.
func NewRetrieverTool(engine Engine, name, description string) *RetrieverTool {
if name == "" {
name = "knowledge_base"
}
if description == "" {
description = "A knowledge base tool. Use this to search for information to answer questions."
}
return &RetrieverTool{
Engine: engine,
NameVal: name,
DescVal: description,
}
}
var _ tools.Tool = &RetrieverTool{}
// Name returns the name of the tool.
func (t *RetrieverTool) Name() string {
return t.NameVal
}
// Description returns the description of the tool.
func (t *RetrieverTool) Description() string {
return t.DescVal
}
// Call executes the retrieval query.
func (t *RetrieverTool) Call(ctx context.Context, input string) (string, error) {
result, err := t.Engine.Query(ctx, input)
if err != nil {
return "", err
}
return result.Context, nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/types.go | rag/types.go | package rag
import (
"context"
"time"
)
// ========================================
// Core Types
// ========================================
// Document represents a document or document chunk in the RAG system
type Document struct {
ID string `json:"id"`
Content string `json:"content"`
Metadata map[string]any `json:"metadata"`
Embedding []float32 `json:"embedding,omitempty"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
}
// Entity represents a knowledge graph entity
type Entity struct {
ID string `json:"id"`
Type string `json:"type"`
Name string `json:"name"`
Properties map[string]any `json:"properties"`
Embedding []float32 `json:"embedding,omitempty"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
}
// Relationship represents a relationship between entities
type Relationship struct {
ID string `json:"id"`
Source string `json:"source"`
Target string `json:"target"`
Type string `json:"type"`
Properties map[string]any `json:"properties"`
Weight float64 `json:"weight,omitempty"`
Confidence float64 `json:"confidence,omitempty"`
CreatedAt time.Time `json:"created_at"`
}
// Community represents a community of entities in the knowledge graph
type Community struct {
ID string `json:"id"`
Level int `json:"level"`
Title string `json:"title"`
Summary string `json:"summary"`
Entities []string `json:"entities"`
ParentID string `json:"parent_id,omitempty"`
Children []string `json:"children,omitempty"`
Properties map[string]any `json:"properties,omitempty"`
Score float64 `json:"score,omitempty"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
}
// DocumentSearchResult represents a document search result with relevance score
type DocumentSearchResult struct {
Document Document `json:"document"`
Score float64 `json:"score"`
Metadata map[string]any `json:"metadata,omitempty"`
}
// GraphQuery represents a query to the knowledge graph
type GraphQuery struct {
EntityTypes []string `json:"entity_types,omitempty"`
Relationships []string `json:"relationships,omitempty"`
Filters map[string]any `json:"filters,omitempty"`
Limit int `json:"limit,omitempty"`
MaxDepth int `json:"max_depth,omitempty"`
StartEntity string `json:"start_entity,omitempty"`
EntityType string `json:"entity_type,omitempty"`
}
// GraphQueryResult represents the result of a graph query
type GraphQueryResult struct {
Entities []*Entity `json:"entities"`
Relationships []*Relationship `json:"relationships"`
Paths [][]*Entity `json:"paths,omitempty"`
Score float64 `json:"score"`
Scores []float64 `json:"scores,omitempty"`
Metadata map[string]any `json:"metadata,omitempty"`
}
// ========================================
// Configuration Types
// ========================================
// RetrievalConfig contains configuration for retrieval operations
type RetrievalConfig struct {
K int `json:"k"`
ScoreThreshold float64 `json:"score_threshold"`
SearchType string `json:"search_type"`
Filter map[string]any `json:"filter,omitempty"`
IncludeScores bool `json:"include_scores"`
}
// VectorStoreStats contains statistics about a vector store
type VectorStoreStats struct {
TotalDocuments int `json:"total_documents"`
TotalVectors int `json:"total_vectors"`
Dimension int `json:"dimension"`
LastUpdated time.Time `json:"last_updated"`
}
// VectorRAGConfig represents configuration for vector-based RAG
type VectorRAGConfig struct {
EmbeddingModel string `json:"embedding_model"`
VectorStoreType string `json:"vector_store_type"`
VectorStoreConfig map[string]any `json:"vector_store_config"`
ChunkSize int `json:"chunk_size"`
ChunkOverlap int `json:"chunk_overlap"`
EnableReranking bool `json:"enable_reranking"`
RetrieverConfig RetrievalConfig `json:"retriever_config"`
}
// GraphRAGConfig represents configuration for graph-based RAG
type GraphRAGConfig struct {
DatabaseURL string `json:"database_url"`
ModelProvider string `json:"model_provider"`
EmbeddingModel string `json:"embedding_model"`
ChatModel string `json:"chat_model"`
EntityTypes []string `json:"entity_types"`
Relationships map[string][]string `json:"relationships"`
MaxDepth int `json:"max_depth"`
EnableReasoning bool `json:"enable_reasoning"`
ExtractionPrompt string `json:"extraction_prompt"`
}
// LightRAGConfig represents configuration for LightRAG
// LightRAG combines low-level semantic chunks with high-level graph structures
type LightRAGConfig struct {
// Retrieval mode: "local", "global", "hybrid", or "naive"
Mode string `json:"mode"`
// Local retrieval configuration
LocalConfig LocalRetrievalConfig `json:"local_config"`
// Global retrieval configuration
GlobalConfig GlobalRetrievalConfig `json:"global_config"`
// Hybrid retrieval configuration
HybridConfig HybridRetrievalConfig `json:"hybrid_config"`
// Chunk size for text splitting
ChunkSize int `json:"chunk_size"`
// Chunk overlap for text splitting
ChunkOverlap int `json:"chunk_overlap"`
// Threshold for entity extraction
EntityExtractionThreshold float64 `json:"entity_extraction_threshold"`
// Maximum number of entities to extract per chunk
MaxEntitiesPerChunk int `json:"max_entities_per_chunk"`
// Enable community detection for global retrieval
EnableCommunityDetection bool `json:"enable_community_detection"`
// Community detection algorithm: "louvain", "leiden", or "label_propagation"
CommunityDetectionAlgorithm string `json:"community_detection_algorithm"`
// Number of communities to return in global retrieval
MaxCommunities int `json:"max_communities"`
// Temperature for LLM-based operations
Temperature float64 `json:"temperature"`
// Custom prompt templates
PromptTemplates map[string]string `json:"prompt_templates,omitempty"`
}
// LocalRetrievalConfig configures local mode retrieval
// Local mode retrieves relevant entities and their relationships within a localized context
type LocalRetrievalConfig struct {
// Maximum number of hops in the knowledge graph
MaxHops int `json:"max_hops"`
// Number of entities to retrieve
TopK int `json:"top_k"`
// Include entity descriptions
IncludeDescriptions bool `json:"include_descriptions"`
// Weight for entity relevance
EntityWeight float64 `json:"entity_weight"`
}
// GlobalRetrievalConfig configures global mode retrieval
// Global mode retrieves information from community-level summaries
type GlobalRetrievalConfig struct {
// Maximum number of communities to retrieve
MaxCommunities int `json:"max_communities"`
// Include community hierarchy
IncludeHierarchy bool `json:"include_hierarchy"`
// Weight for community relevance
CommunityWeight float64 `json:"community_weight"`
// Maximum hierarchy depth
MaxHierarchyDepth int `json:"max_hierarchy_depth"`
}
// HybridRetrievalConfig configures hybrid mode retrieval
// Hybrid mode combines local and global retrieval results
type HybridRetrievalConfig struct {
// Weight for local retrieval results (0-1)
LocalWeight float64 `json:"local_weight"`
// Weight for global retrieval results (0-1)
GlobalWeight float64 `json:"global_weight"`
// Fusion method: "rrf" (reciprocal rank fusion) or "weighted"
FusionMethod string `json:"fusion_method"`
// RRF parameter for rank fusion
RFFK int `json:"rrf_k"`
}
// Config is a generic RAG configuration
type Config struct {
VectorRAG *VectorRAGConfig `json:"vector_rag,omitempty"`
GraphRAG *GraphRAGConfig `json:"graph_rag,omitempty"`
LightRAG *LightRAGConfig `json:"lightrag,omitempty"`
}
// RAGConfig represents the main RAG configuration
type RAGConfig struct {
Config `json:"config"`
EnableCache bool `json:"enable_cache"`
CacheSize int `json:"cache_size"`
EnableMetrics bool `json:"enable_metrics"`
Debug bool `json:"debug"`
Timeout time.Duration `json:"timeout"`
}
// ========================================
// Core Interfaces
// ========================================
// TextSplitter interface for splitting text into chunks
type TextSplitter interface {
SplitText(text string) []string
SplitDocuments(documents []Document) []Document
JoinText(chunks []string) string
}
// Embedder interface for text embeddings
type Embedder interface {
EmbedDocument(ctx context.Context, text string) ([]float32, error)
EmbedDocuments(ctx context.Context, texts []string) ([][]float32, error)
GetDimension() int
}
// VectorStore interface for vector storage and retrieval
type VectorStore interface {
Add(ctx context.Context, documents []Document) error
Search(ctx context.Context, query []float32, k int) ([]DocumentSearchResult, error)
SearchWithFilter(ctx context.Context, query []float32, k int, filter map[string]any) ([]DocumentSearchResult, error)
Delete(ctx context.Context, ids []string) error
Update(ctx context.Context, documents []Document) error
GetStats(ctx context.Context) (*VectorStoreStats, error)
}
// Retriever interface for document retrieval
type Retriever interface {
Retrieve(ctx context.Context, query string) ([]Document, error)
RetrieveWithK(ctx context.Context, query string, k int) ([]Document, error)
RetrieveWithConfig(ctx context.Context, query string, config *RetrievalConfig) ([]DocumentSearchResult, error)
}
// Reranker interface for reranking search results
type Reranker interface {
Rerank(ctx context.Context, query string, documents []DocumentSearchResult) ([]DocumentSearchResult, error)
}
// DocumentLoader interface for loading documents
type DocumentLoader interface {
Load(ctx context.Context) ([]Document, error)
}
// LLMInterface defines the interface for language models
type LLMInterface interface {
Generate(ctx context.Context, prompt string) (string, error)
GenerateWithConfig(ctx context.Context, prompt string, config map[string]any) (string, error)
GenerateWithSystem(ctx context.Context, system, prompt string) (string, error)
}
// KnowledgeGraph interface for graph-based retrieval
type KnowledgeGraph interface {
AddEntity(ctx context.Context, entity *Entity) error
AddRelationship(ctx context.Context, relationship *Relationship) error
Query(ctx context.Context, query *GraphQuery) (*GraphQueryResult, error)
GetRelatedEntities(ctx context.Context, entityID string, maxDepth int) ([]*Entity, error)
GetEntity(ctx context.Context, entityID string) (*Entity, error)
}
// Engine interface for RAG engines
type Engine interface {
Query(ctx context.Context, query string) (*QueryResult, error)
QueryWithConfig(ctx context.Context, query string, config *RetrievalConfig) (*QueryResult, error)
AddDocuments(ctx context.Context, docs []Document) error
DeleteDocument(ctx context.Context, docID string) error
UpdateDocument(ctx context.Context, doc Document) error
SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error)
SimilaritySearchWithScores(ctx context.Context, query string, k int) ([]DocumentSearchResult, error)
}
// ========================================
// Result Types
// ========================================
// Metrics contains performance metrics for RAG engines
type Metrics struct {
TotalQueries int64 `json:"total_queries"`
TotalDocuments int64 `json:"total_documents"`
AverageLatency time.Duration `json:"average_latency"`
MinLatency time.Duration `json:"min_latency"`
MaxLatency time.Duration `json:"max_latency"`
LastQueryTime time.Time `json:"last_query_time"`
CacheHits int64 `json:"cache_hits"`
CacheMisses int64 `json:"cache_misses"`
IndexingLatency time.Duration `json:"indexing_latency"`
}
// QueryResult represents the result of a RAG query
type QueryResult struct {
Query string `json:"query"`
Answer string `json:"answer"`
Sources []Document `json:"sources"`
Context string `json:"context"`
Confidence float64 `json:"confidence"`
ResponseTime time.Duration `json:"response_time"`
Metadata map[string]any `json:"metadata"`
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/pipeline_test.go | rag/pipeline_test.go | package rag
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tmc/langchaingo/llms"
)
type mockLLM struct{}
func (m *mockLLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) {
return &llms.ContentResponse{
Choices: []*llms.ContentChoice{
{Content: "Mock Answer"},
},
}, nil
}
func (m *mockLLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
return "Mock Answer", nil
}
type mockRetriever struct {
docs []Document
}
func (m *mockRetriever) Retrieve(ctx context.Context, query string) ([]Document, error) {
return m.docs, nil
}
func (m *mockRetriever) RetrieveWithK(ctx context.Context, query string, k int) ([]Document, error) {
return m.docs, nil
}
func (m *mockRetriever) RetrieveWithConfig(ctx context.Context, query string, config *RetrievalConfig) ([]DocumentSearchResult, error) {
res := make([]DocumentSearchResult, len(m.docs))
for i, d := range m.docs {
res[i] = DocumentSearchResult{Document: d, Score: 0.9}
}
return res, nil
}
type mockEmbedder struct{}
func (m *mockEmbedder) EmbedDocument(ctx context.Context, text string) ([]float32, error) {
return []float32{0.1, 0.2}, nil
}
func (m *mockEmbedder) EmbedDocuments(ctx context.Context, texts []string) ([][]float32, error) {
return [][]float32{{0.1, 0.2}}, nil
}
func (m *mockEmbedder) GetDimension() int { return 2 }
func TestRAGPipelineNodes(t *testing.T) {
ctx := context.Background()
llm := &mockLLM{}
retriever := &mockRetriever{
docs: []Document{
{Content: "Context doc 1", Metadata: map[string]any{"source": "src1"}},
},
}
config := DefaultPipelineConfig()
config.LLM = llm
config.Retriever = retriever
p := NewRAGPipeline(config)
t.Run("Retrieve Node", func(t *testing.T) {
state := map[string]any{"query": "test", "documents": []Document{}}
res, err := p.retrieveNode(ctx, state)
assert.NoError(t, err)
docs, _ := res["documents"].([]RAGDocument)
assert.Len(t, docs, 1)
})
t.Run("Generate Node", func(t *testing.T) {
state := map[string]any{
"query": "test",
"documents": []RAGDocument{{Content: "context", Metadata: map[string]any{"source": "src1"}}},
}
res, err := p.generateNode(ctx, state)
assert.NoError(t, err)
answer, _ := res["answer"].(string)
assert.Equal(t, "Mock Answer", answer)
})
t.Run("Format Citations Node", func(t *testing.T) {
state := map[string]any{
"documents": []RAGDocument{{Metadata: map[string]any{"source": "src1"}}},
}
res, err := p.formatCitationsNode(ctx, state)
assert.NoError(t, err)
citations, _ := res["citations"].([]string)
assert.Len(t, citations, 1)
assert.Contains(t, citations[0], "src1")
})
}
func TestRAGPipelineBuilds(t *testing.T) {
config := DefaultPipelineConfig()
config.LLM = &mockLLM{}
config.Retriever = &mockRetriever{}
p := NewRAGPipeline(config)
assert.NoError(t, p.BuildBasicRAG())
assert.NoError(t, p.BuildAdvancedRAG())
assert.NoError(t, p.BuildConditionalRAG())
}
func TestRerankNode(t *testing.T) {
ctx := context.Background()
p := NewRAGPipeline(nil)
state := map[string]any{
"retrieved_documents": []RAGDocument{{Content: "doc1"}},
}
res, err := p.rerankNode(ctx, state)
assert.NoError(t, err)
rankedDocs, _ := res["ranked_documents"].([]DocumentSearchResult)
assert.Len(t, rankedDocs, 1)
}
func TestRAGStateSchema(t *testing.T) {
s := &ragStateSchema{}
init := s.Init()
assert.NotNil(t, init["metadata"])
update := map[string]any{
"query": "new query",
"metadata": map[string]any{"key": "val"},
}
merged, err := s.Update(init, update)
assert.NoError(t, err)
query, _ := merged["query"].(string)
metadata, _ := merged["metadata"].(map[string]any)
assert.Equal(t, "new query", query)
assert.Equal(t, "val", metadata["key"])
}
func TestBaseEngine(t *testing.T) {
ctx := context.Background()
retriever := &mockRetriever{docs: []Document{{Content: "context"}}}
embedder := &mockEmbedder{}
engine := NewBaseEngine(retriever, embedder, nil)
assert.NotNil(t, engine)
t.Run("Engine Query", func(t *testing.T) {
res, err := engine.Query(ctx, "test")
assert.NoError(t, err)
assert.NotEmpty(t, res.Context)
})
t.Run("Engine Search", func(t *testing.T) {
docs, err := engine.SimilaritySearch(ctx, "test", 1)
assert.NoError(t, err)
assert.Len(t, docs, 1)
})
}
func TestCompositeEngine(t *testing.T) {
ctx := context.Background()
retriever := &mockRetriever{docs: []Document{{ID: "1", Content: "c1"}}}
embedder := &mockEmbedder{}
engine1 := NewBaseEngine(retriever, embedder, nil)
comp := NewCompositeEngine([]Engine{engine1}, nil)
t.Run("Composite Query", func(t *testing.T) {
res, err := comp.Query(ctx, "test")
assert.NoError(t, err)
assert.Len(t, res.Sources, 1)
})
t.Run("Aggregators", func(t *testing.T) {
res1 := &QueryResult{Confidence: 0.5, Sources: []Document{{ID: "1"}}, Metadata: make(map[string]any)}
res2 := &QueryResult{Confidence: 0.8, Sources: []Document{{ID: "2"}}, Metadata: make(map[string]any)}
agg := DefaultAggregator([]*QueryResult{res1, res2})
assert.Equal(t, 0.8, agg.Confidence)
assert.Len(t, agg.Sources, 2)
wAgg := WeightedAggregator([]float64{1.0, 0.1})([]*QueryResult{res1, res2})
assert.Equal(t, 0.5, wAgg.Confidence)
})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/vector.go | rag/retriever/vector.go | package retriever
import (
"context"
"fmt"
"github.com/smallnest/langgraphgo/rag"
)
// VectorRetriever implements document retrieval using vector similarity
type VectorRetriever struct {
vectorStore rag.VectorStore
embedder rag.Embedder
config rag.RetrievalConfig
}
// NewVectorRetriever creates a new vector retriever
func NewVectorRetriever(vectorStore rag.VectorStore, embedder rag.Embedder, config rag.RetrievalConfig) *VectorRetriever {
if config.K == 0 {
config.K = 4
}
if config.ScoreThreshold == 0 {
config.ScoreThreshold = 0.5
}
if config.SearchType == "" {
config.SearchType = "similarity"
}
return &VectorRetriever{
vectorStore: vectorStore,
embedder: embedder,
config: config,
}
}
// Retrieve retrieves documents based on a query
func (r *VectorRetriever) Retrieve(ctx context.Context, query string) ([]rag.Document, error) {
return r.RetrieveWithK(ctx, query, r.config.K)
}
// RetrieveWithK retrieves exactly k documents
func (r *VectorRetriever) RetrieveWithK(ctx context.Context, query string, k int) ([]rag.Document, error) {
config := r.config
config.K = k
results, err := r.RetrieveWithConfig(ctx, query, &config)
if err != nil {
return nil, err
}
docs := make([]rag.Document, len(results))
for i, result := range results {
docs[i] = result.Document
}
return docs, nil
}
// RetrieveWithConfig retrieves documents with custom configuration
func (r *VectorRetriever) RetrieveWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) ([]rag.DocumentSearchResult, error) {
if config == nil {
config = &r.config
}
// Embed the query
queryEmbedding, err := r.embedder.EmbedDocument(ctx, query)
if err != nil {
return nil, fmt.Errorf("failed to embed query: %w", err)
}
// Perform vector search
var results []rag.DocumentSearchResult
if len(config.Filter) > 0 {
// Search with filters
results, err = r.vectorStore.SearchWithFilter(ctx, queryEmbedding, config.K, config.Filter)
} else {
// Simple search
results, err = r.vectorStore.Search(ctx, queryEmbedding, config.K)
}
if err != nil {
return nil, fmt.Errorf("vector search failed: %w", err)
}
// Filter by score threshold
if config.ScoreThreshold > 0 {
filtered := make([]rag.DocumentSearchResult, 0)
for _, result := range results {
if result.Score >= config.ScoreThreshold {
filtered = append(filtered, result)
}
}
results = filtered
}
// Apply different search strategies
switch config.SearchType {
case "mmr":
results = r.applyMMR(results, config.K)
case "diversity":
results = r.applyDiversitySearch(results, config.K)
}
return results, nil
}
// applyMMR applies Maximal Marginal Relevance to ensure diversity
func (r *VectorRetriever) applyMMR(results []rag.DocumentSearchResult, k int) []rag.DocumentSearchResult {
if len(results) <= k {
return results
}
selected := make([]rag.DocumentSearchResult, 0, k)
selected = append(selected, results[0]) // Always select the highest scoring result
candidates := results[1:]
for len(selected) < k && len(candidates) > 0 {
// Find the candidate with highest MMR score
bestIdx := 0
bestScore := 0.0
for i, candidate := range candidates {
// Calculate relevance score
relevance := candidate.Score
// Calculate maximal similarity to already selected documents
maxSimilarity := 0.0
for _, selectedDoc := range selected {
similarity := r.calculateSimilarity(candidate.Document, selectedDoc.Document)
if similarity > maxSimilarity {
maxSimilarity = similarity
}
}
// MMR score: ฮป * relevance - (1-ฮป) * maxSimilarity
lambda := 0.5 // Balance between relevance and diversity
mmrScore := lambda*relevance - (1-lambda)*maxSimilarity
if mmrScore > bestScore {
bestScore = mmrScore
bestIdx = i
}
}
// Add the best candidate to selected results
selected = append(selected, candidates[bestIdx])
// Remove from candidates
candidates = append(candidates[:bestIdx], candidates[bestIdx+1:]...)
}
return selected
}
// applyDiversitySearch applies diversity-based selection
func (r *VectorRetriever) applyDiversitySearch(results []rag.DocumentSearchResult, k int) []rag.DocumentSearchResult {
if len(results) <= k {
return results
}
// Group results by content type or source to ensure diversity
groups := make(map[string][]rag.DocumentSearchResult)
for _, result := range results {
// Try to group by source or type
var groupKey string
if result.Document.Metadata != nil {
if source, ok := result.Document.Metadata["source"]; ok {
groupKey = fmt.Sprintf("%v", source)
} else if docType, ok := result.Document.Metadata["type"]; ok {
groupKey = fmt.Sprintf("%v", docType)
}
}
if groupKey == "" {
groupKey = "default"
}
groups[groupKey] = append(groups[groupKey], result)
}
// Select top results from each group to ensure diversity
selected := make([]rag.DocumentSearchResult, 0, k)
for _, group := range groups {
if len(group) > 0 {
selected = append(selected, group[0]) // Take the best from each group
if len(selected) >= k {
break
}
}
}
// If we need more results to reach k, take the next best from any group
if len(selected) < k {
remaining := k - len(selected)
remainingResults := make([]rag.DocumentSearchResult, 0)
for _, group := range groups {
if len(group) > 1 {
remainingResults = append(remainingResults, group[1:]...)
}
}
// Add the top remaining results
for i := 0; i < remaining && i < len(remainingResults); i++ {
selected = append(selected, remainingResults[i])
}
}
return selected
}
// calculateSimilarity calculates similarity between two documents
func (r *VectorRetriever) calculateSimilarity(doc1, doc2 rag.Document) float64 {
// Use embeddings if available
if len(doc1.Embedding) > 0 && len(doc2.Embedding) > 0 {
return cosineSimilarity(doc1.Embedding, doc2.Embedding)
}
// Fallback to content similarity
return contentSimilarity(doc1.Content, doc2.Content)
}
// cosineSimilarity calculates cosine similarity between two embeddings
func cosineSimilarity(a, b []float32) float64 {
if len(a) != len(b) {
return 0.0
}
var dotProduct, normA, normB float32
for i := range a {
dotProduct += a[i] * b[i]
normA += a[i] * a[i]
normB += b[i] * b[i]
}
if normA == 0 || normB == 0 {
return 0.0
}
return float64(dotProduct / (normA * normB))
}
// contentSimilarity calculates similarity between document contents
func contentSimilarity(a, b string) float64 {
// Simple word overlap similarity
wordsA := make(map[string]bool)
wordsB := make(map[string]bool)
// Extract words from both documents
for _, word := range splitWords(a) {
wordsA[word] = true
}
for _, word := range splitWords(b) {
wordsB[word] = true
}
// Calculate Jaccard similarity
intersection := 0
for word := range wordsA {
if wordsB[word] {
intersection++
}
}
union := len(wordsA) + len(wordsB) - intersection
if union == 0 {
return 1.0
}
return float64(intersection) / float64(union)
}
// splitWords splits text into words
func splitWords(text string) []string {
// Simple word splitting - in practice, you'd use more sophisticated tokenization
words := make([]string, 0)
current := ""
for _, char := range text {
if isAlphaNumeric(char) {
current += string(char)
} else {
if current != "" {
words = append(words, current)
current = ""
}
}
}
if current != "" {
words = append(words, current)
}
return words
}
// isAlphaNumeric reports whether char is an ASCII letter or digit.
// Non-ASCII letters deliberately return false to keep tokenization simple.
func isAlphaNumeric(char rune) bool {
	switch {
	case char >= 'a' && char <= 'z':
		return true
	case char >= 'A' && char <= 'Z':
		return true
	case char >= '0' && char <= '9':
		return true
	}
	return false
}
// VectorStoreRetriever implements Retriever using a VectorStore with backward compatibility
type VectorStoreRetriever struct {
	vectorStore rag.VectorStore // backing store searched by embedding
	embedder    rag.Embedder    // converts query text into an embedding
	topK        int             // default number of results to return
}
// NewVectorStoreRetriever creates a new VectorStoreRetriever.
// A non-positive topK falls back to a default of 4.
func NewVectorStoreRetriever(vectorStore rag.VectorStore, embedder rag.Embedder, topK int) *VectorStoreRetriever {
	r := &VectorStoreRetriever{
		vectorStore: vectorStore,
		embedder:    embedder,
		topK:        topK,
	}
	if r.topK <= 0 {
		r.topK = 4
	}
	return r
}
// Retrieve retrieves relevant documents for a query using the retriever's
// configured default result count (topK).
func (r *VectorStoreRetriever) Retrieve(ctx context.Context, query string) ([]rag.Document, error) {
	return r.RetrieveWithK(ctx, query, r.topK)
}
// RetrieveWithK embeds the query, searches the vector store for the k
// nearest entries, and returns the matching documents (scores dropped).
func (r *VectorStoreRetriever) RetrieveWithK(ctx context.Context, query string, k int) ([]rag.Document, error) {
	queryEmbedding, err := r.embedder.EmbedDocument(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to embed query: %w", err)
	}
	hits, err := r.vectorStore.Search(ctx, queryEmbedding, k)
	if err != nil {
		return nil, fmt.Errorf("vector search failed: %w", err)
	}
	docs := make([]rag.Document, 0, len(hits))
	for _, hit := range hits {
		docs = append(docs, hit.Document)
	}
	return docs, nil
}
// RetrieveWithConfig retrieves documents with a custom configuration. A nil
// config falls back to the retriever's defaults. When config.Filter is set
// the filtered search path is used; when ScoreThreshold is positive,
// results scoring below it are dropped.
func (r *VectorStoreRetriever) RetrieveWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) ([]rag.DocumentSearchResult, error) {
	if config == nil {
		config = &rag.RetrievalConfig{
			K:              r.topK,
			ScoreThreshold: 0.0,
			SearchType:     "similarity",
			IncludeScores:  false,
		}
	}
	queryEmbedding, err := r.embedder.EmbedDocument(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to embed query: %w", err)
	}
	var (
		results   []rag.DocumentSearchResult
		searchErr error
	)
	if len(config.Filter) > 0 {
		results, searchErr = r.vectorStore.SearchWithFilter(ctx, queryEmbedding, config.K, config.Filter)
	} else {
		results, searchErr = r.vectorStore.Search(ctx, queryEmbedding, config.K)
	}
	if searchErr != nil {
		return nil, fmt.Errorf("vector search failed: %w", searchErr)
	}
	if config.ScoreThreshold > 0 {
		kept := make([]rag.DocumentSearchResult, 0, len(results))
		for _, res := range results {
			if res.Score >= config.ScoreThreshold {
				kept = append(kept, res)
			}
		}
		results = kept
	}
	return results, nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/llm_reranker.go | rag/retriever/llm_reranker.go | package retriever
import (
"context"
"fmt"
"maps"
"strings"
"github.com/smallnest/langgraphgo/rag"
"github.com/tmc/langchaingo/llms"
)
// LLMRerankerConfig configures the LLM-based reranker
type LLMRerankerConfig struct {
	// TopK is the number of documents to return
	TopK int
	// ScoreThreshold is the minimum relevance score (0-1); results whose
	// combined score falls below it are dropped (0 disables the filter)
	ScoreThreshold float64
	// SystemPrompt is a custom system prompt for scoring
	SystemPrompt string
	// BatchSize is the number of documents to score in a single request (for efficiency)
	BatchSize int
}
// DefaultLLMRerankerConfig returns the default configuration for the LLM
// reranker: top 5 results, batches of 5, no threshold, and a generic
// relevance-scoring system prompt.
func DefaultLLMRerankerConfig() LLMRerankerConfig {
	cfg := LLMRerankerConfig{
		TopK:           5,
		BatchSize:      5,
		ScoreThreshold: 0.0,
	}
	cfg.SystemPrompt = "You are a relevance scoring assistant. Rate how well each document answers " +
		"the query on a scale of 0.0 to 1.0, where 1.0 is perfectly relevant and 0.0 is not relevant. " +
		"Consider semantic meaning, factual accuracy, and completeness."
	return cfg
}
// LLMReranker uses an LLM to score query-document pairs for reranking
type LLMReranker struct {
	llm    llms.Model        // model used to produce relevance scores
	config LLMRerankerConfig // reranking behavior settings
}
// NewLLMReranker creates a new LLM-based reranker.
// Non-positive TopK and BatchSize values are replaced with a default of 5.
func NewLLMReranker(llm llms.Model, config LLMRerankerConfig) *LLMReranker {
	if config.TopK < 1 {
		config.TopK = 5
	}
	if config.BatchSize < 1 {
		config.BatchSize = 5
	}
	return &LLMReranker{llm: llm, config: config}
}
// Rerank reranks documents based on query relevance using LLM scoring.
// Each document receives a weighted blend of its LLM relevance score (70%)
// and its original retrieval score (30%); the list is then sorted by that
// blended score, optionally filtered by ScoreThreshold, and truncated to
// TopK. If an LLM batch fails, that batch keeps its original scores.
func (r *LLMReranker) Rerank(ctx context.Context, query string, documents []rag.DocumentSearchResult) ([]rag.DocumentSearchResult, error) {
	if len(documents) == 0 {
		return []rag.DocumentSearchResult{}, nil
	}
	llmScores := make([]float64, len(documents))
	for start := 0; start < len(documents); start += r.config.BatchSize {
		end := min(start+r.config.BatchSize, len(documents))
		batchScores, err := r.scoreBatch(ctx, query, documents[start:end])
		if err != nil {
			// Best-effort fallback: reuse the retrieval scores for this batch.
			for j := start; j < end; j++ {
				llmScores[j] = documents[j].Score
			}
			continue
		}
		copy(llmScores[start:end], batchScores)
	}
	type docScore struct {
		doc   rag.DocumentSearchResult
		score float64
	}
	// The LLM's judgment is weighted above the original retrieval score.
	const (
		llmWeight      = 0.7
		originalWeight = 0.3
	)
	scored := make([]docScore, len(documents))
	for i, doc := range documents {
		blended := llmWeight*llmScores[i] + originalWeight*doc.Score
		scored[i] = docScore{
			score: blended,
			doc: rag.DocumentSearchResult{
				Document: doc.Document,
				Score:    blended,
				Metadata: r.mergeMetadata(doc.Metadata, map[string]any{
					"llm_rerank_score": llmScores[i],
					"original_score":   doc.Score,
					"reranking_method": "llm",
				}),
			},
		}
	}
	// Sort descending by blended score (small n; quadratic is acceptable here).
	for i := range scored {
		for j := i + 1; j < len(scored); j++ {
			if scored[j].score > scored[i].score {
				scored[i], scored[j] = scored[j], scored[i]
			}
		}
	}
	kept := scored
	if r.config.ScoreThreshold > 0 {
		kept = nil
		for _, ds := range scored {
			if ds.score >= r.config.ScoreThreshold {
				kept = append(kept, ds)
			}
		}
	}
	if len(kept) > r.config.TopK {
		kept = kept[:r.config.TopK]
	}
	out := make([]rag.DocumentSearchResult, len(kept))
	for i, ds := range kept {
		out[i] = ds.doc
	}
	return out, nil
}
// scoreBatch asks the LLM to rate every document in the batch against the
// query in a single request and returns one score per document. Document
// content is truncated to 500 characters to limit token usage.
func (r *LLMReranker) scoreBatch(ctx context.Context, query string, documents []rag.DocumentSearchResult) ([]float64, error) {
	var sb strings.Builder
	sb.WriteString(fmt.Sprintf("Query: %s\n\n", query))
	sb.WriteString("Rate the relevance of each document to the query. Return scores in JSON format.\n\n")
	sb.WriteString("Documents:\n")
	const maxContentLen = 500
	for i, doc := range documents {
		content := doc.Document.Content
		if len(content) > maxContentLen {
			content = content[:maxContentLen] + "..."
		}
		sb.WriteString(fmt.Sprintf("[%d] %s\n", i+1, content))
	}
	sb.WriteString("\nReturn scores in format: [score1, score2, ...] where each score is between 0.0 and 1.0")
	messages := []llms.MessageContent{
		llms.TextParts("system", r.config.SystemPrompt),
		llms.TextParts("human", sb.String()),
	}
	response, err := r.llm.GenerateContent(ctx, messages)
	if err != nil {
		return nil, fmt.Errorf("LLM generation failed: %w", err)
	}
	if len(response.Choices) == 0 {
		return nil, fmt.Errorf("no response from LLM")
	}
	scores, err := r.parseScores(response.Choices[0].Content, len(documents))
	if err != nil {
		return nil, fmt.Errorf("failed to parse scores: %w", err)
	}
	return scores, nil
}
// parseScores extracts a list of float scores from the LLM's reply. It
// first looks for a bracketed, comma-separated array; if that fails to
// yield exactly expectedCount values, it falls back to scanning the text
// for bare numbers via extractNumbers.
func (r *LLMReranker) parseScores(response string, expectedCount int) ([]float64, error) {
	trimmed := strings.TrimSpace(response)
	lo := strings.Index(trimmed, "[")
	hi := strings.LastIndex(trimmed, "]")
	if lo == -1 || hi == -1 {
		// No array syntax present; scan for loose numbers instead.
		return r.extractNumbers(trimmed, expectedCount)
	}
	scores := make([]float64, 0, expectedCount)
	for _, field := range strings.Split(trimmed[lo+1:hi], ",") {
		var v float64
		if _, err := fmt.Sscanf(strings.TrimSpace(field), "%f", &v); err == nil {
			scores = append(scores, v)
		}
	}
	if len(scores) != expectedCount {
		return r.extractNumbers(trimmed, expectedCount)
	}
	return scores, nil
}
// extractNumbers scans whitespace-separated tokens for values in [0, 1]
// as a last-resort parse of the LLM reply. If fewer than expectedCount
// values are found, the remainder is padded with a neutral 0.5.
func (r *LLMReranker) extractNumbers(text string, expectedCount int) ([]float64, error) {
	scores := make([]float64, 0, expectedCount)
	for token := range strings.FieldsSeq(text) {
		var v float64
		if _, err := fmt.Sscanf(token, "%f", &v); err != nil {
			continue
		}
		if v < 0 || v > 1 {
			continue
		}
		scores = append(scores, v)
		if len(scores) == expectedCount {
			break
		}
	}
	if len(scores) == expectedCount {
		return scores, nil
	}
	// Not enough parseable scores: pad with a middle-of-the-road default.
	padded := make([]float64, expectedCount)
	for i := range padded {
		padded[i] = 0.5
	}
	copy(padded, scores)
	return padded, nil
}
// mergeMetadata returns a fresh map containing every entry of m1 overlaid
// with every entry of m2 (m2 wins on key collisions). Neither input map is
// modified.
func (r *LLMReranker) mergeMetadata(m1, m2 map[string]any) map[string]any {
	merged := make(map[string]any, len(m1)+len(m2))
	maps.Copy(merged, m1)
	maps.Copy(merged, m2)
	return merged
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/graph.go | rag/retriever/graph.go | package retriever
import (
"context"
"fmt"
"strings"
"github.com/smallnest/langgraphgo/rag"
)
// GraphRetriever implements document retrieval using knowledge graphs
type GraphRetriever struct {
	knowledgeGraph rag.KnowledgeGraph  // graph queried for entities and relationships
	embedder       rag.Embedder        // NOTE(review): not used by the visible code paths; presumably reserved for the similarity fallback
	config         rag.RetrievalConfig // default retrieval parameters
}
// NewGraphRetriever creates a new graph retriever. Zero-valued config
// fields are filled with defaults: K=4, ScoreThreshold=0.3,
// SearchType="graph".
func NewGraphRetriever(knowledgeGraph rag.KnowledgeGraph, embedder rag.Embedder, config rag.RetrievalConfig) *GraphRetriever {
	if config.K == 0 {
		config.K = 4
	}
	if config.ScoreThreshold == 0 {
		config.ScoreThreshold = 0.3
	}
	if config.SearchType == "" {
		config.SearchType = "graph"
	}
	r := &GraphRetriever{config: config}
	r.knowledgeGraph = knowledgeGraph
	r.embedder = embedder
	return r
}
// Retrieve retrieves documents based on a query using the knowledge graph,
// returning at most the configured default number of results (config.K).
func (r *GraphRetriever) Retrieve(ctx context.Context, query string) ([]rag.Document, error) {
	return r.RetrieveWithK(ctx, query, r.config.K)
}
// RetrieveWithK retrieves up to k documents for the query, discarding the
// per-result scores.
func (r *GraphRetriever) RetrieveWithK(ctx context.Context, query string, k int) ([]rag.Document, error) {
	cfg := r.config
	cfg.K = k
	hits, err := r.RetrieveWithConfig(ctx, query, &cfg)
	if err != nil {
		return nil, err
	}
	docs := make([]rag.Document, 0, len(hits))
	for _, hit := range hits {
		docs = append(docs, hit.Document)
	}
	return docs, nil
}
// RetrieveWithConfig retrieves documents with custom configuration. It
// extracts candidate entities from the query, walks the knowledge graph
// starting from the first match (depth 3), converts the resulting entities
// and relationships into documents, then applies the score threshold and K
// limit. With no entity match it falls back to a similarity search.
func (r *GraphRetriever) RetrieveWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) ([]rag.DocumentSearchResult, error) {
	if config == nil {
		config = &r.config
	}
	entities, err := r.extractEntitiesFromQuery(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to extract entities from query: %w", err)
	}
	if len(entities) == 0 {
		return r.performEntitySimilaritySearch(ctx, query, config)
	}
	// Traverse the graph from the first recognized entity.
	first := entities[0]
	graphQuery := &rag.GraphQuery{
		StartEntity: first.ID,
		EntityType:  first.Type,
		MaxDepth:    3, // default traversal depth
	}
	if config.Filter != nil {
		graphQuery.Filters = config.Filter
	}
	graphResult, err := r.knowledgeGraph.Query(ctx, graphQuery)
	if err != nil {
		return nil, fmt.Errorf("graph query failed: %w", err)
	}
	results := r.graphResultsToSearchResults(graphResult, entities)
	if config.ScoreThreshold > 0 {
		kept := make([]rag.DocumentSearchResult, 0, len(results))
		for _, res := range results {
			if res.Score >= config.ScoreThreshold {
				kept = append(kept, res)
			}
		}
		results = kept
	}
	if len(results) > config.K {
		results = results[:config.K]
	}
	return results, nil
}
// extractEntitiesFromQuery looks up query words in the knowledge graph and
// returns the ones that resolve to known entities. This is a naive
// stand-in for real NLP entity extraction: words shorter than three
// characters and stop words are skipped, and lookup failures are ignored.
func (r *GraphRetriever) extractEntitiesFromQuery(ctx context.Context, query string) ([]*rag.Entity, error) {
	found := make([]*rag.Entity, 0)
	for _, word := range r.extractWords(query) {
		if len(word) < 3 || r.isCommonWord(word) {
			continue
		}
		// Best-effort lookup: a miss simply means the word is not an entity.
		if entity, err := r.knowledgeGraph.GetEntity(ctx, word); err == nil && entity != nil {
			found = append(found, entity)
		}
	}
	return found, nil
}
// performEntitySimilaritySearch is the fallback used when no entities are
// recognized in the query. A full implementation would run similarity
// search over entity names/descriptions, but the current knowledge-graph
// interface exposes no way to enumerate entities, so this always returns
// an empty (non-nil) result set.
func (r *GraphRetriever) performEntitySimilaritySearch(ctx context.Context, query string, config *rag.RetrievalConfig) ([]rag.DocumentSearchResult, error) {
	empty := make([]rag.DocumentSearchResult, 0)
	return empty, nil
}
// graphResultsToSearchResults turns a graph query result into scored
// document results: one document per entity (scored by its overlap with
// the query entities) and one per relationship (scored by its confidence).
func (r *GraphRetriever) graphResultsToSearchResults(graphResult *rag.GraphQueryResult, queryEntities []*rag.Entity) []rag.DocumentSearchResult {
	out := make([]rag.DocumentSearchResult, 0, len(graphResult.Entities)+len(graphResult.Relationships))
	for _, entity := range graphResult.Entities {
		doc := rag.Document{
			ID:      entity.ID,
			Content: r.entityToDocumentContent(entity),
			Metadata: map[string]any{
				"entity_type": entity.Type,
				"entity_name": entity.Name,
				"properties":  entity.Properties,
				"source":      "knowledge_graph",
				"created_at":  entity.CreatedAt,
			},
			CreatedAt: entity.CreatedAt,
			UpdatedAt: entity.UpdatedAt,
		}
		out = append(out, rag.DocumentSearchResult{
			Document: doc,
			Score:    r.calculateEntityScore(entity, queryEntities),
			Metadata: map[string]any{
				"entity_match": true,
				"entity_type":  entity.Type,
			},
		})
	}
	for _, rel := range graphResult.Relationships {
		doc := rag.Document{
			ID:      rel.ID,
			Content: r.relationshipToDocumentContent(rel),
			Metadata: map[string]any{
				"relationship_type": rel.Type,
				"source_entity":     rel.Source,
				"target_entity":     rel.Target,
				"source":            "knowledge_graph",
				"confidence":        rel.Confidence,
			},
			CreatedAt: rel.CreatedAt,
		}
		out = append(out, rag.DocumentSearchResult{
			Document: doc,
			Score:    rel.Confidence, // relationships are ranked by their own confidence
			Metadata: map[string]any{
				"relationship_match": true,
				"relationship_type":  rel.Type,
			},
		})
	}
	return out
}
// entityToDocumentContent renders an entity as plain text: name and type,
// then its description (if any) followed by the remaining properties.
// NOTE(review): property lines come from map iteration, so their order is
// not deterministic across calls.
func (r *GraphRetriever) entityToDocumentContent(entity *rag.Entity) string {
	var b strings.Builder
	fmt.Fprintf(&b, "Entity: %s\nType: %s\n", entity.Name, entity.Type)
	if entity.Properties != nil {
		if desc, ok := entity.Properties["description"]; ok {
			fmt.Fprintf(&b, "Description: %v\n", desc)
		}
		for key, value := range entity.Properties {
			if key == "description" {
				continue
			}
			fmt.Fprintf(&b, "%s: %v\n", key, value)
		}
	}
	return b.String()
}
// relationshipToDocumentContent renders a relationship as plain text: the
// source/target pair and type, the confidence when positive, and any extra
// properties (map iteration order, hence line order, is not deterministic).
func (r *GraphRetriever) relationshipToDocumentContent(relationship *rag.Relationship) string {
	var b strings.Builder
	fmt.Fprintf(&b, "Relationship: %s -> %s\nType: %s\n",
		relationship.Source, relationship.Target, relationship.Type)
	if relationship.Confidence > 0 {
		fmt.Fprintf(&b, "Confidence: %.2f\n", relationship.Confidence)
	}
	// Ranging over a nil map is a no-op, so no nil guard is needed.
	for key, value := range relationship.Properties {
		fmt.Fprintf(&b, "%s: %v\n", key, value)
	}
	return b.String()
}
// calculateEntityScore rates an entity's relevance to the query entities:
// a 0.5 base, plus 0.5 for an ID/name match and 0.2 for a matching type
// (summed over all query entities), capped at 1.0.
func (r *GraphRetriever) calculateEntityScore(entity *rag.Entity, queryEntities []*rag.Entity) float64 {
	const base = 0.5
	score := base
	for _, qe := range queryEntities {
		if entity.ID == qe.ID || entity.Name == qe.Name {
			score += 0.5
		}
		if entity.Type == qe.Type {
			score += 0.2
		}
	}
	if score > 1.0 {
		return 1.0
	}
	return score
}
// extractWords splits text into maximal runs of ASCII alphanumeric
// characters; every other character is treated as a separator.
func (r *GraphRetriever) extractWords(text string) []string {
	words := make([]string, 0)
	// Buffer runes instead of `current += string(char)`, which copies the
	// partial word on every character and is O(n^2) over long inputs.
	current := make([]rune, 0, 16)
	for _, char := range text {
		if isAlphaNumeric(char) {
			current = append(current, char)
			continue
		}
		if len(current) > 0 {
			words = append(words, string(current))
			current = current[:0]
		}
	}
	if len(current) > 0 {
		words = append(words, string(current))
	}
	return words
}
// isCommonWord reports whether word is an English stop word (articles,
// prepositions, auxiliaries, pronouns, question words). Matching is
// case-insensitive.
func (r *GraphRetriever) isCommonWord(word string) bool {
	switch strings.ToLower(word) {
	case "the", "a", "an", "and", "or", "but",
		"in", "on", "at", "to", "for", "of",
		"with", "by", "is", "are", "was", "were",
		"be", "have", "has", "had", "do", "does",
		"did", "will", "would", "could", "should",
		"may", "might", "can", "this", "that", "these",
		"those", "i", "you", "he", "she", "it",
		"we", "they", "what", "where", "when", "why",
		"how", "who", "which", "whose", "whom":
		return true
	}
	return false
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/hybrid.go | rag/retriever/hybrid.go | package retriever
import (
	"context"
	"fmt"
	"sort"

	"github.com/smallnest/langgraphgo/rag"
)
// HybridRetriever combines multiple retrieval strategies
type HybridRetriever struct {
	retrievers []rag.Retriever     // strategies consulted for every query
	weights    []float64           // per-retriever score weights, parallel to retrievers
	config     rag.RetrievalConfig // default retrieval parameters
}
// NewHybridRetriever creates a new hybrid retriever that combines multiple
// retrievers. If weights is empty or its length differs from retrievers,
// missing weights default to 1.0 and extras are ignored. The weights are
// always copied so a slice held by the caller cannot alias internal state
// (previously a same-length slice was stored directly and could later be
// mutated through AddRetriever's append).
func NewHybridRetriever(retrievers []rag.Retriever, weights []float64, config rag.RetrievalConfig) *HybridRetriever {
	normalized := make([]float64, len(retrievers))
	for i := range normalized {
		if i < len(weights) {
			normalized[i] = weights[i]
		} else {
			normalized[i] = 1.0 // pad missing weights with equal weight
		}
	}
	return &HybridRetriever{
		retrievers: retrievers,
		weights:    normalized,
		config:     config,
	}
}
// Retrieve retrieves documents using all configured retrievers and combines results,
// returning at most the configured default number of documents (config.K).
func (h *HybridRetriever) Retrieve(ctx context.Context, query string) ([]rag.Document, error) {
	return h.RetrieveWithK(ctx, query, h.config.K)
}
// RetrieveWithK retrieves up to k documents using the hybrid strategy,
// dropping the combined scores from the results.
func (h *HybridRetriever) RetrieveWithK(ctx context.Context, query string, k int) ([]rag.Document, error) {
	cfg := h.config
	cfg.K = k
	hits, err := h.RetrieveWithConfig(ctx, query, &cfg)
	if err != nil {
		return nil, err
	}
	docs := make([]rag.Document, 0, len(hits))
	for _, hit := range hits {
		docs = append(docs, hit.Document)
	}
	return docs, nil
}
// RetrieveWithConfig runs the query against every configured retriever,
// merges their results with weighted scoring, filters by ScoreThreshold,
// and truncates to config.K. A failing retriever contributes no results
// rather than failing the whole call.
func (h *HybridRetriever) RetrieveWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) ([]rag.DocumentSearchResult, error) {
	if config == nil {
		config = &h.config
	}
	perRetriever := make([][]rag.DocumentSearchResult, len(h.retrievers))
	for i, ret := range h.retrievers {
		hits, err := ret.RetrieveWithConfig(ctx, query, config)
		if err != nil {
			// Best-effort: skip retrievers that error out.
			perRetriever[i] = []rag.DocumentSearchResult{}
			continue
		}
		perRetriever[i] = hits
	}
	merged := h.combineResults(perRetriever)
	if config.ScoreThreshold > 0 {
		kept := make([]rag.DocumentSearchResult, 0, len(merged))
		for _, res := range merged {
			if res.Score >= config.ScoreThreshold {
				kept = append(kept, res)
			}
		}
		merged = kept
	}
	if len(merged) > config.K {
		merged = merged[:config.K]
	}
	return merged, nil
}
// combineResults merges per-retriever result lists into one ranked list.
// A document's score is the weighted average of its scores across the
// retrievers that returned it, boosted 10% when more than one did and
// capped at 1.0. Results are returned sorted by score, descending.
func (h *HybridRetriever) combineResults(allResults [][]rag.DocumentSearchResult) []rag.DocumentSearchResult {
	byID := make(map[string]*CombinedDocumentScore)
	for idx, results := range allResults {
		weight := h.weights[idx]
		source := fmt.Sprintf("retriever_%d", idx)
		for _, res := range results {
			id := res.Document.ID
			entry, seen := byID[id]
			if !seen {
				byID[id] = &CombinedDocumentScore{
					Document:       res.Document,
					TotalScore:     res.Score * weight,
					RetrieverCount: 1,
					Sources:        []string{source},
					Metadata:       res.Metadata,
				}
				continue
			}
			entry.TotalScore += res.Score * weight
			entry.RetrieverCount++
			entry.Sources = append(entry.Sources, source)
		}
	}
	combined := make([]rag.DocumentSearchResult, 0, len(byID))
	for _, entry := range byID {
		score := entry.TotalScore / float64(entry.RetrieverCount)
		if entry.RetrieverCount > 1 {
			score *= 1.1 // reward agreement across retrievers
		}
		if score > 1.0 {
			score = 1.0
		}
		combined = append(combined, rag.DocumentSearchResult{
			Document: entry.Document,
			Score:    score,
			Metadata: map[string]any{
				"retriever_count":   entry.RetrieverCount,
				"sources":           entry.Sources,
				"original_metadata": entry.Metadata,
			},
		})
	}
	h.sortResults(combined)
	return combined
}
// CombinedDocumentScore tracks score information for a document from multiple retrievers
type CombinedDocumentScore struct {
	Document       rag.Document   // the document being scored
	TotalScore     float64        // sum of weighted scores contributed so far
	RetrieverCount int            // how many retrievers returned this document
	Sources        []string       // labels of the contributing retrievers
	Metadata       map[string]any // metadata from the first retriever that returned it
}
// sortResults sorts results by score in descending order. It uses the
// standard library's O(n log n) sort in place of the previous hand-rolled
// bubble sort; SliceStable preserves the relative order of equal scores,
// matching the stability of the bubble sort it replaces.
func (h *HybridRetriever) sortResults(results []rag.DocumentSearchResult) {
	sort.SliceStable(results, func(i, j int) bool {
		return results[i].Score > results[j].Score
	})
}
// GetRetrieverCount returns the number of retrievers currently registered
// with the hybrid strategy.
func (h *HybridRetriever) GetRetrieverCount() int {
	return len(h.retrievers)
}
// GetWeights returns a copy of the per-retriever weights, so callers
// cannot mutate the retriever's internal state through the result.
func (h *HybridRetriever) GetWeights() []float64 {
	out := make([]float64, len(h.weights))
	copy(out, h.weights)
	return out
}
// SetWeights replaces the per-retriever weights. The input is copied, and
// its length must equal the number of registered retrievers.
func (h *HybridRetriever) SetWeights(weights []float64) error {
	if len(weights) != len(h.retrievers) {
		return fmt.Errorf("number of weights (%d) must match number of retrievers (%d)",
			len(weights), len(h.retrievers))
	}
	replacement := make([]float64, len(weights))
	copy(replacement, weights)
	h.weights = replacement
	return nil
}
// AddRetriever adds a new retriever to the hybrid strategy, appending the
// given weight so the weights slice stays parallel to the retrievers.
// There is no internal locking; callers must serialize concurrent access.
func (h *HybridRetriever) AddRetriever(retriever rag.Retriever, weight float64) {
	h.retrievers = append(h.retrievers, retriever)
	h.weights = append(h.weights, weight)
}
// RemoveRetriever removes the retriever (and its weight) at the given
// index; it returns an error when index is out of range.
func (h *HybridRetriever) RemoveRetriever(index int) error {
	if index < 0 || index >= len(h.retrievers) {
		return fmt.Errorf("index %d out of range", index)
	}
	// Shift the tail down one slot and truncate.
	copy(h.retrievers[index:], h.retrievers[index+1:])
	h.retrievers = h.retrievers[:len(h.retrievers)-1]
	copy(h.weights[index:], h.weights[index+1:])
	h.weights = h.weights[:len(h.weights)-1]
	return nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/common_test.go | rag/retriever/common_test.go | package retriever
import (
"context"
"github.com/smallnest/langgraphgo/rag"
)
// mockEmbedder is a test double for rag.Embedder that returns fixed
// two-dimensional vectors.
type mockEmbedder struct{}
// EmbedDocument returns a fixed 2-dimensional embedding regardless of input.
func (m *mockEmbedder) EmbedDocument(ctx context.Context, text string) ([]float32, error) {
	return []float32{0.1, 0.2}, nil
}
// EmbedDocuments returns one fixed 2-dimensional embedding per input text.
// The previous version always returned a single embedding regardless of
// len(texts), which breaks any caller that indexes the result by position.
func (m *mockEmbedder) EmbedDocuments(ctx context.Context, texts []string) ([][]float32, error) {
	out := make([][]float32, len(texts))
	for i := range out {
		out[i] = []float32{0.1, 0.2}
	}
	return out, nil
}
// GetDimension reports the embedding width produced by this mock (2).
func (m *mockEmbedder) GetDimension() int { return 2 }
// mockRetriever is a test double for rag.Retriever that always returns the
// canned docs slice.
type mockRetriever struct {
	docs []rag.Document // fixed results returned by every method
}
// Retrieve returns the canned documents, ignoring the query.
func (m *mockRetriever) Retrieve(ctx context.Context, query string) ([]rag.Document, error) {
	return m.docs, nil
}

// RetrieveWithK returns the canned documents, ignoring both query and k.
func (m *mockRetriever) RetrieveWithK(ctx context.Context, query string, k int) ([]rag.Document, error) {
	return m.docs, nil
}
// RetrieveWithConfig wraps every canned document in a search result with a
// fixed score of 0.9; the config is ignored.
func (m *mockRetriever) RetrieveWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) ([]rag.DocumentSearchResult, error) {
	out := make([]rag.DocumentSearchResult, 0, len(m.docs))
	for _, doc := range m.docs {
		out = append(out, rag.DocumentSearchResult{Document: doc, Score: 0.9})
	}
	return out, nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/cohere_reranker.go | rag/retriever/cohere_reranker.go | package retriever
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"maps"
"net/http"
"os"
"time"
"github.com/smallnest/langgraphgo/rag"
)
// CohereRerankerConfig configures the Cohere reranker.
// Zero-valued fields are filled with defaults by NewCohereReranker.
type CohereRerankerConfig struct {
	// Model is the Cohere rerank model to use
	// Options: "rerank-v3.5", "rerank-english-v3.0", "rerank-multilingual-v3.0"
	Model string
	// TopK is the number of documents to return
	TopK int
	// APIBase is the custom API base URL (optional)
	APIBase string
	// Timeout is the HTTP request timeout
	Timeout time.Duration
}
// DefaultCohereRerankerConfig returns the default configuration for the
// Cohere reranker: the rerank-v3.5 model, top 5 results, the public API
// endpoint, and a 30-second HTTP timeout.
func DefaultCohereRerankerConfig() CohereRerankerConfig {
	var cfg CohereRerankerConfig
	cfg.Model = "rerank-v3.5"
	cfg.TopK = 5
	cfg.APIBase = "https://api.cohere.ai/v1/rerank"
	cfg.Timeout = 30 * time.Second
	return cfg
}
// CohereReranker uses Cohere's Rerank API to rerank documents
type CohereReranker struct {
	apiKey string               // Cohere API key (parameter or COHERE_API_KEY env var)
	client *http.Client         // HTTP client with the configured timeout
	config CohereRerankerConfig // model, TopK, endpoint, and timeout settings
}
// NewCohereReranker creates a new Cohere reranker.
// The API key can be provided via the apiKey parameter or the
// COHERE_API_KEY environment variable. Zero-valued config fields are
// filled individually from DefaultCohereRerankerConfig.
// (Previously an empty Model replaced the caller's ENTIRE config with the
// defaults, silently discarding any TopK/APIBase/Timeout they had set.)
func NewCohereReranker(apiKey string, config CohereRerankerConfig) *CohereReranker {
	if apiKey == "" {
		apiKey = os.Getenv("COHERE_API_KEY")
	}
	defaults := DefaultCohereRerankerConfig()
	if config.Model == "" {
		config.Model = defaults.Model
	}
	if config.TopK <= 0 {
		config.TopK = defaults.TopK
	}
	if config.Timeout == 0 {
		config.Timeout = defaults.Timeout
	}
	if config.APIBase == "" {
		config.APIBase = defaults.APIBase
	}
	return &CohereReranker{
		apiKey: apiKey,
		client: &http.Client{Timeout: config.Timeout},
		config: config,
	}
}
// cohereRerankRequest represents the request body for Cohere Rerank API
type cohereRerankRequest struct {
	Query      string           `json:"query"`
	Documents  []cohereDocument `json:"documents"`
	TopN       int              `json:"top_n,omitempty"`       // max results the API should return
	Model      string           `json:"model,omitempty"`       // rerank model name
	RankFields []string         `json:"rank_fields,omitempty"` // document fields to rank on (optional)
}

// cohereDocument represents a document in the Cohere API
type cohereDocument struct {
	Text  string `json:"text"`
	Title string `json:"title,omitempty"` // optional title taken from document metadata
}

// cohereRerankResponse represents the response from Cohere Rerank API
type cohereRerankResponse struct {
	Results []cohereRerankResult `json:"results"`
	Meta    cohereMeta           `json:"meta"`
}

// cohereRerankResult represents a single rerank result
type cohereRerankResult struct {
	Index          int            `json:"index"`           // position of the document in the request
	RelevanceScore float64        `json:"relevance_score"` // API's relevance score for the document
	Document       cohereDocument `json:"document"`
}

// cohereMeta represents metadata in the response
type cohereMeta struct {
	APIVersion struct {
		Version string `json:"version"`
	} `json:"api_version"`
}
// Rerank reranks documents based on query relevance using Cohere's Rerank
// API. It sends the query plus each document's text (and optional "title"
// metadata) to the API and maps the returned relevance scores back onto
// the input documents. At most config.TopK results are returned, in the
// order the API ranked them.
func (r *CohereReranker) Rerank(ctx context.Context, query string, documents []rag.DocumentSearchResult) ([]rag.DocumentSearchResult, error) {
	if len(documents) == 0 {
		return []rag.DocumentSearchResult{}, nil
	}
	if r.apiKey == "" {
		return nil, fmt.Errorf("Cohere API key is required. Set COHERE_API_KEY environment variable or pass apiKey parameter")
	}
	// Prepare request body.
	reqDocs := make([]cohereDocument, len(documents))
	for i, doc := range documents {
		title := ""
		if t, ok := doc.Document.Metadata["title"].(string); ok {
			title = t
		}
		reqDocs[i] = cohereDocument{
			Text:  doc.Document.Content,
			Title: title,
		}
	}
	reqBody := cohereRerankRequest{
		Query:     query,
		Documents: reqDocs,
		TopN:      r.config.TopK,
		Model:     r.config.Model,
	}
	jsonBody, err := json.Marshal(reqBody)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "POST", r.config.APIBase, bytes.NewReader(jsonBody))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+r.apiKey)
	req.Header.Set("X-Client-Name", "langgraphgo")
	resp, err := r.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Cohere API returned status %d: %s", resp.StatusCode, string(body))
	}
	var rerankResp cohereRerankResponse
	if err := json.Unmarshal(body, &rerankResp); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}
	// Map results back to the input documents, validating the index the
	// API reports instead of trusting it blindly: an out-of-range index
	// previously caused an index-out-of-bounds panic.
	results := make([]rag.DocumentSearchResult, 0, len(rerankResp.Results))
	for _, result := range rerankResp.Results {
		if result.Index < 0 || result.Index >= len(documents) {
			return nil, fmt.Errorf("Cohere API returned invalid document index %d", result.Index)
		}
		originalDoc := documents[result.Index]
		results = append(results, rag.DocumentSearchResult{
			Document: originalDoc.Document,
			Score:    result.RelevanceScore,
			Metadata: r.mergeMetadata(originalDoc.Metadata, map[string]any{
				"cohere_rerank_score": result.RelevanceScore,
				"original_score":      originalDoc.Score,
				"original_index":      result.Index,
				"reranking_method":    "cohere",
				"rerank_model":        r.config.Model,
			}),
		})
	}
	return results, nil
}
// mergeMetadata combines two metadata maps into a fresh map; keys present
// in both take their value from m2. Nil inputs are handled transparently.
func (r *CohereReranker) mergeMetadata(m1, m2 map[string]any) map[string]any {
	merged := make(map[string]any, len(m1)+len(m2))
	for k, v := range m1 {
		merged[k] = v
	}
	for k, v := range m2 {
		merged[k] = v
	}
	return merged
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/graph_test.go | rag/retriever/graph_test.go | package retriever
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
// mockKG is a minimal in-memory KnowledgeGraph stub for the tests;
// Query always returns its fixed entity list.
type mockKG struct {
	entities []*rag.Entity
}
// Query returns every stored entity with a fixed relevance score of 0.9.
func (m *mockKG) Query(ctx context.Context, q *rag.GraphQuery) (*rag.GraphQueryResult, error) {
	scores := make([]float64, 0, len(m.entities))
	for range m.entities {
		scores = append(scores, 0.9)
	}
	return &rag.GraphQueryResult{Entities: m.entities, Scores: scores}, nil
}
// AddEntity is a no-op for the mock graph.
func (m *mockKG) AddEntity(ctx context.Context, e *rag.Entity) error { return nil }

// AddRelationship is a no-op for the mock graph.
func (m *mockKG) AddRelationship(ctx context.Context, r *rag.Relationship) error { return nil }

// GetRelatedEntities returns nothing; traversal is not exercised here.
func (m *mockKG) GetRelatedEntities(ctx context.Context, id string, d int) ([]*rag.Entity, error) {
	return nil, nil
}
// GetEntity looks up an entity whose ID or Name equals id; it returns
// (nil, nil) when nothing matches, mirroring a "not found" result.
func (m *mockKG) GetEntity(ctx context.Context, id string) (*rag.Entity, error) {
	for _, ent := range m.entities {
		if ent.ID != id && ent.Name != id {
			continue
		}
		return ent, nil
	}
	return nil, nil
}
// TestGraphRetriever wires a GraphRetriever to the mock knowledge graph and
// verifies that a query naming a known entity yields at least one document.
func TestGraphRetriever(t *testing.T) {
	ctx := context.Background()
	kg := &mockKG{entities: []*rag.Entity{{ID: "e1", Name: "entity1", Type: "person"}}}
	embedder := &mockEmbedder{}
	r := NewGraphRetriever(kg, embedder, rag.RetrievalConfig{K: 1})
	assert.NotNil(t, r)
	t.Run("Retrieve", func(t *testing.T) {
		// Use a name that will be matched as an entity ID in extractEntitiesFromQuery
		docs, err := r.Retrieve(ctx, "entity1")
		assert.NoError(t, err)
		assert.NotEmpty(t, docs)
	})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/jina_reranker.go | rag/retriever/jina_reranker.go | package retriever
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"maps"
"net/http"
"os"
"time"
"github.com/smallnest/langgraphgo/rag"
)
// JinaRerankerConfig configures the Jina reranker.
type JinaRerankerConfig struct {
	// Model is the Jina rerank model to use.
	// Options: "jina-reranker-v1-base-en", "jina-reranker-v2-base-multilingual"
	Model string

	// TopK is the number of documents to request from the API (top_n).
	TopK int

	// APIBase is a custom API base URL (optional; defaults to Jina's
	// public rerank endpoint).
	APIBase string

	// Timeout is the HTTP request timeout applied to the underlying client.
	Timeout time.Duration
}
// DefaultJinaRerankerConfig returns the baseline settings used when the
// caller supplies an incomplete configuration.
func DefaultJinaRerankerConfig() JinaRerankerConfig {
	var cfg JinaRerankerConfig
	cfg.Model = "jina-reranker-v2-base-multilingual"
	cfg.TopK = 5
	cfg.APIBase = "https://api.jina.ai/v1/rerank"
	cfg.Timeout = 30 * time.Second
	return cfg
}
// JinaReranker uses Jina AI's Rerank API to rerank documents.
// The embedded http.Client carries the configured timeout.
type JinaReranker struct {
	apiKey string
	client *http.Client
	config JinaRerankerConfig
}
// NewJinaReranker creates a new Jina reranker.
// The API key can be provided via the apiKey parameter or the JINA_API_KEY
// environment variable.
//
// Fix: zero-valued config fields are now defaulted individually from
// DefaultJinaRerankerConfig. Previously an empty Model replaced the WHOLE
// config with the defaults, silently discarding a caller's explicit
// TopK/APIBase/Timeout settings.
func NewJinaReranker(apiKey string, config JinaRerankerConfig) *JinaReranker {
	if apiKey == "" {
		apiKey = os.Getenv("JINA_API_KEY")
	}
	def := DefaultJinaRerankerConfig()
	if config.Model == "" {
		config.Model = def.Model
	}
	if config.TopK == 0 {
		config.TopK = def.TopK
	}
	if config.Timeout == 0 {
		config.Timeout = def.Timeout
	}
	if config.APIBase == "" {
		config.APIBase = def.APIBase
	}
	return &JinaReranker{
		apiKey: apiKey,
		client: &http.Client{
			Timeout: config.Timeout,
		},
		config: config,
	}
}
// jinaRerankRequest represents the request body for the Jina Rerank API.
type jinaRerankRequest struct {
	Query     string         `json:"query"`
	Documents []jinaDocument `json:"documents"`
	TopN      int            `json:"top_n,omitempty"`
	Model     string         `json:"model,omitempty"`
}

// jinaDocument represents one document sent to / echoed by the Jina API.
type jinaDocument struct {
	Text  string `json:"text"`
	Title string `json:"title,omitempty"`
}

// jinaRerankResponse represents the response from the Jina Rerank API.
type jinaRerankResponse struct {
	Model   string             `json:"model"`
	Results []jinaRerankResult `json:"results"`
	Usage   jinaUsage          `json:"usage"`
}

// jinaRerankResult is a single rerank hit; Index refers to the position
// of the document in the request's Documents slice.
type jinaRerankResult struct {
	Index          int          `json:"index"`
	Document       jinaDocument `json:"document"`
	RelevanceScore float64      `json:"relevance_score"`
}

// jinaUsage reports token usage for the rerank call.
type jinaUsage struct {
	TotalTokens int `json:"total_tokens"`
}
// Rerank reranks documents by query relevance using Jina's Rerank API and
// returns them in the API's relevance order, each annotated with rerank
// metadata (jina_rerank_score, original_score, original_index, ...).
//
// Fix: indices returned by the API are bounds-checked before being used to
// index the input slice; a malformed response previously caused a panic.
func (r *JinaReranker) Rerank(ctx context.Context, query string, documents []rag.DocumentSearchResult) ([]rag.DocumentSearchResult, error) {
	if len(documents) == 0 {
		return []rag.DocumentSearchResult{}, nil
	}
	if r.apiKey == "" {
		return nil, fmt.Errorf("Jina API key is required. Set JINA_API_KEY environment variable or pass apiKey parameter")
	}
	// Prepare request body; pass through a document title when present.
	reqDocs := make([]jinaDocument, len(documents))
	for i, doc := range documents {
		title := ""
		if t, ok := doc.Document.Metadata["title"].(string); ok {
			title = t
		}
		reqDocs[i] = jinaDocument{
			Text:  doc.Document.Content,
			Title: title,
		}
	}
	reqBody := jinaRerankRequest{
		Query:     query,
		Documents: reqDocs,
		TopN:      r.config.TopK,
		Model:     r.config.Model,
	}
	jsonBody, err := json.Marshal(reqBody)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	// Create HTTP request
	req, err := http.NewRequestWithContext(ctx, "POST", r.config.APIBase, bytes.NewReader(jsonBody))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+r.apiKey)
	// Send request
	resp, err := r.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()
	// Read response
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	// Check status code
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Jina API returned status %d: %s", resp.StatusCode, string(body))
	}
	// Parse response
	var rerankResp jinaRerankResponse
	if err := json.Unmarshal(body, &rerankResp); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}
	// Map results back to input documents, skipping out-of-range indices
	// (previously this indexed unchecked and could panic).
	results := make([]rag.DocumentSearchResult, 0, len(rerankResp.Results))
	for _, result := range rerankResp.Results {
		if result.Index < 0 || result.Index >= len(documents) {
			continue
		}
		originalDoc := documents[result.Index]
		results = append(results, rag.DocumentSearchResult{
			Document: originalDoc.Document,
			Score:    result.RelevanceScore,
			Metadata: r.mergeMetadata(originalDoc.Metadata, map[string]any{
				"jina_rerank_score": result.RelevanceScore,
				"original_score":    originalDoc.Score,
				"original_index":    result.Index,
				"reranking_method":  "jina",
				"rerank_model":      rerankResp.Model,
			}),
		})
	}
	return results, nil
}
// mergeMetadata combines two metadata maps into a fresh map; keys present
// in both take their value from m2. Nil inputs are handled transparently.
func (r *JinaReranker) mergeMetadata(m1, m2 map[string]any) map[string]any {
	merged := make(map[string]any, len(m1)+len(m2))
	for k, v := range m1 {
		merged[k] = v
	}
	for k, v := range m2 {
		merged[k] = v
	}
	return merged
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/cross_encoder_reranker.go | rag/retriever/cross_encoder_reranker.go | package retriever
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"maps"
"net/http"
"time"
"github.com/smallnest/langgraphgo/rag"
)
// CrossEncoderRerankerConfig configures the Cross-Encoder reranker.
type CrossEncoderRerankerConfig struct {
	// ModelName is the name of the cross-encoder model.
	// Common models:
	//   - "cross-encoder/ms-marco-MiniLM-L-6-v2"
	//   - "cross-encoder/ms-marco-MiniLM-L-12-v2"
	//   - "cross-encoder/mmarco-mMiniLMv2-L12-H384-v1"
	ModelName string

	// TopK is the number of documents to request from the service (top_n).
	TopK int

	// APIBase is the URL of the cross-encoder service.
	// This can be a local service (e.g. http://localhost:8000/rerank)
	// or a remote one.
	APIBase string

	// Timeout is the HTTP request timeout applied to the underlying client.
	Timeout time.Duration
}
// DefaultCrossEncoderRerankerConfig returns the baseline settings used
// when the caller supplies an incomplete configuration.
func DefaultCrossEncoderRerankerConfig() CrossEncoderRerankerConfig {
	var cfg CrossEncoderRerankerConfig
	cfg.ModelName = "cross-encoder/ms-marco-MiniLM-L-6-v2"
	cfg.TopK = 5
	cfg.APIBase = "http://localhost:8000/rerank"
	cfg.Timeout = 30 * time.Second
	return cfg
}
// CrossEncoderReranker uses a cross-encoder model service for reranking.
//
// This reranker expects an HTTP service that accepts POST requests with the
// following JSON format:
//
//	{
//	  "query": "search query",
//	  "documents": ["document 1", "document 2", ...],
//	  "top_n": 5,
//	  "model": "model-name"
//	}
//
// And returns:
//
//	{
//	  "scores": [0.95, 0.87, ...],
//	  "indices": [0, 2, ...]
//	}
//
// You can set up a local service using Python with the sentence-transformers
// library. See the RERANKER.md file for an example setup script.
type CrossEncoderReranker struct {
	client *http.Client
	config CrossEncoderRerankerConfig
}
// NewCrossEncoderReranker creates a new cross-encoder reranker.
//
// Fix: zero-valued config fields are now defaulted individually from
// DefaultCrossEncoderRerankerConfig. Previously an empty ModelName replaced
// the WHOLE config with the defaults, silently discarding a caller's
// explicit TopK/APIBase/Timeout settings.
func NewCrossEncoderReranker(config CrossEncoderRerankerConfig) *CrossEncoderReranker {
	def := DefaultCrossEncoderRerankerConfig()
	if config.ModelName == "" {
		config.ModelName = def.ModelName
	}
	if config.TopK == 0 {
		config.TopK = def.TopK
	}
	if config.Timeout == 0 {
		config.Timeout = def.Timeout
	}
	if config.APIBase == "" {
		config.APIBase = def.APIBase
	}
	return &CrossEncoderReranker{
		client: &http.Client{
			Timeout: config.Timeout,
		},
		config: config,
	}
}
// crossEncoderRequest represents the request body sent to the
// cross-encoder service.
type crossEncoderRequest struct {
	Query     string   `json:"query"`
	Documents []string `json:"documents"`
	TopN      int      `json:"top_n,omitempty"`
	Model     string   `json:"model,omitempty"`
}

// crossEncoderResponse represents the service response. Scores[i] is the
// relevance score for the input document at Indices[i]; the two slices are
// expected to be parallel.
type crossEncoderResponse struct {
	Scores  []float64 `json:"scores"`
	Indices []int     `json:"indices"`
}
// Rerank scores documents against the query via the configured external
// cross-encoder HTTP service and returns them in the service's ranked order,
// annotated with rerank metadata.
//
// Fixes over the previous version:
//   - the service's scores/indices arrays are length-checked before use
//     (a mismatch previously panicked on Scores[i]);
//   - out-of-range indices are skipped via append instead of leaving
//     zero-value holes in a pre-sized result slice.
func (r *CrossEncoderReranker) Rerank(ctx context.Context, query string, documents []rag.DocumentSearchResult) ([]rag.DocumentSearchResult, error) {
	if len(documents) == 0 {
		return []rag.DocumentSearchResult{}, nil
	}
	// Prepare request body
	docTexts := make([]string, len(documents))
	for i, doc := range documents {
		docTexts[i] = doc.Document.Content
	}
	reqBody := crossEncoderRequest{
		Query:     query,
		Documents: docTexts,
		TopN:      r.config.TopK,
		Model:     r.config.ModelName,
	}
	jsonBody, err := json.Marshal(reqBody)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	// Create HTTP request
	req, err := http.NewRequestWithContext(ctx, "POST", r.config.APIBase, bytes.NewReader(jsonBody))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Send request
	resp, err := r.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()
	// Read response
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	// Check status code
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("cross-encoder service returned status %d: %s", resp.StatusCode, string(body))
	}
	// Parse response
	var ceResp crossEncoderResponse
	if err := json.Unmarshal(body, &ceResp); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}
	// The service must return exactly one score per index.
	if len(ceResp.Scores) != len(ceResp.Indices) {
		return nil, fmt.Errorf("cross-encoder service returned %d scores for %d indices", len(ceResp.Scores), len(ceResp.Indices))
	}
	// Map results back to the input documents.
	results := make([]rag.DocumentSearchResult, 0, len(ceResp.Indices))
	for i, idx := range ceResp.Indices {
		if idx < 0 || idx >= len(documents) {
			continue // defensive: ignore indices that don't map to an input document
		}
		originalDoc := documents[idx]
		results = append(results, rag.DocumentSearchResult{
			Document: originalDoc.Document,
			Score:    ceResp.Scores[i],
			Metadata: r.mergeMetadata(originalDoc.Metadata, map[string]any{
				"cross_encoder_score": ceResp.Scores[i],
				"original_score":      originalDoc.Score,
				"original_index":      idx,
				"reranking_method":    "cross_encoder",
				"rerank_model":        r.config.ModelName,
			}),
		})
	}
	return results, nil
}
// mergeMetadata combines two metadata maps into a fresh map; keys present
// in both take their value from m2. Nil inputs are handled transparently.
func (r *CrossEncoderReranker) mergeMetadata(m1, m2 map[string]any) map[string]any {
	merged := make(map[string]any, len(m1)+len(m2))
	for k, v := range m1 {
		merged[k] = v
	}
	for k, v := range m2 {
		merged[k] = v
	}
	return merged
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/vector_test.go | rag/retriever/vector_test.go | package retriever
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
// mockVectorStore is a VectorStore stub that returns stored documents in
// insertion order with synthetic, monotonically decreasing scores.
type mockVectorStore struct {
	docs []rag.Document
}
// Add stores the given documents in insertion order.
func (m *mockVectorStore) Add(ctx context.Context, documents []rag.Document) error {
	for _, d := range documents {
		m.docs = append(m.docs, d)
	}
	return nil
}
// Search ignores the query vector and returns up to k stored documents
// with synthetic scores 1.0, 0.9, 0.8, ...
func (m *mockVectorStore) Search(ctx context.Context, query []float32, k int) ([]rag.DocumentSearchResult, error) {
	limit := k
	if len(m.docs) < limit {
		limit = len(m.docs)
	}
	var results []rag.DocumentSearchResult
	for i := 0; i < limit; i++ {
		results = append(results, rag.DocumentSearchResult{
			Document: m.docs[i],
			Score:    1.0 - float64(i)*0.1,
		})
	}
	return results, nil
}
// SearchWithFilter ignores the filter and delegates to Search.
func (m *mockVectorStore) SearchWithFilter(ctx context.Context, query []float32, k int, filter map[string]any) ([]rag.DocumentSearchResult, error) {
	return m.Search(ctx, query, k)
}
// Delete, Update and GetStats are no-op stubs not exercised by these tests.
func (m *mockVectorStore) Delete(ctx context.Context, ids []string) error { return nil }

func (m *mockVectorStore) Update(ctx context.Context, documents []rag.Document) error { return nil }

func (m *mockVectorStore) GetStats(ctx context.Context) (*rag.VectorStoreStats, error) {
	return nil, nil
}
// TestVectorRetriever covers the main retrieval paths of VectorRetriever
// against a two-document mock store whose scores are 1.0 and 0.9.
func TestVectorRetriever(t *testing.T) {
	ctx := context.Background()
	store := &mockVectorStore{
		docs: []rag.Document{
			{ID: "doc1", Content: "content 1"},
			{ID: "doc2", Content: "content 2"},
		},
	}
	embedder := &mockEmbedder{}
	r := NewVectorRetriever(store, embedder, rag.RetrievalConfig{K: 2})
	t.Run("Basic Retrieve", func(t *testing.T) {
		docs, err := r.Retrieve(ctx, "test query")
		assert.NoError(t, err)
		assert.Len(t, docs, 2)
		assert.Equal(t, "doc1", docs[0].ID)
	})
	t.Run("Retrieve with Score Threshold", func(t *testing.T) {
		rLow := NewVectorRetriever(store, embedder, rag.RetrievalConfig{K: 2, ScoreThreshold: 0.95})
		docs, err := rLow.Retrieve(ctx, "test query")
		assert.NoError(t, err)
		assert.Len(t, docs, 1) // Only doc1 has score 1.0 >= 0.95
	})
	t.Run("Retrieve with MMR", func(t *testing.T) {
		rMMR := NewVectorRetriever(store, embedder, rag.RetrievalConfig{K: 2, SearchType: "mmr"})
		docs, err := rMMR.Retrieve(ctx, "test query")
		assert.NoError(t, err)
		assert.NotEmpty(t, docs)
	})
	t.Run("Retrieve with Diversity", func(t *testing.T) {
		rDiv := NewVectorRetriever(store, embedder, rag.RetrievalConfig{K: 2, SearchType: "diversity"})
		docs, err := rDiv.Retrieve(ctx, "test query")
		assert.NoError(t, err)
		assert.NotEmpty(t, docs)
	})
	t.Run("VectorStoreRetriever", func(t *testing.T) {
		vsr := NewVectorStoreRetriever(store, embedder, 2)
		docs, err := vsr.Retrieve(ctx, "test query")
		assert.NoError(t, err)
		assert.Len(t, docs, 2)
		res, err := vsr.RetrieveWithConfig(ctx, "test", &rag.RetrievalConfig{K: 1})
		assert.NoError(t, err)
		assert.Len(t, res, 1)
	})
}
// TestContentSimilarity sanity-checks that partially overlapping strings
// score strictly between 0 (disjoint) and 1 (identical).
func TestContentSimilarity(t *testing.T) {
	s1 := "hello world"
	s2 := "hello there"
	sim := contentSimilarity(s1, s2)
	assert.Greater(t, sim, 0.0)
	assert.Less(t, sim, 1.0)
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/hybrid_test.go | rag/retriever/hybrid_test.go | package retriever
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
// TestHybridRetriever verifies weighted fan-out retrieval over two mock
// retrievers plus the weight-management and add/remove-retriever APIs.
func TestHybridRetriever(t *testing.T) {
	ctx := context.Background()
	r1 := &mockRetriever{docs: []rag.Document{{ID: "1", Content: "r1"}}}
	r2 := &mockRetriever{docs: []rag.Document{{ID: "2", Content: "r2"}}}
	h := NewHybridRetriever([]rag.Retriever{r1, r2}, []float64{0.7, 0.3}, rag.RetrievalConfig{K: 2})
	assert.NotNil(t, h)
	t.Run("Hybrid Retrieve", func(t *testing.T) {
		docs, err := h.Retrieve(ctx, "test")
		assert.NoError(t, err)
		assert.Len(t, docs, 2)
	})
	t.Run("Hybrid Weights", func(t *testing.T) {
		h.SetWeights([]float64{0.5, 0.5})
		assert.Equal(t, []float64{0.5, 0.5}, h.GetWeights())
	})
	t.Run("Retriever Management", func(t *testing.T) {
		assert.Equal(t, 2, h.GetRetrieverCount())
		r3 := &mockRetriever{}
		h.AddRetriever(r3, 0.1)
		assert.Equal(t, 3, h.GetRetrieverCount())
		h.RemoveRetriever(2)
		assert.Equal(t, 2, h.GetRetrieverCount())
	})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/reranker.go | rag/retriever/reranker.go | package retriever
import (
	"context"
	"sort"
	"strings"

	"github.com/smallnest/langgraphgo/rag"
)
// SimpleReranker is a lightweight reranker that blends the original
// retrieval score with a keyword-occurrence score; it needs no external
// service. The zero value is ready to use.
type SimpleReranker struct {
	// Can be extended with more sophisticated reranking logic
}
// NewSimpleReranker constructs a keyword-matching reranker.
func NewSimpleReranker() *SimpleReranker {
	reranker := SimpleReranker{}
	return &reranker
}
// Rerank reorders documents by a blended score: 70% of the original
// retrieval score plus 30% of a length-normalized keyword-occurrence
// score, highest first.
//
// Fix: the hand-rolled O(n^2) swap sort is replaced with sort.Slice.
func (r *SimpleReranker) Rerank(ctx context.Context, query string, documents []rag.DocumentSearchResult) ([]rag.DocumentSearchResult, error) {
	queryTerms := strings.Fields(strings.ToLower(query))
	results := make([]rag.DocumentSearchResult, len(documents))
	for i, docResult := range documents {
		content := strings.ToLower(docResult.Document.Content)
		// Keyword score: total occurrences of all query terms.
		var score float64
		for _, term := range queryTerms {
			score += float64(strings.Count(content, term))
		}
		// Normalize by document length (per 1000 chars) so long documents
		// are not favored merely for repeating terms.
		if len(content) > 0 {
			score = score / float64(len(content)) * 1000
		}
		// Blend with the original retrieval score.
		finalScore := 0.7*docResult.Score + 0.3*score
		results[i] = rag.DocumentSearchResult{
			Document: docResult.Document,
			Score:    finalScore,
			Metadata: docResult.Metadata,
		}
	}
	// Sort by blended score, descending.
	sort.Slice(results, func(i, j int) bool {
		return results[i].Score > results[j].Score
	})
	return results, nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/retriever/reranker_test.go | rag/retriever/reranker_test.go | package retriever
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
func TestSimpleReranker(t *testing.T) {
ctx := context.Background()
r := NewSimpleReranker()
assert.NotNil(t, r)
docs := []rag.DocumentSearchResult{
{Document: rag.Document{Content: "match"}, Score: 0.5},
{Document: rag.Document{Content: "no match"}, Score: 0.1},
}
t.Run("Rerank with exact match", func(t *testing.T) {
res, err := r.Rerank(ctx, "match", docs)
assert.NoError(t, err)
assert.Greater(t, res[0].Score, 0.5)
})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/store/vector.go | rag/store/vector.go | package store
import (
"context"
"fmt"
"math"
"time"
"github.com/smallnest/langgraphgo/rag"
)
// InMemoryVectorStore is a simple in-memory vector store implementation.
// Documents and their embeddings are kept in two parallel slices: index i
// of documents corresponds to index i of embeddings.
// NOTE(review): access is not synchronized — appears intended for
// single-goroutine use; confirm callers before sharing across goroutines.
type InMemoryVectorStore struct {
	documents  []rag.Document
	embeddings [][]float32
	embedder   rag.Embedder
}
// NewInMemoryVectorStore creates an empty InMemoryVectorStore backed by
// the given embedder (may be nil when every added document carries a
// precomputed embedding).
func NewInMemoryVectorStore(embedder rag.Embedder) *InMemoryVectorStore {
	store := &InMemoryVectorStore{embedder: embedder}
	store.documents = make([]rag.Document, 0)
	store.embeddings = make([][]float32, 0)
	return store
}
// AddWithEmbedding adds a document with an explicit, caller-supplied
// embedding, bypassing the configured embedder. The embedding's dimension
// is not validated against existing vectors.
func (s *InMemoryVectorStore) AddWithEmbedding(ctx context.Context, doc rag.Document, embedding []float32) error {
	s.documents = append(s.documents, doc)
	s.embeddings = append(s.embeddings, embedding)
	return nil
}
// Add appends documents to the store, computing an embedding via the
// configured embedder for any document that does not already carry one.
// Fails if a document lacks an embedding and no embedder is configured.
func (s *InMemoryVectorStore) Add(ctx context.Context, documents []rag.Document) error {
	for _, doc := range documents {
		emb := doc.Embedding
		if len(emb) == 0 {
			if s.embedder == nil {
				return fmt.Errorf("no embedder configured and document has no embedding")
			}
			computed, err := s.embedder.EmbedDocument(ctx, doc.Content)
			if err != nil {
				return fmt.Errorf("failed to embed document: %w", err)
			}
			emb = computed
		}
		s.documents = append(s.documents, doc)
		s.embeddings = append(s.embeddings, emb)
	}
	return nil
}
// AddBatch appends documents with caller-supplied embeddings; the two
// slices must be the same length.
func (s *InMemoryVectorStore) AddBatch(ctx context.Context, documents []rag.Document, embeddings [][]float32) error {
	if len(documents) != len(embeddings) {
		return fmt.Errorf("documents and embeddings must have same length")
	}
	for i := range documents {
		s.documents = append(s.documents, documents[i])
		s.embeddings = append(s.embeddings, embeddings[i])
	}
	return nil
}
// Search performs brute-force cosine-similarity search over all stored
// embeddings and returns the top-k documents, best first. Scores are
// cosine similarities. Returns an error for k <= 0 and an empty slice
// when the store is empty.
// NOTE(review): the ranking uses an O(n^2) swap sort — fine for small
// in-memory stores, but worth switching to sort.Slice for large corpora.
func (s *InMemoryVectorStore) Search(ctx context.Context, queryEmbedding []float32, k int) ([]rag.DocumentSearchResult, error) {
	if k <= 0 {
		return nil, fmt.Errorf("k must be positive")
	}
	if len(s.documents) == 0 {
		return []rag.DocumentSearchResult{}, nil
	}
	// Calculate similarities
	type docScore struct {
		index int
		score float64
	}
	scores := make([]docScore, len(s.documents))
	for i, docEmb := range s.embeddings {
		similarity := cosineSimilarity32(queryEmbedding, docEmb)
		scores[i] = docScore{index: i, score: similarity}
	}
	// Sort by similarity score (descending)
	for i := range scores {
		for j := i + 1; j < len(scores); j++ {
			if scores[j].score > scores[i].score {
				scores[i], scores[j] = scores[j], scores[i]
			}
		}
	}
	// Clamp k to the number of stored documents.
	if k > len(scores) {
		k = len(scores)
	}
	results := make([]rag.DocumentSearchResult, k)
	for i := 0; i < k; i++ {
		results[i] = rag.DocumentSearchResult{
			Document: s.documents[scores[i].index],
			Score:    float64(scores[i].score),
		}
	}
	return results, nil
}
// SearchWithFilter performs the same brute-force similarity search as
// Search, but only over documents whose metadata matches every key/value
// pair in filter (see matchesFilter). Returns an empty slice when nothing
// matches the filter.
func (s *InMemoryVectorStore) SearchWithFilter(ctx context.Context, queryEmbedding []float32, k int, filter map[string]any) ([]rag.DocumentSearchResult, error) {
	if k <= 0 {
		return nil, fmt.Errorf("k must be positive")
	}
	// Filter documents first, keeping docs and embeddings aligned.
	var filteredDocs []rag.Document
	var filteredEmbeddings [][]float32
	for i, doc := range s.documents {
		if s.matchesFilter(doc, filter) {
			filteredDocs = append(filteredDocs, doc)
			filteredEmbeddings = append(filteredEmbeddings, s.embeddings[i])
		}
	}
	if len(filteredDocs) == 0 {
		return []rag.DocumentSearchResult{}, nil
	}
	// Calculate similarities
	type docScore struct {
		index int
		score float64
	}
	scores := make([]docScore, len(filteredDocs))
	for i, docEmb := range filteredEmbeddings {
		similarity := cosineSimilarity32(queryEmbedding, docEmb)
		scores[i] = docScore{index: i, score: similarity}
	}
	// Sort by similarity score (descending) — same O(n^2) swap sort as Search.
	for i := range scores {
		for j := i + 1; j < len(scores); j++ {
			if scores[j].score > scores[i].score {
				scores[i], scores[j] = scores[j], scores[i]
			}
		}
	}
	if k > len(scores) {
		k = len(scores)
	}
	results := make([]rag.DocumentSearchResult, k)
	for i := 0; i < k; i++ {
		results[i] = rag.DocumentSearchResult{
			Document: filteredDocs[scores[i].index],
			Score:    float64(scores[i].score),
		}
	}
	return results, nil
}
// Delete removes every document whose ID appears in ids, keeping the
// parallel documents/embeddings slices in sync. Unknown IDs are ignored.
func (s *InMemoryVectorStore) Delete(ctx context.Context, ids []string) error {
	remove := make(map[string]bool, len(ids))
	for _, id := range ids {
		remove[id] = true
	}
	var keptDocs []rag.Document
	var keptEmbs [][]float32
	for i := range s.documents {
		if remove[s.documents[i].ID] {
			continue
		}
		keptDocs = append(keptDocs, s.documents[i])
		keptEmbs = append(keptEmbs, s.embeddings[i])
	}
	s.documents = keptDocs
	s.embeddings = keptEmbs
	return nil
}
// UpdateWithEmbedding replaces the stored document with the same ID and
// its embedding; it errors when the ID is not present.
func (s *InMemoryVectorStore) UpdateWithEmbedding(ctx context.Context, doc rag.Document, embedding []float32) error {
	for i := range s.documents {
		if s.documents[i].ID != doc.ID {
			continue
		}
		s.documents[i] = doc
		s.embeddings[i] = embedding
		return nil
	}
	return fmt.Errorf("document not found: %s", doc.ID)
}
// Update replaces existing documents (matched by ID) and their embeddings.
// A document without an embedding is re-embedded via the configured
// embedder; an unknown ID aborts with an error (documents processed before
// the failure remain updated — not transactional).
func (s *InMemoryVectorStore) Update(ctx context.Context, documents []rag.Document) error {
	for _, doc := range documents {
		embedding := doc.Embedding
		if len(embedding) == 0 {
			if s.embedder == nil {
				return fmt.Errorf("no embedder configured and document %s has no embedding", doc.ID)
			}
			var err error
			embedding, err = s.embedder.EmbedDocument(ctx, doc.Content)
			if err != nil {
				return fmt.Errorf("failed to embed document %s: %w", doc.ID, err)
			}
		}
		found := false
		for i, existingDoc := range s.documents {
			if existingDoc.ID == doc.ID {
				s.documents[i] = doc
				s.embeddings[i] = embedding
				found = true
				break
			}
		}
		if !found {
			return fmt.Errorf("document not found: %s", doc.ID)
		}
	}
	return nil
}
// GetStats reports document/vector counts and the embedding dimension
// (taken from the first stored vector, 0 when empty); LastUpdated is the
// time of this call.
func (s *InMemoryVectorStore) GetStats(ctx context.Context) (*rag.VectorStoreStats, error) {
	dim := 0
	if len(s.embeddings) > 0 {
		dim = len(s.embeddings[0])
	}
	return &rag.VectorStoreStats{
		TotalDocuments: len(s.documents),
		TotalVectors:   len(s.embeddings),
		Dimension:      dim,
		LastUpdated:    time.Now(),
	}, nil
}
// Close releases the store's data. For this in-memory implementation it
// simply resets both parallel slices to empty; the store remains usable
// afterwards.
func (s *InMemoryVectorStore) Close() error {
	// Clear all data
	s.documents = make([]rag.Document, 0)
	s.embeddings = make([][]float32, 0)
	return nil
}
// matchesFilter reports whether every filter key is present in the
// document's metadata with an equal (==) value. An empty filter matches
// every document.
func (s *InMemoryVectorStore) matchesFilter(doc rag.Document, filter map[string]any) bool {
	for key, want := range filter {
		got, ok := doc.Metadata[key]
		if !ok {
			return false
		}
		if got != want {
			return false
		}
	}
	return true
}
// cosineSimilarity32 calculates the cosine similarity between two float32
// vectors. It returns 0 when the vectors differ in length or either has
// zero magnitude.
//
// Fix: operands are widened to float64 BEFORE multiplying. The previous
// code computed float64(a[i] * b[i]) — the product was formed in float32,
// which loses precision and overflows to +Inf for components above
// ~1.8e19, turning the final result into NaN.
func cosineSimilarity32(a, b []float32) float64 {
	if len(a) != len(b) {
		return 0
	}
	var dotProduct float64
	var normA float64
	var normB float64
	for i := range a {
		x := float64(a[i])
		y := float64(b[i])
		dotProduct += x * y
		normA += x * x
		normB += y * y
	}
	if normA == 0 || normB == 0 {
		return 0
	}
	return dotProduct / (math.Sqrt(normA) * math.Sqrt(normB))
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/store/falkordb.go | rag/store/falkordb.go | package store
import (
"context"
"fmt"
"maps"
"net/url"
"regexp"
"strings"
"github.com/redis/go-redis/v9"
"github.com/smallnest/langgraphgo/rag"
)
// FalkorDBGraph implements rag.KnowledgeGraph on top of FalkorDB (a graph
// database speaking the Redis protocol); graphName selects the graph key.
type FalkorDBGraph struct {
	client    redis.UniversalClient
	graphName string
}
// NewFalkorDBGraph creates a new FalkorDB knowledge graph client.
// Connection string format: falkordb://host:port/graph_name
// The graph name defaults to "rag" when the path component is empty.
// NOTE(review): credentials in the URL (user:pass@) are ignored — confirm
// whether authenticated deployments need them wired into redis.Options.
func NewFalkorDBGraph(connectionString string) (rag.KnowledgeGraph, error) {
	// Format: falkordb://host:port/graph_name
	u, err := url.Parse(connectionString)
	if err != nil {
		return nil, fmt.Errorf("invalid connection string: %w", err)
	}
	addr := u.Host
	if addr == "" {
		return nil, fmt.Errorf("invalid connection string: missing host")
	}
	graphName := strings.TrimPrefix(u.Path, "/")
	if graphName == "" {
		graphName = "rag"
	}
	// Create a go-redis client
	client := redis.NewClient(&redis.Options{
		Addr: addr,
	})
	return &FalkorDBGraph{
		client:    client,
		graphName: graphName,
	}, nil
}
// AddEntity upserts an entity node labelled with its sanitized Type and
// keyed by its ID (MERGE avoids duplicates); remaining properties are
// applied with SET +=.
//
// Fix: backslashes in the ID are escaped before single quotes. Previously
// only quotes were escaped, so an ID ending in `\` produced `\'`, which
// re-opened the quoted Cypher string (broken query / injection vector).
func (f *FalkorDBGraph) AddEntity(ctx context.Context, entity *rag.Entity) error {
	g := NewGraph(f.graphName, f.client)
	label := sanitizeLabel(entity.Type)
	props := entityToMap(entity)
	propsStr := propsToString(props)
	// Escape backslashes first, then single quotes, for Cypher string literals.
	escapedID := strings.ReplaceAll(strings.ReplaceAll(entity.ID, `\`, `\\`), "'", `\'`)
	// Using MERGE to avoid duplicates
	query := fmt.Sprintf("MERGE (n:%s {id: '%s'}) SET n += %s", label, escapedID, propsStr)
	_, err := g.Query(ctx, query)
	return err
}
// AddRelationship upserts a typed relationship between two existing nodes
// matched by their id property; relationship properties are applied with
// SET +=.
//
// Fix: all three interpolated IDs escape backslashes before single quotes;
// previously an ID ending in `\` produced `\'` and broke the generated
// Cypher (see AddEntity).
func (f *FalkorDBGraph) AddRelationship(ctx context.Context, rel *rag.Relationship) error {
	g := NewGraph(f.graphName, f.client)
	relType := sanitizeLabel(rel.Type)
	props := relationshipToMap(rel)
	propsStr := propsToString(props)
	// Escape backslashes first, then single quotes, for Cypher string literals.
	esc := func(s string) string {
		return strings.ReplaceAll(strings.ReplaceAll(s, `\`, `\\`), "'", `\'`)
	}
	escapedSource := esc(rel.Source)
	escapedTarget := esc(rel.Target)
	escapedID := esc(rel.ID)
	// MATCH source and target, then MERGE relationship
	query := fmt.Sprintf("MATCH (a {id: '%s'}), (b {id: '%s'}) MERGE (a)-[r:%s {id: '%s'}]->(b) SET r += %s",
		escapedSource, escapedTarget, relType, escapedID, propsStr)
	_, err := g.Query(ctx, query)
	return err
}
// Query matches all (n)-[r]->(m) triples, optionally constrained by
// entity-type labels (on either endpoint) and relationship types, and
// returns deduplicated entities and relationships. Labels/types pass
// through sanitizeLabel before interpolation into the WHERE clause.
func (f *FalkorDBGraph) Query(ctx context.Context, query *rag.GraphQuery) (*rag.GraphQueryResult, error) {
	g := NewGraph(f.graphName, f.client)
	cypher := "MATCH (n)-[r]->(m)"
	where := []string{}
	if len(query.EntityTypes) > 0 {
		// Match when EITHER endpoint carries one of the requested labels.
		orClauses := []string{}
		for _, t := range query.EntityTypes {
			lbl := sanitizeLabel(t)
			orClauses = append(orClauses, fmt.Sprintf("n:%s", lbl))
			orClauses = append(orClauses, fmt.Sprintf("m:%s", lbl))
		}
		if len(orClauses) > 0 {
			where = append(where, "("+strings.Join(orClauses, " OR ")+")")
		}
	}
	if len(query.Relationships) > 0 {
		relClauses := []string{}
		for _, t := range query.Relationships {
			lbl := sanitizeLabel(t)
			relClauses = append(relClauses, fmt.Sprintf("type(r) = '%s'", lbl))
		}
		if len(relClauses) > 0 {
			where = append(where, "("+strings.Join(relClauses, " OR ")+")")
		}
	}
	if len(where) > 0 {
		cypher += " WHERE " + strings.Join(where, " AND ")
	}
	cypher += " RETURN n, r, m"
	if query.Limit > 0 {
		cypher += fmt.Sprintf(" LIMIT %d", query.Limit)
	}
	qr, err := g.Query(ctx, cypher)
	if err != nil {
		return nil, err
	}
	result := &rag.GraphQueryResult{
		Entities:      make([]*rag.Entity, 0),
		Relationships: make([]*rag.Relationship, 0),
	}
	// Deduplicate entities and relationships across rows by ID.
	seenEntities := make(map[string]bool)
	seenRels := make(map[string]bool)
	for _, row := range qr.Results {
		if len(row) < 3 {
			continue
		}
		nObj := row[0]
		rObj := row[1]
		mObj := row[2]
		entN := parseNode(nObj)
		if entN != nil && !seenEntities[entN.ID] {
			result.Entities = append(result.Entities, entN)
			seenEntities[entN.ID] = true
		}
		entM := parseNode(mObj)
		if entM != nil && !seenEntities[entM.ID] {
			result.Entities = append(result.Entities, entM)
			seenEntities[entM.ID] = true
		}
		// Only record the edge when both endpoints parsed successfully.
		if entN != nil && entM != nil {
			rel := parseEdge(rObj, entN.ID, entM.ID)
			if rel != nil && !seenRels[rel.ID] {
				result.Relationships = append(result.Relationships, rel)
				seenRels[rel.ID] = true
			}
		}
	}
	return result, nil
}
// GetEntity retrieves an entity by its id property.
//
// Fix: the ID is escaped (backslashes, then single quotes) before being
// interpolated into the Cypher literal, consistent with AddEntity;
// previously a raw quote in the ID broke the query (injection vector).
func (f *FalkorDBGraph) GetEntity(ctx context.Context, id string) (*rag.Entity, error) {
	g := NewGraph(f.graphName, f.client)
	escapedID := strings.ReplaceAll(strings.ReplaceAll(id, `\`, `\\`), "'", `\'`)
	query := fmt.Sprintf("MATCH (n {id: '%s'}) RETURN n", escapedID)
	qr, err := g.Query(ctx, query)
	if err != nil {
		return nil, err
	}
	if len(qr.Results) == 0 {
		return nil, fmt.Errorf("entity not found: %s", id)
	}
	row := qr.Results[0]
	if len(row) == 0 {
		return nil, fmt.Errorf("invalid result")
	}
	ent := parseNode(row[0])
	if ent == nil {
		return nil, fmt.Errorf("failed to parse entity")
	}
	return ent, nil
}
// GetRelationship retrieves a relationship by ID, along with its endpoints.
// Returns an error when the query fails, nothing matches, or either endpoint
// cannot be parsed (the previous code dereferenced parseNode's possibly-nil
// result, which could panic).
func (f *FalkorDBGraph) GetRelationship(ctx context.Context, id string) (*rag.Relationship, error) {
	g := NewGraph(f.graphName, f.client)
	// Escape single quotes so the ID cannot break out of the string literal.
	safeID := strings.ReplaceAll(id, "'", "\\'")
	query := fmt.Sprintf("MATCH (a)-[r {id: '%s'}]->(b) RETURN a, r, b", safeID)
	qr, err := g.Query(ctx, query)
	if err != nil {
		return nil, err
	}
	if len(qr.Results) == 0 {
		return nil, fmt.Errorf("relationship not found: %s", id)
	}
	row := qr.Results[0]
	if len(row) < 3 {
		return nil, fmt.Errorf("invalid result")
	}
	a := parseNode(row[0])
	b := parseNode(row[2])
	// parseNode returns nil for unrecognized payloads; guard before .ID access.
	if a == nil || b == nil {
		return nil, fmt.Errorf("failed to parse relationship endpoints")
	}
	rel := parseEdge(row[1], a.ID, b.ID)
	if rel == nil {
		return nil, fmt.Errorf("failed to parse relationship")
	}
	return rel, nil
}
// GetRelatedEntities finds entities reachable within maxDepth hops of the
// given entity (undirected traversal). maxDepth is clamped to at least 1.
func (f *FalkorDBGraph) GetRelatedEntities(ctx context.Context, entityID string, maxDepth int) ([]*rag.Entity, error) {
	if maxDepth < 1 {
		maxDepth = 1
	}
	g := NewGraph(f.graphName, f.client)
	// Escape single quotes so the ID cannot terminate the Cypher literal.
	safeID := strings.ReplaceAll(entityID, "'", "\\'")
	query := fmt.Sprintf("MATCH (n {id: '%s'})-[*1..%d]-(m) RETURN DISTINCT m", safeID, maxDepth)
	qr, err := g.Query(ctx, query)
	if err != nil {
		return nil, err
	}
	entities := []*rag.Entity{}
	seen := make(map[string]bool) // dedupe by entity ID across result rows
	for _, row := range qr.Results {
		if len(row) == 0 {
			continue
		}
		ent := parseNode(row[0])
		if ent != nil && !seen[ent.ID] {
			entities = append(entities, ent)
			seen[ent.ID] = true
		}
	}
	return entities, nil
}
// DeleteEntity removes an entity and all of its attached relationships
// (DETACH DELETE).
func (f *FalkorDBGraph) DeleteEntity(ctx context.Context, id string) error {
	g := NewGraph(f.graphName, f.client)
	// Escape single quotes so the ID cannot break the Cypher literal.
	safeID := strings.ReplaceAll(id, "'", "\\'")
	query := fmt.Sprintf("MATCH (n {id: '%s'}) DETACH DELETE n", safeID)
	_, err := g.Query(ctx, query)
	return err
}
// DeleteRelationship removes a relationship by ID; the endpoint nodes are kept.
func (f *FalkorDBGraph) DeleteRelationship(ctx context.Context, id string) error {
	g := NewGraph(f.graphName, f.client)
	// Escape single quotes so the ID cannot break the Cypher literal.
	safeID := strings.ReplaceAll(id, "'", "\\'")
	query := fmt.Sprintf("MATCH ()-[r {id: '%s'}]->() DELETE r", safeID)
	_, err := g.Query(ctx, query)
	return err
}
// UpdateEntity updates an entity by re-adding it.
// NOTE(review): assumes AddEntity performs an upsert keyed on the entity ID —
// confirm against the AddEntity implementation.
func (f *FalkorDBGraph) UpdateEntity(ctx context.Context, entity *rag.Entity) error {
	return f.AddEntity(ctx, entity)
}
// UpdateRelationship updates a relationship by re-adding it.
// NOTE(review): assumes AddRelationship performs an upsert keyed on the
// relationship ID — confirm against the AddRelationship implementation.
func (f *FalkorDBGraph) UpdateRelationship(ctx context.Context, rel *rag.Relationship) error {
	return f.AddRelationship(ctx, rel)
}
// Close releases the underlying Redis client, if one was created.
func (f *FalkorDBGraph) Close() error {
	if f.client == nil {
		return nil
	}
	return f.client.Close()
}
// Helpers

// labelRegex matches every character that is not legal in a Cypher label.
var labelRegex = regexp.MustCompile(`[^a-zA-Z0-9_]`)

// sanitizeLabel replaces illegal label characters with underscores and falls
// back to "Entity" for an empty input.
func sanitizeLabel(l string) string {
	if cleaned := labelRegex.ReplaceAllString(l, "_"); cleaned != "" {
		return cleaned
	}
	return "Entity"
}
// propsToString renders a property map as a Cypher map literal, e.g.
// {name: "x", embedding: [0.1,0.2]}. []float32 values (embeddings) become
// Cypher number lists; every other value goes through quoteString.
// Map iteration order is unspecified, so key order varies between calls.
func propsToString(m map[string]any) string {
	rendered := make([]string, 0, len(m))
	for key, raw := range m {
		var value any
		if vec, ok := raw.([]float32); ok {
			// Convert to a Cypher list: [v1,v2,...]
			elems := make([]string, len(vec))
			for i, f := range vec {
				elems[i] = fmt.Sprintf("%f", f)
			}
			value = "[" + strings.Join(elems, ",") + "]"
		} else {
			value = quoteString(raw)
		}
		rendered = append(rendered, fmt.Sprintf("%s: %v", key, value))
	}
	return "{" + strings.Join(rendered, ", ") + "}"
}
// entityToMap flattens an entity into a single property map for persistence:
// custom properties first, then name/type, plus the embedding when present.
func entityToMap(e *rag.Entity) map[string]any {
	out := make(map[string]any, len(e.Properties)+3)
	for k, v := range e.Properties {
		out[k] = v
	}
	out["name"] = e.Name
	out["type"] = e.Type
	if len(e.Embedding) > 0 {
		out["embedding"] = e.Embedding
	}
	return out
}
// relationshipToMap flattens a relationship into a property map for
// persistence: custom properties first, then weight/confidence/type.
func relationshipToMap(r *rag.Relationship) map[string]any {
	out := make(map[string]any, len(r.Properties)+3)
	for k, v := range r.Properties {
		out[k] = v
	}
	out["weight"] = r.Weight
	out["confidence"] = r.Confidence
	out["type"] = r.Type
	return out
}
// toString coerces a reply value to a string: strings pass through, []byte is
// converted, and anything else is formatted with fmt.Sprint.
func toString(i any) string {
	switch v := i.(type) {
	case string:
		return v
	case []byte:
		return string(v)
	default:
		return fmt.Sprint(v)
	}
}
// Parsing Helpers

// parseNode converts a raw FalkorDB node reply into a rag.Entity.
// Three observed wire shapes are handled:
//   - KV pairs:  [["id", ...], ["labels", ...], ["properties", ...]]
//   - standard:  [id, [labels...], [props...]]
//   - nested:    [node_id, [id, labels, props]]
// Returns nil only when obj is not a []any; otherwise an Entity is always
// returned, possibly with empty fields.
func parseNode(obj any) *rag.Entity {
	vals, ok := obj.([]any)
	if !ok {
		return nil
	}
	e := &rag.Entity{
		Properties: make(map[string]any),
	}
	// Check for KV list format: [[key, val], [key, val], ...]
	// FalkorDB sometimes returns this structure
	if len(vals) > 0 {
		if first, ok := vals[0].([]any); ok && len(first) == 2 {
			k := toString(first[0])
			if k == "id" || k == "labels" || k == "properties" {
				return parseNodeKV(vals)
			}
		}
	}
	// Standard format: [id, labels, properties]
	if len(vals) >= 3 {
		// Labels: only the first label is kept as the entity type.
		if labels, ok := vals[1].([]any); ok && len(labels) > 0 {
			if l, ok := labels[0].([]byte); ok {
				e.Type = string(l)
			} else if l, ok := labels[0].(string); ok {
				e.Type = l
			}
		}
		// Properties
		if props, ok := vals[2].([]any); ok {
			parseFalkorDBProperties(props, e)
		}
	} else if len(vals) >= 2 {
		// FalkorDB format: [node_id, complex_structure]
		if complexStruct, ok := vals[1].([]any); ok && len(complexStruct) >= 3 {
			if props, ok := complexStruct[2].([]any); ok {
				parseFalkorDBProperties(props, e)
			}
		}
	}
	return e
}
// parseNodeKV parses a node returned as a list of [key, value] pairs
// ("id", "labels", "properties"). The id/name/type properties are lifted into
// the Entity fields; everything else lands in Properties as strings.
func parseNodeKV(pairs []any) *rag.Entity {
	e := &rag.Entity{Properties: make(map[string]any)}
	for _, item := range pairs {
		pair, ok := item.([]any)
		if !ok || len(pair) != 2 {
			continue // skip malformed pairs silently
		}
		key := toString(pair[0])
		val := pair[1]
		switch key {
		case "id":
			e.ID = toString(val)
		case "labels":
			// Only the first label is kept as the entity type.
			if lbls, ok := val.([]any); ok && len(lbls) > 0 {
				e.Type = toString(lbls[0])
			}
		case "properties":
			if props, ok := val.([]any); ok {
				for _, p := range props {
					if kv, ok := p.([]any); ok && len(kv) == 2 {
						pk := toString(kv[0])
						pv := toString(kv[1])
						// An "id" property overrides the top-level id;
						// likewise name/type map onto the Entity fields.
						if pk == "id" {
							e.ID = pv
						} else if pk == "name" {
							e.Name = pv
						} else if pk == "type" {
							e.Type = pv
						} else {
							e.Properties[pk] = pv
						}
					}
				}
			}
		}
	}
	return e
}
// parseEdge converts a raw FalkorDB edge reply into a rag.Relationship with
// the given source/target entity IDs. Two wire shapes are handled: a list of
// [key, value] pairs, and the positional [id, type, src, dst, props] form.
// Returns nil when obj is not a []any or the positional form is too short.
//
// BUGFIX: weight was previously hard-coded to 0 instead of being parsed from
// the property value; confidence was never parsed at all. Both now round-trip
// (see relationshipToMap, which writes "weight" and "confidence").
func parseEdge(obj any, sourceID, targetID string) *rag.Relationship {
	vals, ok := obj.([]any)
	if !ok {
		return nil
	}
	rel := &rag.Relationship{
		Source:     sourceID,
		Target:     targetID,
		Properties: make(map[string]any),
	}
	// Check for KV list format: [[key, val], [key, val], ...]
	if len(vals) > 0 {
		if first, ok := vals[0].([]any); ok && len(first) == 2 {
			k := toString(first[0])
			if k == "id" || k == "type" || k == "properties" || k == "src" || k == "dst" {
				// Parse KV edge
				for _, item := range vals {
					pair, ok := item.([]any)
					if !ok || len(pair) != 2 {
						continue
					}
					key := toString(pair[0])
					val := pair[1]
					switch key {
					case "id":
						rel.ID = toString(val)
					case "type":
						rel.Type = toString(val)
					case "properties":
						if props, ok := val.([]any); ok {
							for _, p := range props {
								if kv, ok := p.([]any); ok && len(kv) == 2 {
									pk := toString(kv[0])
									pv := toString(kv[1])
									switch pk {
									case "id":
										rel.ID = pv
									case "weight":
										rel.Weight = parseFloatOrZero(pv)
									case "confidence":
										rel.Confidence = parseFloatOrZero(pv)
									default:
										rel.Properties[pk] = pv
									}
								}
							}
						}
					}
				}
				return rel
			}
		}
	}
	if len(vals) < 3 {
		return nil
	}
	// Relation type at index 1 (bytes or string depending on client/server).
	if t, ok := vals[1].([]byte); ok {
		rel.Type = string(t)
	} else if t, ok := vals[1].(string); ok {
		rel.Type = t
	}
	// Properties usually sit at index 4 as [key, value] pairs.
	if len(vals) > 4 {
		if props, ok := vals[4].([]any); ok {
			for i := range props {
				if propPair, ok := props[i].([]any); ok && len(propPair) == 2 {
					key := toString(propPair[0])
					val := propPair[1]
					if b, ok := val.([]byte); ok {
						val = string(b)
					}
					switch key {
					case "id":
						rel.ID = fmt.Sprint(val)
					case "weight":
						rel.Weight = parseFloatOrZero(fmt.Sprint(val))
					case "confidence":
						rel.Confidence = parseFloatOrZero(fmt.Sprint(val))
					default:
						rel.Properties[key] = val
					}
				}
			}
		}
	}
	return rel
}

// parseFloatOrZero parses s as a float64, returning 0 when it does not parse
// (matching the previous behavior of dropping unparseable numeric values).
func parseFloatOrZero(s string) float64 {
	var f float64
	if _, err := fmt.Sscanf(s, "%f", &f); err != nil {
		return 0
	}
	return f
}
// parseFalkorDBProperties parses FalkorDB property format: [[id, len, str], [id, len, str], ...]
// Entries alternate key, value; id/name/type keys are lifted into the Entity
// fields, everything else is stored in e.Properties as a string.
func parseFalkorDBProperties(props []any, e *rag.Entity) {
	// Step by 2 because the flat list alternates key, value.
	for i := 0; i < len(props)-1; i += 2 {
		if i+1 < len(props) {
			// Key
			key := extractStringFromFalkorDBFormat(props[i])
			// Value
			value := extractStringFromFalkorDBFormat(props[i+1])
			if key != "" {
				switch key {
				case "id":
					e.ID = value
				case "name":
					e.Name = value
				case "type":
					e.Type = value
				default:
					e.Properties[key] = value
				}
			}
		}
	}
}
// extractStringFromFalkorDBFormat pulls the string payload out of a FalkorDB
// value. Accepted shapes: [id, length, string], [id, string], a bare string,
// or raw []byte. Anything else yields "".
func extractStringFromFalkorDBFormat(item any) string {
	switch v := item.(type) {
	case string:
		return v
	case []byte:
		return string(v)
	case []any:
		var payload any
		switch {
		case len(v) >= 3:
			payload = v[2] // [id, length, string]
		case len(v) == 2:
			payload = v[1] // [id, string]
		default:
			return ""
		}
		switch s := payload.(type) {
		case string:
			return s
		case []byte:
			return string(s)
		}
	}
	return ""
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/store/mock.go | rag/store/mock.go | package store
import (
	"context"
	"math"
	"sort"
	"strings"

	"github.com/smallnest/langgraphgo/rag"
)
// MockEmbedder is a simple mock embedder for testing
type MockEmbedder struct {
Dimension int
}
// NewMockEmbedder creates a new MockEmbedder
func NewMockEmbedder(dimension int) *MockEmbedder {
return &MockEmbedder{
Dimension: dimension,
}
}
// EmbedDocument generates mock embedding for a document
func (e *MockEmbedder) EmbedDocument(ctx context.Context, text string) ([]float32, error) {
return e.generateEmbedding(text), nil
}
// EmbedDocuments generates mock embeddings for documents
func (e *MockEmbedder) EmbedDocuments(ctx context.Context, texts []string) ([][]float32, error) {
embeddings := make([][]float32, len(texts))
for i, text := range texts {
embeddings[i] = e.generateEmbedding(text)
}
return embeddings, nil
}
// GetDimension returns the embedding dimension
func (e *MockEmbedder) GetDimension() int {
return e.Dimension
}
func (e *MockEmbedder) generateEmbedding(text string) []float32 {
// Simple deterministic embedding based on text content
embedding := make([]float32, e.Dimension)
for i := 0; i < e.Dimension; i++ {
var sum float64
for j, char := range text {
sum += float64(char) * float64(i+j+1)
}
embedding[i] = float32(math.Sin(sum / 1000.0))
}
// Normalize
var norm float32
for _, v := range embedding {
norm += v * v
}
norm = float32(math.Sqrt(float64(norm)))
if norm > 0 {
for i := range embedding {
embedding[i] /= norm
}
}
return embedding
}
// SimpleReranker is a simple reranker that scores documents based on keyword matching.
type SimpleReranker struct {
	// Can be extended with more sophisticated reranking logic
}

// NewSimpleReranker creates a new SimpleReranker.
func NewSimpleReranker() *SimpleReranker {
	return &SimpleReranker{}
}

// Rerank reorders documents by a blend of the original retrieval score (70%)
// and a length-normalized keyword-occurrence score (30%), highest first.
// The input slice is not modified; a new slice is returned.
func (r *SimpleReranker) Rerank(ctx context.Context, query string, documents []rag.DocumentSearchResult) ([]rag.DocumentSearchResult, error) {
	queryTerms := strings.Fields(strings.ToLower(query))
	results := make([]rag.DocumentSearchResult, len(documents))
	for i, docResult := range documents {
		content := strings.ToLower(docResult.Document.Content)
		// Simple scoring: count query term occurrences.
		var score float64
		for _, term := range queryTerms {
			score += float64(strings.Count(content, term))
		}
		// Normalize by document length so long documents are not favored.
		if len(content) > 0 {
			score = score / float64(len(content)) * 1000
		}
		// Combine with original score.
		finalScore := 0.7*float64(docResult.Score) + 0.3*score
		results[i] = rag.DocumentSearchResult{
			Document: docResult.Document,
			Score:    finalScore,
			Metadata: docResult.Metadata,
		}
	}
	// Stable stdlib sort (descending) replaces the previous hand-rolled
	// O(n²) selection sort; ties keep their input order.
	sort.SliceStable(results, func(i, j int) bool {
		return results[i].Score > results[j].Score
	})
	return results, nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/store/mock_test.go | rag/store/mock_test.go | package store
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
// TestMockComponents exercises the mock embedder and the simple reranker
// test doubles: dimensions, single/batch embedding, and basic reranking.
func TestMockComponents(t *testing.T) {
	ctx := context.Background()
	t.Run("MockEmbedder", func(t *testing.T) {
		e := NewMockEmbedder(2)
		assert.Equal(t, 2, e.GetDimension())
		emb, err := e.EmbedDocument(ctx, "test")
		assert.NoError(t, err)
		assert.Len(t, emb, 2)
		embs, err := e.EmbedDocuments(ctx, []string{"test1", "test2"})
		assert.NoError(t, err)
		assert.Len(t, embs, 2)
	})
	t.Run("SimpleReranker Mock", func(t *testing.T) {
		r := NewSimpleReranker()
		docs := []rag.DocumentSearchResult{{Score: 0.1}}
		res, err := r.Rerank(ctx, "query", docs)
		assert.NoError(t, err)
		assert.NotEmpty(t, res)
	})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/store/knowledge_graph.go | rag/store/knowledge_graph.go | package store
import (
"context"
"fmt"
"strings"
"github.com/smallnest/langgraphgo/rag"
)
// NewKnowledgeGraph creates a knowledge graph backend selected by URL scheme.
// Supported schemes: memory:// (in-process) and falkordb:// (FalkorDB over Redis).
func NewKnowledgeGraph(databaseURL string) (rag.KnowledgeGraph, error) {
	switch {
	case strings.HasPrefix(databaseURL, "memory://"):
		return &MemoryGraph{
			entities:      make(map[string]rag.Entity),
			relationships: make(map[string]rag.Relationship),
			entityIndex:   make(map[string][]string),
		}, nil
	case strings.HasPrefix(databaseURL, "falkordb://"):
		return NewFalkorDBGraph(databaseURL)
	default:
		// Placeholder for other database types.
		return nil, fmt.Errorf("only memory:// and falkordb:// URLs are currently supported")
	}
}
// MemoryGraph implements an in-memory knowledge graph.
// NOTE(review): no locking is present — assumed not safe for concurrent use;
// confirm that callers serialize access.
type MemoryGraph struct {
	entities      map[string]rag.Entity       // entity ID -> entity value
	relationships map[string]rag.Relationship // relationship ID -> relationship value
	entityIndex   map[string][]string         // entity type -> IDs of entities with that type
}
// AddEntity adds (or replaces) an entity in the memory graph and indexes it
// by type. Re-adding the same entity no longer appends a duplicate ID to the
// type index (the previous code appended unconditionally).
func (m *MemoryGraph) AddEntity(ctx context.Context, entity *rag.Entity) error {
	m.entities[entity.ID] = *entity
	// Update type index, skipping IDs that are already present.
	for _, id := range m.entityIndex[entity.Type] {
		if id == entity.ID {
			return nil
		}
	}
	m.entityIndex[entity.Type] = append(m.entityIndex[entity.Type], entity.ID)
	return nil
}
// AddRelationship adds (or overwrites) a relationship keyed by its ID.
func (m *MemoryGraph) AddRelationship(ctx context.Context, rel *rag.Relationship) error {
	m.relationships[rel.ID] = *rel
	return nil
}
// Query performs a graph query.
// Entity-type and relationship-type filters are evaluated independently (no
// join between the two result sets), and Limit truncates only the entity
// list — relationships are returned in full.
func (m *MemoryGraph) Query(ctx context.Context, query *rag.GraphQuery) (*rag.GraphQueryResult, error) {
	result := &rag.GraphQueryResult{
		Entities:      make([]*rag.Entity, 0),
		Relationships: make([]*rag.Relationship, 0),
		Paths:         make([][]*rag.Entity, 0),
		Metadata:      make(map[string]any),
	}
	// Filter by entity types
	if len(query.EntityTypes) > 0 {
		for _, entityType := range query.EntityTypes {
			if entityIDs, exists := m.entityIndex[entityType]; exists {
				for _, id := range entityIDs {
					if entity, exists := m.entities[id]; exists {
						e := entity // copy so the result does not alias the map value
						result.Entities = append(result.Entities, &e)
					}
				}
			}
		}
	}
	// Filter by relationship types
	// Note: GraphQuery in rag/types.go has Relationships []string, checking implementation
	if len(query.Relationships) > 0 {
		for _, relType := range query.Relationships {
			for _, rel := range m.relationships {
				if rel.Type == relType {
					r := rel // copy so the result does not alias the map value
					result.Relationships = append(result.Relationships, &r)
				}
			}
		}
	}
	// Apply limit (entities only)
	if query.Limit > 0 && len(result.Entities) > query.Limit {
		result.Entities = result.Entities[:query.Limit]
	}
	return result, nil
}
// GetEntity returns a copy of the entity with the given ID, or an error when
// no such entity exists.
func (m *MemoryGraph) GetEntity(ctx context.Context, id string) (*rag.Entity, error) {
	if entity, ok := m.entities[id]; ok {
		return &entity, nil
	}
	return nil, fmt.Errorf("entity not found: %s", id)
}
// GetRelationship returns a copy of the relationship with the given ID, or an
// error when no such relationship exists.
func (m *MemoryGraph) GetRelationship(ctx context.Context, id string) (*rag.Relationship, error) {
	if rel, ok := m.relationships[id]; ok {
		return &rel, nil
	}
	return nil, fmt.Errorf("relationship not found: %s", id)
}
// GetRelatedEntities finds entities related to a given entity, following
// relationships in either direction up to maxDepth hops (BFS). The previous
// implementation only ever looked one hop away regardless of maxDepth; this
// now matches the FalkorDB implementation's variable-depth semantics.
// maxDepth is clamped to at least 1; the starting entity is never returned.
func (m *MemoryGraph) GetRelatedEntities(ctx context.Context, entityID string, maxDepth int) ([]*rag.Entity, error) {
	if maxDepth < 1 {
		maxDepth = 1
	}
	related := make([]*rag.Entity, 0)
	visited := map[string]bool{entityID: true}
	frontier := []string{entityID}
	for depth := 0; depth < maxDepth && len(frontier) > 0; depth++ {
		next := []string{}
		for _, rel := range m.relationships {
			for _, cur := range frontier {
				// Relationships are treated as undirected for traversal.
				var other string
				if rel.Source == cur {
					other = rel.Target
				} else if rel.Target == cur {
					other = rel.Source
				} else {
					continue
				}
				if visited[other] {
					continue
				}
				visited[other] = true
				next = append(next, other)
				if entity, ok := m.entities[other]; ok {
					e := entity // copy so the result does not alias the map value
					related = append(related, &e)
				}
			}
		}
		frontier = next
	}
	return related, nil
}
// DeleteEntity removes an entity from the memory graph, its type-index entry,
// and every relationship that references it — mirroring the DETACH DELETE
// semantics of the FalkorDB implementation (previously relationships were
// left dangling).
func (m *MemoryGraph) DeleteEntity(ctx context.Context, id string) error {
	delete(m.entities, id)
	// Remove from type index
	for entityType, entityIDs := range m.entityIndex {
		for i, entityID := range entityIDs {
			if entityID == id {
				m.entityIndex[entityType] = append(entityIDs[:i], entityIDs[i+1:]...)
				break
			}
		}
		if len(m.entityIndex[entityType]) == 0 {
			delete(m.entityIndex, entityType)
		}
	}
	// Detach: drop relationships touching the deleted entity. Deleting map
	// entries during range is safe in Go.
	for relID, rel := range m.relationships {
		if rel.Source == id || rel.Target == id {
			delete(m.relationships, relID)
		}
	}
	return nil
}
// DeleteRelationship removes a relationship from the memory graph.
// Deleting an unknown ID is a no-op, not an error.
func (m *MemoryGraph) DeleteRelationship(ctx context.Context, id string) error {
	delete(m.relationships, id)
	return nil
}
// UpdateEntity updates an existing entity in the memory graph, moving its ID
// between type-index buckets when the type changed. Improvements over the
// previous version: a single map lookup instead of two, and the old type's
// bucket is deleted when it becomes empty (consistent with DeleteEntity).
func (m *MemoryGraph) UpdateEntity(ctx context.Context, entity *rag.Entity) error {
	oldEntity, exists := m.entities[entity.ID]
	if !exists {
		return fmt.Errorf("entity not found: %s", entity.ID)
	}
	if oldEntity.Type != entity.Type {
		// Remove from old type index
		bucket := m.entityIndex[oldEntity.Type]
		for i, entityID := range bucket {
			if entityID == entity.ID {
				m.entityIndex[oldEntity.Type] = append(bucket[:i], bucket[i+1:]...)
				break
			}
		}
		if len(m.entityIndex[oldEntity.Type]) == 0 {
			delete(m.entityIndex, oldEntity.Type)
		}
		// Add to new type index (append handles the missing-key case).
		m.entityIndex[entity.Type] = append(m.entityIndex[entity.Type], entity.ID)
	}
	m.entities[entity.ID] = *entity
	return nil
}
// UpdateRelationship updates an existing relationship in the memory graph.
// Returns an error when the relationship ID is unknown.
func (m *MemoryGraph) UpdateRelationship(ctx context.Context, rel *rag.Relationship) error {
	if _, exists := m.relationships[rel.ID]; !exists {
		return fmt.Errorf("relationship not found: %s", rel.ID)
	}
	m.relationships[rel.ID] = *rel
	return nil
}
// Close closes the memory graph. There is no external resource to release;
// all in-memory data is discarded by replacing the maps.
func (m *MemoryGraph) Close() error {
	// Clear all data
	m.entities = make(map[string]rag.Entity)
	m.relationships = make(map[string]rag.Relationship)
	m.entityIndex = make(map[string][]string)
	return nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/store/vector_test.go | rag/store/vector_test.go | package store
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
type mockEmbedder struct {
dim int
}
func (m *mockEmbedder) EmbedDocument(ctx context.Context, text string) ([]float32, error) {
res := make([]float32, m.dim)
for i := 0; i < m.dim; i++ {
res[i] = 0.1
}
return res, nil
}
func (m *mockEmbedder) EmbedDocuments(ctx context.Context, texts []string) ([][]float32, error) {
res := make([][]float32, len(texts))
for i := range texts {
emb, _ := m.EmbedDocument(ctx, texts[i])
res[i] = emb
}
return res, nil
}
func (m *mockEmbedder) GetDimension() int {
return m.dim
}
// TestInMemoryVectorStore exercises the in-memory vector store end to end:
// add/search, metadata filtering, update/delete, batch add, and the internal
// metadata filter predicate. Subtests share one store instance, so later
// subtests depend on documents inserted by earlier ones.
func TestInMemoryVectorStore(t *testing.T) {
	ctx := context.Background()
	embedder := &mockEmbedder{dim: 3}
	s := NewInMemoryVectorStore(embedder)
	t.Run("Add and Search", func(t *testing.T) {
		docs := []rag.Document{
			{ID: "1", Content: "hello", Embedding: []float32{1, 0, 0}},
			{ID: "2", Content: "world", Embedding: []float32{0, 1, 0}},
		}
		err := s.Add(ctx, docs)
		assert.NoError(t, err)
		// Search for something close to "hello"
		results, err := s.Search(ctx, []float32{1, 0.1, 0}, 1)
		assert.NoError(t, err)
		assert.Len(t, results, 1)
		assert.Equal(t, "1", results[0].Document.ID)
		assert.Greater(t, results[0].Score, 0.9)
	})
	t.Run("Search with Filter", func(t *testing.T) {
		docs := []rag.Document{
			{ID: "3", Content: "filtered", Embedding: []float32{0, 0, 1}, Metadata: map[string]any{"type": "special"}},
		}
		s.Add(ctx, docs)
		// Matching filter returns the document; non-matching returns none.
		results, err := s.SearchWithFilter(ctx, []float32{0, 0, 1}, 10, map[string]any{"type": "special"})
		assert.NoError(t, err)
		assert.Len(t, results, 1)
		assert.Equal(t, "3", results[0].Document.ID)
		results, err = s.SearchWithFilter(ctx, []float32{0, 0, 1}, 10, map[string]any{"type": "none"})
		assert.NoError(t, err)
		assert.Len(t, results, 0)
	})
	t.Run("Update and Delete", func(t *testing.T) {
		doc := rag.Document{ID: "1", Content: "updated", Embedding: []float32{1, 1, 1}}
		err := s.Update(ctx, []rag.Document{doc})
		assert.NoError(t, err)
		stats, _ := s.GetStats(ctx)
		countBefore := stats.TotalDocuments
		err = s.Delete(ctx, []string{"1"})
		assert.NoError(t, err)
		stats, _ = s.GetStats(ctx)
		assert.Equal(t, countBefore-1, stats.TotalDocuments)
	})
	t.Run("AddBatch", func(t *testing.T) {
		docs := []rag.Document{{ID: "4", Content: "batch1"}}
		embs := [][]float32{{0.5, 0.5, 0.5}}
		err := s.AddBatch(ctx, docs, embs)
		assert.NoError(t, err)
		stats, _ := s.GetStats(ctx)
		assert.GreaterOrEqual(t, stats.TotalDocuments, 1)
	})
	t.Run("Update without Embedding", func(t *testing.T) {
		// Updating without an embedding should keep the stored vector usable.
		doc := rag.Document{ID: "4", Content: "updated batch1"}
		err := s.Update(ctx, []rag.Document{doc})
		assert.NoError(t, err)
		results, _ := s.Search(ctx, []float32{0.5, 0.5, 0.5}, 1)
		assert.Equal(t, "updated batch1", results[0].Document.Content)
	})
	t.Run("Add without embedding", func(t *testing.T) {
		// The store is expected to fall back to the embedder for the vector.
		doc := rag.Document{ID: "5", Content: "no emb"}
		err := s.Add(ctx, []rag.Document{doc})
		assert.NoError(t, err)
		stats, _ := s.GetStats(ctx)
		assert.GreaterOrEqual(t, stats.TotalVectors, 1)
	})
	t.Run("Update with embedding", func(t *testing.T) {
		doc := rag.Document{ID: "5", Content: "updated with emb"}
		err := s.UpdateWithEmbedding(ctx, doc, []float32{0.9, 0.9, 0.9})
		assert.NoError(t, err)
	})
	t.Run("Delete", func(t *testing.T) {
		err := s.Delete(ctx, []string{"4"})
		assert.NoError(t, err)
	})
	t.Run("Matches Filter", func(t *testing.T) {
		doc := rag.Document{Metadata: map[string]any{"key": "val"}}
		assert.True(t, s.matchesFilter(doc, map[string]any{"key": "val"}))
		assert.False(t, s.matchesFilter(doc, map[string]any{"key": "wrong"}))
		assert.False(t, s.matchesFilter(doc, map[string]any{"missing": "any"}))
	})
}
// TestCosineSimilarity32 checks parallel and orthogonal vectors, plus the
// guard cases (mismatched lengths, zero vectors) which must yield exactly 0.
func TestCosineSimilarity32(t *testing.T) {
	v1 := []float32{1, 0}
	v2 := []float32{1, 0}
	assert.InDelta(t, 1.0, cosineSimilarity32(v1, v2), 1e-6)
	v3 := []float32{0, 1}
	assert.InDelta(t, 0.0, cosineSimilarity32(v1, v3), 1e-6)
	assert.Equal(t, 0.0, cosineSimilarity32([]float32{1}, []float32{1, 2}))
	assert.Equal(t, 0.0, cosineSimilarity32([]float32{0}, []float32{0}))
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/store/falkordb_internal.go | rag/store/falkordb_internal.go | package store
import (
"context"
"crypto/rand"
"fmt"
"os"
"strings"
"github.com/olekukonko/tablewriter"
"github.com/redis/go-redis/v9"
)
// quoteString renders a value for embedding in a Cypher query: strings are
// escaped and wrapped in double quotes; non-strings pass through unchanged.
// A string already wrapped in double quotes is assumed pre-quoted by the
// caller and returned as-is (legacy behavior).
//
// BUGFIX: embedded double quotes and backslashes were previously not escaped,
// so a value like `say "hi"` produced an invalid query.
func quoteString(i any) any {
	s, ok := i.(string)
	if !ok {
		return i
	}
	if len(s) == 0 {
		return "\"\""
	}
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		return s // already quoted by the caller
	}
	// Escape backslashes first so the quote escapes are not doubled.
	s = strings.ReplaceAll(s, "\\", "\\\\")
	// Escape single quotes for Cypher compatibility
	s = strings.ReplaceAll(s, "'", "\\'")
	s = strings.ReplaceAll(s, "\"", "\\\"")
	return "\"" + s + "\""
}
// randomString returns n random ASCII letters using crypto/rand as the
// entropy source. Panics if the system random source fails.
func randomString(n int) string {
	const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	raw := make([]byte, n)
	if _, err := rand.Read(raw); err != nil {
		panic(err)
	}
	out := make([]byte, n)
	for i, b := range raw {
		// Map each random byte onto the alphabet (slight modulo bias is
		// acceptable for alias generation).
		out[i] = letters[int(b)%len(letters)]
	}
	return string(out)
}
// Node represents a node within a graph.
type Node struct {
	ID         string         // server-assigned or caller-assigned identifier
	Alias      string         // Cypher alias; auto-generated by AddNode when empty
	Label      string         // node label, rendered as (:Label)
	Properties map[string]any // property map rendered via quoteString
}
// String renders the node in Cypher pattern syntax: (alias:Label{k:v,...}).
// Property order follows map iteration and is therefore unspecified.
func (n *Node) String() string {
	var b strings.Builder
	b.WriteByte('(')
	b.WriteString(n.Alias)
	if n.Label != "" {
		b.WriteByte(':')
		b.WriteString(n.Label)
	}
	if len(n.Properties) > 0 {
		props := make([]string, 0, len(n.Properties))
		for k, v := range n.Properties {
			props = append(props, fmt.Sprintf("%s:%v", k, quoteString(v)))
		}
		b.WriteByte('{')
		b.WriteString(strings.Join(props, ","))
		b.WriteByte('}')
	}
	b.WriteByte(')')
	return b.String()
}
// Edge represents an edge connecting two nodes in the graph.
type Edge struct {
	Source      *Node          // origin node; must be staged via AddNode first
	Destination *Node          // target node; must be staged via AddNode first
	Relation    string         // relationship type, rendered as [:Relation]
	Properties  map[string]any // property map rendered via quoteString
}
// String renders the edge in Cypher pattern syntax:
// (srcAlias)-[:REL{k:v,...}]->(dstAlias). Property order is unspecified.
func (e *Edge) String() string {
	var b strings.Builder
	b.WriteByte('(')
	b.WriteString(e.Source.Alias)
	b.WriteString(")-[")
	if e.Relation != "" {
		b.WriteByte(':')
		b.WriteString(e.Relation)
	}
	if len(e.Properties) > 0 {
		props := make([]string, 0, len(e.Properties))
		for k, v := range e.Properties {
			props = append(props, fmt.Sprintf("%s:%s", k, quoteString(v)))
		}
		b.WriteByte('{')
		b.WriteString(strings.Join(props, ","))
		b.WriteByte('}')
	}
	b.WriteString("]->(")
	b.WriteString(e.Destination.Alias)
	b.WriteByte(')')
	return b.String()
}
// Graph represents a graph, which is a collection of nodes and edges.
// Nodes and Edges hold the staged (not yet committed) pattern; Conn is the
// Redis connection used to execute GRAPH.* commands.
type Graph struct {
	Name  string
	Nodes map[string]*Node // keyed by node alias
	Edges []*Edge
	Conn  redis.UniversalClient
}
// NewGraph creates a new graph (helper constructor).
// The returned value is a Graph (not a pointer); Edges starts nil.
func NewGraph(name string, conn redis.UniversalClient) Graph {
	return Graph{
		Name:  name,
		Nodes: make(map[string]*Node),
		Conn:  conn,
	}
}
// AddNode stages a node for Commit, generating a random alias when none was
// set. Mutates n (alias assignment) and overwrites any staged node sharing
// the same alias.
func (g *Graph) AddNode(n *Node) error {
	if n.Alias == "" {
		n.Alias = randomString(10)
	}
	g.Nodes[n.Alias] = n
	return nil
}
// AddEdge stages an edge for Commit. Both endpoints must be non-nil and
// already staged via AddNode (the alias lookup relies on that).
// Fixes the "neeeds" typo in the two error messages.
func (g *Graph) AddEdge(e *Edge) error {
	if e.Source == nil || e.Destination == nil {
		return fmt.Errorf("AddEdge: both source and destination nodes should be defined")
	}
	if _, ok := g.Nodes[e.Source.Alias]; !ok {
		return fmt.Errorf("AddEdge: source node needs to be added to the graph first")
	}
	if _, ok := g.Nodes[e.Destination.Alias]; !ok {
		return fmt.Errorf("AddEdge: destination node needs to be added to the graph first")
	}
	g.Edges = append(g.Edges, e)
	return nil
}
// Commit creates every staged node and edge in a single CREATE query.
// An empty graph is rejected up front: the previous string-slicing version
// would have sent an invalid bare "CREATE" query to the server.
func (g *Graph) Commit(ctx context.Context) (QueryResult, error) {
	if len(g.Nodes) == 0 && len(g.Edges) == 0 {
		return QueryResult{}, fmt.Errorf("commit: graph has no nodes or edges")
	}
	items := make([]string, 0, len(g.Nodes)+len(g.Edges))
	for _, n := range g.Nodes {
		items = append(items, n.String())
	}
	for _, e := range g.Edges {
		items = append(items, e.String())
	}
	return g.Query(ctx, "CREATE "+strings.Join(items, ","))
}
// QueryResult represents the results of a query.
type QueryResult struct {
	Header     []string // column names (present only on read queries)
	Results    [][]any  // one slice of raw values per result row
	Statistics []string // server-side execution statistics lines
}
// Query executes a Cypher query against the graph via GRAPH.QUERY.
// The reply is a top-level array whose length depends on the query kind:
// 3 elements = header + rows + stats (read), 2 = rows + stats,
// 1 = stats only (pure write). The previous version triplicated the parsing
// for each arity; it is now factored into two small helpers.
func (g *Graph) Query(ctx context.Context, q string) (QueryResult, error) {
	qr := QueryResult{}
	// go-redis Do returns a Cmd which can be used to get the result
	res, err := g.Conn.Do(ctx, "GRAPH.QUERY", g.Name, q).Result()
	if err != nil {
		return qr, err
	}
	r, ok := res.([]any)
	if !ok {
		return qr, fmt.Errorf("unexpected response type: %T", res)
	}
	switch len(r) {
	case 3:
		qr.Header = replyStrings(r[0])
		qr.Results = replyRows(r[1])
		qr.Statistics = replyStrings(r[2])
	case 2:
		qr.Results = replyRows(r[0])
		qr.Statistics = replyStrings(r[1])
	case 1:
		// Only statistics (e.g., a MERGE that returns no data); that is
		// fine for write operations.
		qr.Statistics = replyStrings(r[0])
	default:
		return qr, fmt.Errorf("unexpected response length: %d", len(r))
	}
	return qr, nil
}

// replyStrings converts a raw []any reply element into a string slice;
// returns nil when raw has another shape (matching the old lenient behavior).
func replyStrings(raw any) []string {
	items, ok := raw.([]any)
	if !ok {
		return nil
	}
	out := make([]string, len(items))
	for i, it := range items {
		out[i] = fmt.Sprint(it)
	}
	return out
}

// replyRows converts a raw []any reply element into result rows; rows that
// are not []any stay nil, and a non-slice input yields nil.
func replyRows(raw any) [][]any {
	items, ok := raw.([]any)
	if !ok {
		return nil
	}
	rows := make([][]any, len(items))
	for i, it := range items {
		if vals, ok := it.([]any); ok {
			rows[i] = vals
		}
	}
	return rows
}
// Delete drops the entire graph on the server via GRAPH.DELETE.
func (g *Graph) Delete(ctx context.Context) error {
	return g.Conn.Do(ctx, "GRAPH.DELETE", g.Name).Err()
}
// PrettyPrint writes the result rows as an ASCII table to stdout (when there
// are rows), followed by the statistics lines. Intended for debugging/CLI use.
func (qr *QueryResult) PrettyPrint() {
	if len(qr.Results) > 0 {
		table := tablewriter.NewWriter(os.Stdout)
		table.SetAutoFormatHeaders(false)
		if len(qr.Header) > 0 {
			table.SetHeader(qr.Header)
		}
		for _, row := range qr.Results {
			// Stringify every cell since rows carry raw reply values.
			sRow := make([]string, len(row))
			for i, v := range row {
				sRow[i] = fmt.Sprint(v)
			}
			table.Append(sRow)
		}
		table.Render()
	}
	for _, stat := range qr.Statistics {
		fmt.Fprintf(os.Stdout, "\n%s", stat)
	}
	fmt.Fprintf(os.Stdout, "\n")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/store/falkordb_test.go | rag/store/falkordb_test.go | package store
import (
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
// TestNewFalkorDBGraph verifies connection-string parsing (custom and default
// graph names) and the constructor's error handling for malformed URLs.
func TestNewFalkorDBGraph(t *testing.T) {
	t.Run("Valid connection string with custom graph name", func(t *testing.T) {
		g, err := NewFalkorDBGraph("falkordb://localhost:6379/custom_graph")
		assert.NoError(t, err)
		assert.NotNil(t, g)
		fg := g.(*FalkorDBGraph)
		assert.Equal(t, "custom_graph", fg.graphName)
		assert.NotNil(t, fg.client)
		fg.Close()
	})
	t.Run("Valid connection string with default graph name", func(t *testing.T) {
		g, err := NewFalkorDBGraph("falkordb://localhost:6379")
		assert.NoError(t, err)
		assert.NotNil(t, g)
		fg := g.(*FalkorDBGraph)
		assert.Equal(t, "rag", fg.graphName) // Default graph name
		fg.Close()
	})
	t.Run("Invalid URL", func(t *testing.T) {
		g, err := NewFalkorDBGraph("://invalid")
		assert.Error(t, err)
		assert.Nil(t, g)
		assert.Contains(t, err.Error(), "invalid connection string")
	})
	t.Run("Missing host", func(t *testing.T) {
		g, err := NewFalkorDBGraph("falkordb:///graph")
		assert.Error(t, err)
		assert.Nil(t, g)
		assert.Contains(t, err.Error(), "missing host")
	})
	t.Run("NewKnowledgeGraph factory", func(t *testing.T) {
		g, err := NewKnowledgeGraph("falkordb://localhost:6379/graph")
		// May fail when no server is reachable; only assert on success.
		if err == nil {
			assert.NotNil(t, g)
			if fg, ok := g.(*FalkorDBGraph); ok {
				fg.Close()
			}
		}
	})
}
// TestSanitizeLabel is a table-driven test for label sanitization: illegal
// characters become underscores, and only the empty string maps to "Entity".
func TestSanitizeLabel(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{"Simple label", "Person", "Person"},
		{"Label with space", "Person Age", "Person_Age"},
		{"Label with special chars", "Person-Type@123", "Person_Type_123"},
		{"Empty label", "", "Entity"},
		{"Only special chars", "@#$%", "____"}, // Special chars become underscores, not Entity
		{"Mixed case", "MyEntity", "MyEntity"},
		{"Numbers", "Entity123", "Entity123"},
		{"Underscores preserved", "My_Entity", "My_Entity"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := sanitizeLabel(tt.input)
			assert.Equal(t, tt.expected, result)
		})
	}
}
// TestPropsToString checks the Cypher map-literal rendering for string,
// numeric, boolean, []float32 (embedding) and empty property maps. Assertions
// use Contains because map iteration order is unspecified.
func TestPropsToString(t *testing.T) {
	t.Run("String properties", func(t *testing.T) {
		props := map[string]any{"name": "test", "age": 30}
		s := propsToString(props)
		assert.Contains(t, s, "name")
		assert.Contains(t, s, "age")
		assert.Contains(t, s, "{")
		assert.Contains(t, s, "}")
	})
	t.Run("Float32 slice embedding", func(t *testing.T) {
		props := map[string]any{
			"name":      "entity",
			"embedding": []float32{0.1, 0.2, 0.3},
		}
		s := propsToString(props)
		assert.Contains(t, s, "embedding")
		assert.Contains(t, s, "[")
		assert.Contains(t, s, "]")
	})
	t.Run("Boolean and numeric values", func(t *testing.T) {
		props := map[string]any{
			"active": true,
			"count":  42,
			"ratio":  3.14,
		}
		s := propsToString(props)
		assert.Contains(t, s, "active")
		assert.Contains(t, s, "count")
		assert.Contains(t, s, "ratio")
	})
	t.Run("Empty map", func(t *testing.T) {
		props := map[string]any{}
		s := propsToString(props)
		assert.Equal(t, "{}", s)
	})
}
// TestEntityToMap checks that entity fields and custom properties are
// flattened into one map, and that the embedding key is present only when an
// embedding exists.
func TestEntityToMap(t *testing.T) {
	t.Run("Entity with all fields", func(t *testing.T) {
		e := &rag.Entity{
			ID:         "1",
			Name:       "John",
			Type:       "Person",
			Embedding:  []float32{0.1, 0.2},
			Properties: map[string]any{"age": 30},
		}
		m := entityToMap(e)
		assert.Equal(t, "John", m["name"])
		assert.Equal(t, "Person", m["type"])
		assert.Equal(t, 30, m["age"])
		assert.NotNil(t, m["embedding"])
	})
	t.Run("Entity without embedding", func(t *testing.T) {
		e := &rag.Entity{
			ID:         "2",
			Name:       "Jane",
			Type:       "Person",
			Properties: map[string]any{"city": "NYC"},
		}
		m := entityToMap(e)
		assert.Equal(t, "Jane", m["name"])
		assert.Equal(t, "Person", m["type"])
		assert.Equal(t, "NYC", m["city"])
		assert.Nil(t, m["embedding"])
	})
	t.Run("Entity with empty properties", func(t *testing.T) {
		e := &rag.Entity{
			ID:         "3",
			Name:       "Test",
			Type:       "Type",
			Properties: map[string]any{},
		}
		m := entityToMap(e)
		assert.Equal(t, "Test", m["name"])
		assert.Equal(t, "Type", m["type"])
	})
}
// TestRelationshipToMap verifies that relationshipToMap flattens a
// rag.Relationship into a map: type/weight/confidence become top-level keys
// (present even for the zero value) and Properties entries are merged in.
func TestRelationshipToMap(t *testing.T) {
	t.Run("Relationship with all fields", func(t *testing.T) {
		r := &rag.Relationship{
			ID:         "1",
			Source:     "s",
			Target:     "t",
			Type:       "KNOWS",
			Weight:     0.8,
			Confidence: 0.9,
			Properties: map[string]any{"since": 2020},
		}
		m := relationshipToMap(r)
		assert.Equal(t, "KNOWS", m["type"])
		assert.Equal(t, 0.8, m["weight"])
		assert.Equal(t, 0.9, m["confidence"])
		// Custom properties are merged alongside the built-in keys.
		assert.Equal(t, 2020, m["since"])
	})
	t.Run("Relationship with empty properties", func(t *testing.T) {
		r := &rag.Relationship{
			ID:         "2",
			Source:     "a",
			Target:     "b",
			Type:       "RELATED",
			Properties: map[string]any{},
		}
		m := relationshipToMap(r)
		assert.Equal(t, "RELATED", m["type"])
		// weight/confidence keys exist even when left at their zero values.
		assert.Contains(t, m, "weight")
		assert.Contains(t, m, "confidence")
	})
}
// TestToString checks toString's conversion of assorted scalar inputs
// (string, []byte, int, float, bool) to their string representations.
func TestToString(t *testing.T) {
	cases := []struct {
		name string
		in   any
		want string
	}{
		{"String input", "hello", "hello"},
		{"Byte slice", []byte("world"), "world"},
		{"Integer", 123, "123"},
		{"Float", 3.14, "3.14"},
		{"Boolean", true, "true"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, toString(tc.in))
		})
	}
}
// TestParseNode exercises parseNode against the shapes FalkorDB replies can
// take: the positional format [id, [labels], [property triples]], a
// key/value-pair format, and assorted malformed inputs which must either
// yield nil (non-slice input) or an empty-but-non-nil entity.
func TestParseNode(t *testing.T) {
	t.Run("Standard format with labels and properties", func(t *testing.T) {
		// Format: [id, [labels], [[key1, val1], [key2, val2]]]
		// Each property element is a FalkorDB triple [typeID, length, value].
		obj := []any{
			int64(1),
			[]any{[]byte("Person")},
			[]any{
				[]any{int64(1), int64(2), "id"},
				[]any{int64(1), int64(4), "test"},
				[]any{int64(2), int64(4), "name"},
				[]any{int64(2), int64(4), "John"},
			},
		}
		e := parseNode(obj)
		assert.NotNil(t, e)
		assert.Equal(t, "Person", e.Type)
		assert.Equal(t, "test", e.ID)
		assert.Equal(t, "John", e.Name)
	})
	t.Run("Standard format with string labels", func(t *testing.T) {
		// Labels may arrive as plain strings instead of byte slices.
		obj := []any{
			int64(1),
			[]any{"Company"},
			[]any{
				[]any{int64(1), int64(2), "id"},
				[]any{int64(1), int64(3), "c1"},
			},
		}
		e := parseNode(obj)
		assert.NotNil(t, e)
		assert.Equal(t, "Company", e.Type)
		assert.Equal(t, "c1", e.ID)
	})
	t.Run("KV format", func(t *testing.T) {
		// Key/value-pair reply format; the "id" property wins over the
		// top-level "id" pair.
		obj := []any{
			[]any{"id", "node1"},
			[]any{"labels", []any{"Person"}},
			[]any{"properties", []any{
				[]any{"name", "Alice"},
				[]any{"id", "alice1"},
			}},
		}
		e := parseNode(obj)
		assert.NotNil(t, e)
		assert.Equal(t, "alice1", e.ID)
		assert.Equal(t, "Alice", e.Name)
		assert.Equal(t, "Person", e.Type)
	})
	t.Run("KV format with string labels", func(t *testing.T) {
		obj := []any{
			[]any{"id", "node2"},
			[]any{"labels", []any{"Product", "Item"}},
			[]any{"properties", []any{
				[]any{"name", "Widget"},
				[]any{"price", "9.99"},
			}},
		}
		e := parseNode(obj)
		assert.NotNil(t, e)
		assert.Equal(t, "node2", e.ID)
		assert.Equal(t, "Widget", e.Name)
		assert.Equal(t, "Product", e.Type) // First label
		assert.Equal(t, "9.99", e.Properties["price"])
	})
	t.Run("Invalid format", func(t *testing.T) {
		// Only a non-slice input yields nil.
		e := parseNode("not a slice")
		assert.Nil(t, e)
	})
	t.Run("Empty slice", func(t *testing.T) {
		// Degenerate slices still produce an (empty) entity.
		e := parseNode([]any{})
		assert.NotNil(t, e)
	})
	t.Run("Single element slice", func(t *testing.T) {
		e := parseNode([]any{int64(1)})
		assert.NotNil(t, e)
	})
	t.Run("Two element slice without valid KV", func(t *testing.T) {
		obj := []any{int64(1), "not a slice"}
		e := parseNode(obj)
		assert.NotNil(t, e)
		assert.Empty(t, e.ID)
	})
	t.Run("Complex nested structure", func(t *testing.T) {
		// The node payload may itself be nested one level deeper.
		obj := []any{
			int64(1),
			[]any{
				int64(2),
				[]any{[]byte("Label")},
				[]any{
					[]any{int64(1), int64(2), "id"},
					[]any{int64(1), int64(3), "id1"},
				},
			},
		}
		e := parseNode(obj)
		assert.NotNil(t, e)
		assert.Equal(t, "id1", e.ID)
	})
}
// TestParseNodeKV exercises parseNodeKV directly: complete key/value pairs
// populate ID, Type, Name and custom properties, while malformed pairs are
// skipped, leaving an empty-but-non-nil entity.
func TestParseNodeKV(t *testing.T) {
	t.Run("Complete KV pairs", func(t *testing.T) {
		pairs := []any{
			[]any{"id", "entity1"},
			[]any{"labels", []any{"Person"}},
			[]any{"properties", []any{
				[]any{"name", "Bob"},
				[]any{"type", "User"},
				[]any{"age", "30"},
			}},
		}
		e := parseNodeKV(pairs)
		assert.NotNil(t, e)
		assert.Equal(t, "entity1", e.ID)
		// A "type" property overrides the label-derived type.
		assert.Equal(t, "User", e.Type)
		assert.Equal(t, "Bob", e.Name)
		assert.Equal(t, "30", e.Properties["age"])
	})
	t.Run("Invalid pairs", func(t *testing.T) {
		// Non-pair and single-element entries are ignored rather than
		// causing a failure.
		pairs := []any{
			"not a pair",
			[]any{"single"},
		}
		e := parseNodeKV(pairs)
		assert.NotNil(t, e)
		assert.Empty(t, e.ID)
	})
}
// TestParseEdge exercises parseEdge over the reply shapes FalkorDB can
// produce for relationships: the positional format
// [id, type, srcID, dstID, [property pairs]], a key/value format, and
// malformed inputs. The source/target parameters passed to parseEdge are
// always used as the relationship endpoints — KV "src"/"dst" keys never
// override them in the current implementation.
func TestParseEdge(t *testing.T) {
	t.Run("Standard edge format", func(t *testing.T) {
		obj := []any{
			int64(1),
			[]byte("KNOWS"),
			int64(2),
			int64(3),
			[]any{
				[]any{"id", "rel1"},
				[]any{"weight", 0.5},
			},
		}
		rel := parseEdge(obj, "source1", "target1")
		assert.NotNil(t, rel)
		assert.Equal(t, "source1", rel.Source)
		assert.Equal(t, "target1", rel.Target)
		assert.Equal(t, "KNOWS", rel.Type)
		// The "id" property becomes the relationship ID.
		assert.Equal(t, "rel1", rel.ID)
	})
	t.Run("Standard edge with properties containing id", func(t *testing.T) {
		obj := []any{
			int64(1),
			[]byte("LIKES"),
			int64(2),
			int64(3),
			[]any{
				[]any{"custom_prop", "value1"},
				[]any{"id", "edge123"},
			},
		}
		rel := parseEdge(obj, "src", "dst")
		assert.NotNil(t, rel)
		assert.Equal(t, "LIKES", rel.Type)
		assert.Equal(t, "edge123", rel.ID)
		assert.Equal(t, "value1", rel.Properties["custom_prop"])
	})
	t.Run("KV edge format", func(t *testing.T) {
		// Nested "properties" id takes precedence over the top-level id.
		obj := []any{
			[]any{"id", "edge1"},
			[]any{"type", "RELATED"},
			[]any{"properties", []any{
				[]any{"id", "edge_id1"},
				[]any{"strength", "high"},
			}},
		}
		rel := parseEdge(obj, "src", "dst")
		assert.NotNil(t, rel)
		assert.Equal(t, "src", rel.Source)
		assert.Equal(t, "dst", rel.Target)
		assert.Equal(t, "RELATED", rel.Type)
		assert.Equal(t, "edge_id1", rel.ID)
		assert.Equal(t, "high", rel.Properties["strength"])
	})
	t.Run("KV edge format with empty properties", func(t *testing.T) {
		obj := []any{
			[]any{"id", "edge2"},
			[]any{"type", "CONNECTS"},
			[]any{"properties", []any{}},
		}
		rel := parseEdge(obj, "a", "b")
		assert.NotNil(t, rel)
		assert.Equal(t, "a", rel.Source)
		assert.Equal(t, "b", rel.Target)
		assert.Equal(t, "CONNECTS", rel.Type)
		assert.Equal(t, "edge2", rel.ID)
	})
	t.Run("KV edge format without properties key", func(t *testing.T) {
		obj := []any{
			[]any{"id", "edge3"},
			[]any{"type", "LINKS"},
			[]any{"src", "nodeA"},
			[]any{"dst", "nodeB"},
		}
		rel := parseEdge(obj, "x", "y")
		assert.NotNil(t, rel)
		// Endpoints come from the parameters, not the KV src/dst entries.
		assert.Equal(t, "x", rel.Source)
		assert.Equal(t, "y", rel.Target)
		assert.Equal(t, "LINKS", rel.Type)
	})
	t.Run("Invalid format", func(t *testing.T) {
		// Non-slice input yields nil.
		rel := parseEdge("not a slice", "src", "dst")
		assert.Nil(t, rel)
	})
	t.Run("Short slice", func(t *testing.T) {
		// Two positional elements are not enough for an edge.
		obj := []any{int64(1), []byte("TYPE")}
		rel := parseEdge(obj, "s", "t")
		assert.Nil(t, rel)
	})
	t.Run("Three element slice", func(t *testing.T) {
		obj := []any{int64(1), []byte("TEST"), int64(3)}
		rel := parseEdge(obj, "a", "b")
		// Three elements: first is id, second is type (byte), third is something
		// The code returns a relationship with type set
		assert.NotNil(t, rel)
		assert.Equal(t, "TEST", rel.Type)
		assert.Equal(t, "a", rel.Source)
		assert.Equal(t, "b", rel.Target)
	})
	t.Run("String type", func(t *testing.T) {
		// The type field may be a plain string instead of a byte slice.
		obj := []any{
			int64(1),
			"WORKS_WITH",
			int64(2),
			int64(3),
			[]any{},
		}
		rel := parseEdge(obj, "a", "b")
		assert.NotNil(t, rel)
		assert.Equal(t, "WORKS_WITH", rel.Type)
	})
	t.Run("String type with non-empty properties", func(t *testing.T) {
		obj := []any{
			int64(1),
			"MARRIED_TO",
			int64(2),
			int64(3),
			[]any{
				[]any{"since", "2020"},
				[]any{"id", "rel_custom_id"},
			},
		}
		rel := parseEdge(obj, "p1", "p2")
		assert.NotNil(t, rel)
		assert.Equal(t, "MARRIED_TO", rel.Type)
		assert.Equal(t, "rel_custom_id", rel.ID)
		assert.Equal(t, "2020", rel.Properties["since"])
	})
	t.Run("Empty slice", func(t *testing.T) {
		rel := parseEdge([]any{}, "src", "dst")
		assert.Nil(t, rel)
	})
	t.Run("KV edge format with src key", func(t *testing.T) {
		obj := []any{
			[]any{"id", "edge_src"},
			[]any{"src", "node_src"},
			[]any{"dst", "node_dst"},
		}
		rel := parseEdge(obj, "x", "y")
		assert.NotNil(t, rel)
		assert.Equal(t, "edge_src", rel.ID)
		// src and dst keys are recognized but don't override the parameters
		assert.Equal(t, "x", rel.Source)
		assert.Equal(t, "y", rel.Target)
	})
	t.Run("KV edge format with src and dst override", func(t *testing.T) {
		obj := []any{
			[]any{"type", "CONTAINS"},
			[]any{"src", "actual_source"},
			[]any{"dst", "actual_target"},
		}
		rel := parseEdge(obj, "param_source", "param_target")
		assert.NotNil(t, rel)
		assert.Equal(t, "CONTAINS", rel.Type)
		// The src/dst in KV don't override parameters in current implementation
		assert.Equal(t, "param_source", rel.Source)
		assert.Equal(t, "param_target", rel.Target)
	})
	t.Run("Edge with byte type value in properties", func(t *testing.T) {
		// Property values arriving as byte slices are converted to strings.
		obj := []any{
			int64(1),
			[]byte("CONNECTED"),
			int64(2),
			int64(3),
			[]any{
				[]any{"id", "edge_bytes"},
				[]any{"note", []byte("note_value")},
			},
		}
		rel := parseEdge(obj, "s", "t")
		assert.NotNil(t, rel)
		assert.Equal(t, "CONNECTED", rel.Type)
		assert.Equal(t, "edge_bytes", rel.ID)
		assert.Equal(t, "note_value", rel.Properties["note"])
	})
	t.Run("Edge with weight in properties", func(t *testing.T) {
		obj := []any{
			int64(1),
			"RELATES",
			int64(2),
			int64(3),
			[]any{
				[]any{"weight", "0.7"},
				[]any{"id", "rel_weight"},
			},
		}
		rel := parseEdge(obj, "a", "b")
		assert.NotNil(t, rel)
		assert.Equal(t, "RELATES", rel.Type)
		assert.Equal(t, "rel_weight", rel.ID)
		// String-valued weights are not parsed into Weight.
		assert.Equal(t, float64(0), rel.Weight) // weight is set to 0
	})
	t.Run("KV edge with weight in nested properties", func(t *testing.T) {
		obj := []any{
			[]any{"id", "edge_kvp"},
			[]any{"type", "WEIGHTED"},
			[]any{"properties", []any{
				[]any{"weight", "0.9"},
				[]any{"id", "prop_id"},
			}},
		}
		rel := parseEdge(obj, "src", "dst")
		assert.NotNil(t, rel)
		assert.Equal(t, "prop_id", rel.ID) // properties id overrides top-level id
		assert.Equal(t, "WEIGHTED", rel.Type)
		assert.Equal(t, float64(0), rel.Weight)
	})
	t.Run("KV edge with non-KV first element", func(t *testing.T) {
		obj := []any{
			"not a KV pair",
			[]any{"type", "SOME_TYPE"},
			[]any{"src", "source"},
		}
		rel := parseEdge(obj, "s", "t")
		assert.NotNil(t, rel)
		// First element is not a KV pair with special keys, so it falls through to standard parsing
		// But it's also not a []any, so we don't match the KV format
	})
}
// TestParseFalkorDBProperties checks that parseFalkorDBProperties interprets
// alternating [typeID, length, value] triples as key/value pairs, routing the
// well-known keys "id", "name" and "type" to the entity fields and everything
// else into Properties.
func TestParseFalkorDBProperties(t *testing.T) {
	t.Run("Even number of properties", func(t *testing.T) {
		props := []any{
			[]any{int64(1), int64(2), "name"},
			[]any{int64(2), int64(4), "John"},
			[]any{int64(3), int64(4), "type"},
			[]any{int64(4), int64(6), "Person"},
		}
		e := &rag.Entity{Properties: make(map[string]any)}
		parseFalkorDBProperties(props, e)
		assert.Equal(t, "John", e.Name)
		assert.Equal(t, "Person", e.Type)
	})
	t.Run("Odd number of properties", func(t *testing.T) {
		// The trailing unpaired key ("age") has no value and is dropped.
		props := []any{
			[]any{int64(1), int64(2), "id"},
			[]any{int64(2), int64(5), "test1"},
			[]any{int64(3), int64(3), "age"},
		}
		e := &rag.Entity{Properties: make(map[string]any)}
		parseFalkorDBProperties(props, e)
		assert.Equal(t, "test1", e.ID)
	})
	t.Run("Custom properties", func(t *testing.T) {
		// Keys outside id/name/type land in the Properties map.
		props := []any{
			[]any{int64(1), int64(3), "city"},
			[]any{int64(2), int64(3), "NYC"},
			[]any{int64(3), int64(3), "country"},
			[]any{int64(4), int64(3), "USA"},
		}
		e := &rag.Entity{Properties: make(map[string]any)}
		parseFalkorDBProperties(props, e)
		assert.Equal(t, "NYC", e.Properties["city"])
		assert.Equal(t, "USA", e.Properties["country"])
	})
}
// TestExtractStringFromFalkorDBFormat is a table test over the scalar string
// encodings FalkorDB replies use: [id, len, str] triples, [id, str] pairs,
// bare strings/byte slices, and unsupported inputs (which yield "").
func TestExtractStringFromFalkorDBFormat(t *testing.T) {
	tests := []struct {
		name     string
		input    any
		expected string
	}{
		{
			name:     "Three element array [id, len, str]",
			input:    []any{int64(1), int64(5), "hello"},
			expected: "hello",
		},
		{
			name:     "Three element array with bytes",
			input:    []any{int64(1), int64(5), []byte("world")},
			expected: "world",
		},
		{
			name:     "Two element array [id, str]",
			input:    []any{int64(1), "test"},
			expected: "test",
		},
		{
			name:     "Two element array with bytes",
			input:    []any{int64(1), []byte("test2")},
			expected: "test2",
		},
		{
			name:     "Direct string",
			input:    "direct",
			expected: "direct",
		},
		{
			name:     "Direct bytes",
			input:    []byte("bytes"),
			expected: "bytes",
		},
		{
			name:     "Empty array",
			input:    []any{},
			expected: "",
		},
		{
			name:     "Single element array",
			input:    []any{int64(1)},
			expected: "",
		},
		{
			name:     "Unsupported type",
			input:    123,
			expected: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := extractStringFromFalkorDBFormat(tt.input)
			assert.Equal(t, tt.expected, result)
		})
	}
}
// TestFalkorDBClose exercises FalkorDBGraph.Close for both a freshly
// constructed client and a nil client.
func TestFalkorDBClose(t *testing.T) {
	t.Run("Close with valid client", func(t *testing.T) {
		fg, err := NewFalkorDBGraph("falkordb://localhost:6379/test")
		assert.NoError(t, err)
		assert.NotNil(t, fg)
		// Type assert to the concrete FalkorDBGraph implementation.
		graph := fg.(*FalkorDBGraph)
		// Close may legitimately return an error when no Redis server is
		// reachable, so only require that it does not panic. The previous
		// assert.NoError made this test flaky in environments without Redis
		// (the old comment even acknowledged that Close "might fail").
		assert.NotPanics(t, func() { _ = graph.Close() })
	})
	t.Run("Close with nil client", func(t *testing.T) {
		fg := &FalkorDBGraph{client: nil}
		err := fg.Close()
		// Closing with a nil client must be a no-op success.
		assert.NoError(t, err)
	})
}
// TestInternalHelpers covers small internal helpers: quoteString escaping,
// randomString generation, and the Cypher-style String() renderings of Node
// and Edge.
func TestInternalHelpers(t *testing.T) {
	t.Run("quoteString with empty string", func(t *testing.T) {
		assert.Equal(t, "\"\"", quoteString(""))
	})
	t.Run("quoteString with plain string", func(t *testing.T) {
		assert.Equal(t, "\"test\"", quoteString("test"))
	})
	t.Run("quoteString with quoted string", func(t *testing.T) {
		// Already-quoted input is not quoted a second time.
		assert.Equal(t, "\"already\"", quoteString("\"already\""))
	})
	t.Run("quoteString with single quotes", func(t *testing.T) {
		// Single quotes are backslash-escaped inside the quoted result.
		result := quoteString("it's")
		assert.Equal(t, "\"it\\'s\"", result)
	})
	t.Run("quoteString with non-string types", func(t *testing.T) {
		// Non-string values pass through unchanged.
		assert.Equal(t, 123, quoteString(123))
		assert.Equal(t, true, quoteString(true))
		assert.Equal(t, 3.14, quoteString(3.14))
	})
	t.Run("randomString", func(t *testing.T) {
		rs := randomString(10)
		assert.Len(t, rs, 10)
		// Different calls should produce different strings
		// (NOTE: probabilistic — a collision is astronomically unlikely
		// but not impossible).
		rs2 := randomString(10)
		assert.NotEqual(t, rs, rs2)
		// Verify it's only letters
		for _, c := range rs {
			assert.True(t, (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'))
		}
	})
	t.Run("Node String with all fields", func(t *testing.T) {
		n := &Node{Alias: "a", Label: "Person", Properties: map[string]any{"name": "John", "age": 30}}
		s := n.String()
		// Alias and label render as "alias:Label"; properties follow.
		assert.Contains(t, s, "a:Person")
		assert.Contains(t, s, "name")
		assert.Contains(t, s, "age")
	})
	t.Run("Node String with only alias", func(t *testing.T) {
		n := &Node{Alias: "x"}
		s := n.String()
		assert.Equal(t, "(x)", s)
	})
	t.Run("Node String with only label", func(t *testing.T) {
		n := &Node{Label: "Type"}
		s := n.String()
		assert.Equal(t, "(:Type)", s)
	})
	t.Run("Edge String", func(t *testing.T) {
		n1 := &Node{Alias: "a"}
		n2 := &Node{Alias: "b"}
		e := &Edge{Source: n1, Destination: n2, Relation: "KNOWS"}
		s := e.String()
		assert.Contains(t, s, "-[:KNOWS]->")
	})
	t.Run("Edge String with properties", func(t *testing.T) {
		n1 := &Node{Alias: "src"}
		n2 := &Node{Alias: "dst"}
		e := &Edge{Source: n1, Destination: n2, Relation: "LIKES", Properties: map[string]any{"weight": 0.5}}
		s := e.String()
		assert.Contains(t, s, "-[:LIKES{") // Properties are inside braces
		assert.Contains(t, s, "weight")
	})
	t.Run("Edge String without relation", func(t *testing.T) {
		// No relation still yields the arrow skeleton.
		n1 := &Node{Alias: "a"}
		n2 := &Node{Alias: "b"}
		e := &Edge{Source: n1, Destination: n2}
		s := e.String()
		assert.Contains(t, s, "-[")
		assert.Contains(t, s, "]->")
	})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/store/knowledge_graph_test.go | rag/store/knowledge_graph_test.go | package store
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
// TestInMemoryKnowledgeGraph exercises the MemoryGraph CRUD and query API
// end to end against a single shared graph instance.
//
// NOTE(review): the subtests deliberately share state — later subtests rely
// on entities/relationships created (or deleted) by earlier ones, so their
// order matters and they cannot run in isolation. Several AddEntity /
// AddRelationship error returns are intentionally ignored here; for an
// in-memory store these are not expected to fail.
func TestInMemoryKnowledgeGraph(t *testing.T) {
	ctx := context.Background()
	// "memory://" selects the in-memory MemoryGraph implementation.
	kgInterface, err := NewKnowledgeGraph("memory://")
	assert.NoError(t, err)
	kg := kgInterface.(*MemoryGraph)
	assert.NotNil(t, kg)
	t.Run("Add and Get Entity", func(t *testing.T) {
		e := &rag.Entity{ID: "e1", Name: "entity1", Type: "person"}
		err := kg.AddEntity(ctx, e)
		assert.NoError(t, err)
		res, err := kg.GetEntity(ctx, "e1")
		assert.NoError(t, err)
		assert.Equal(t, "entity1", res.Name)
	})
	t.Run("Add Relationship", func(t *testing.T) {
		// Relationships may reference entities that do not exist yet ("e2").
		r := &rag.Relationship{ID: "r1", Source: "e1", Target: "e2", Type: "knows"}
		err := kg.AddRelationship(ctx, r)
		assert.NoError(t, err)
		rel, err := kg.GetRelationship(ctx, "r1")
		assert.NoError(t, err)
		assert.Equal(t, "knows", rel.Type)
	})
	t.Run("Related Entities", func(t *testing.T) {
		kg.AddEntity(ctx, &rag.Entity{ID: "e2", Name: "entity2"})
		related, err := kg.GetRelatedEntities(ctx, "e1", 1)
		assert.NoError(t, err)
		assert.NotEmpty(t, related)
	})
	t.Run("Delete and Update", func(t *testing.T) {
		// Deleting "e1" means a subsequent lookup must fail.
		err := kg.DeleteEntity(ctx, "e1")
		assert.NoError(t, err)
		_, err = kg.GetEntity(ctx, "e1")
		assert.Error(t, err)
	})
	t.Run("Query", func(t *testing.T) {
		kg.AddEntity(ctx, &rag.Entity{ID: "e3", Type: "type1"})
		res, err := kg.Query(ctx, &rag.GraphQuery{EntityTypes: []string{"type1"}})
		assert.NoError(t, err)
		assert.NotEmpty(t, res.Entities)
	})
	t.Run("Query with Filters", func(t *testing.T) {
		kg.AddEntity(ctx, &rag.Entity{ID: "e5", Type: "T1"})
		kg.AddRelationship(ctx, &rag.Relationship{ID: "r3", Type: "R1"})
		q := &rag.GraphQuery{
			EntityTypes:   []string{"T1"},
			Relationships: []string{"R1"},
		}
		res, err := kg.Query(ctx, q)
		assert.NoError(t, err)
		assert.NotEmpty(t, res.Entities)
		assert.NotEmpty(t, res.Relationships)
	})
	t.Run("Related Entities Bi-directional", func(t *testing.T) {
		// Relatedness is symmetric: both endpoints see the other.
		kg.AddEntity(ctx, &rag.Entity{ID: "source"})
		kg.AddEntity(ctx, &rag.Entity{ID: "target"})
		kg.AddRelationship(ctx, &rag.Relationship{ID: "rel", Source: "source", Target: "target"})
		rel1, _ := kg.GetRelatedEntities(ctx, "source", 1)
		assert.Len(t, rel1, 1)
		rel2, _ := kg.GetRelatedEntities(ctx, "target", 1)
		assert.Len(t, rel2, 1)
	})
	t.Run("Update and Delete Relationship", func(t *testing.T) {
		kg.AddRelationship(ctx, &rag.Relationship{ID: "r4", Type: "orig"})
		kg.UpdateRelationship(ctx, &rag.Relationship{ID: "r4", Type: "upd"})
		r, _ := kg.GetRelationship(ctx, "r4")
		assert.Equal(t, "upd", r.Type)
		assert.NoError(t, kg.DeleteRelationship(ctx, "r4"))
		_, err := kg.GetRelationship(ctx, "r4")
		assert.Error(t, err)
	})
	t.Run("Query Rel Type", func(t *testing.T) {
		kg.AddRelationship(ctx, &rag.Relationship{ID: "r5", Type: "RT1"})
		res, _ := kg.Query(ctx, &rag.GraphQuery{Relationships: []string{"RT1"}})
		assert.NotEmpty(t, res.Relationships)
	})
	t.Run("Update Entity Type", func(t *testing.T) {
		kg.AddEntity(ctx, &rag.Entity{ID: "e_upd", Name: "orig", Type: "T1"})
		kg.UpdateEntity(ctx, &rag.Entity{ID: "e_upd", Name: "upd", Type: "T2"})
		e, _ := kg.GetEntity(ctx, "e_upd")
		assert.Equal(t, "T2", e.Type)
	})
	t.Run("Query Limit", func(t *testing.T) {
		// Limit caps the number of returned entities.
		kg.AddEntity(ctx, &rag.Entity{ID: "l1", Type: "L"})
		kg.AddEntity(ctx, &rag.Entity{ID: "l2", Type: "L"})
		res, _ := kg.Query(ctx, &rag.GraphQuery{EntityTypes: []string{"L"}, Limit: 1})
		assert.Len(t, res.Entities, 1)
	})
	t.Run("Close", func(t *testing.T) {
		assert.NoError(t, kg.Close())
	})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/engine/vector.go | rag/engine/vector.go | package engine
import (
"context"
"fmt"
"math"
"strings"
"time"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/splitter"
)
// vectorStoreRetrieverAdapter adapts vector store to Retriever interface
type vectorStoreRetrieverAdapter struct {
	vectorStore rag.VectorStore // backing store searched by embedding
	embedder    rag.Embedder    // used to embed the incoming query text
	topK        int             // default result count for Retrieve
}
// Retrieve implements Retriever interface by delegating to RetrieveWithK
// with the adapter's configured default topK.
func (a *vectorStoreRetrieverAdapter) Retrieve(ctx context.Context, query string) ([]rag.Document, error) {
	return a.RetrieveWithK(ctx, query, a.topK)
}
// RetrieveWithK implements Retriever interface: it embeds the query text and
// returns the documents of the k nearest matches from the vector store.
func (a *vectorStoreRetrieverAdapter) RetrieveWithK(ctx context.Context, query string, k int) ([]rag.Document, error) {
	// Turn the query text into an embedding vector.
	vec, err := a.embedder.EmbedDocument(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to embed query: %w", err)
	}

	// Run the nearest-neighbor search.
	hits, err := a.vectorStore.Search(ctx, vec, k)
	if err != nil {
		return nil, fmt.Errorf("vector search failed: %w", err)
	}

	// Strip the scores; this method's callers only want the documents.
	docs := make([]rag.Document, 0, len(hits))
	for _, hit := range hits {
		docs = append(docs, hit.Document)
	}
	return docs, nil
}
// RetrieveWithConfig implements Retriever interface. A nil config falls back
// to the adapter defaults (topK results, plain similarity search, no score
// threshold). When metadata filters are present a filtered search is used.
func (a *vectorStoreRetrieverAdapter) RetrieveWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) ([]rag.DocumentSearchResult, error) {
	if config == nil {
		config = &rag.RetrievalConfig{
			K:              a.topK,
			ScoreThreshold: 0.0,
			SearchType:     "similarity",
			IncludeScores:  false,
		}
	}

	// Embed the query text.
	vec, err := a.embedder.EmbedDocument(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to embed query: %w", err)
	}

	// Filtered search when filters exist, plain search otherwise.
	var hits []rag.DocumentSearchResult
	if len(config.Filter) > 0 {
		hits, err = a.vectorStore.SearchWithFilter(ctx, vec, config.K, config.Filter)
	} else {
		hits, err = a.vectorStore.Search(ctx, vec, config.K)
	}
	if err != nil {
		return nil, fmt.Errorf("vector search failed: %w", err)
	}

	// Drop results below the score threshold when one is configured.
	if config.ScoreThreshold > 0 {
		kept := make([]rag.DocumentSearchResult, 0)
		for _, hit := range hits {
			if hit.Score >= config.ScoreThreshold {
				kept = append(kept, hit)
			}
		}
		hits = kept
	}
	return hits, nil
}
// NewVectorStoreRetriever creates a vector store retriever backed by the
// given store and embedder, returning topK results by default.
func NewVectorStoreRetriever(vectorStore rag.VectorStore, embedder rag.Embedder, topK int) rag.Retriever {
	adapter := &vectorStoreRetrieverAdapter{
		vectorStore: vectorStore,
		embedder:    embedder,
		topK:        topK,
	}
	return adapter
}
// VectorRAGEngine implements traditional vector-based RAG
// (chunk -> embed -> store -> similarity search).
type VectorRAGEngine struct {
	vectorStore rag.VectorStore  // chunk storage with similarity search
	embedder    rag.Embedder     // embedding model for documents and queries
	llm         rag.LLMInterface // held for generation; not referenced by the code visible in this file — confirm usage elsewhere
	config      rag.VectorRAGConfig
	baseEngine  *rag.BaseEngine // shared retrieval plumbing built at construction
	metrics     *rag.Metrics    // indexing/query statistics
}
// NewVectorRAGEngine creates a new vector RAG engine with sensible defaults:
// 1000-character chunks with 200 overlap, and similarity search returning k
// results at a 0.5 score threshold.
func NewVectorRAGEngine(llm rag.LLMInterface, embedder rag.Embedder, vectorStore rag.VectorStore, k int) (*VectorRAGEngine, error) {
	retrieval := rag.RetrievalConfig{
		K:              k,
		ScoreThreshold: 0.5,
		SearchType:     "similarity",
	}
	cfg := rag.VectorRAGConfig{
		ChunkSize:       1000,
		ChunkOverlap:    200,
		RetrieverConfig: retrieval,
	}
	return NewVectorRAGEngineWithConfig(llm, embedder, vectorStore, cfg)
}
// NewVectorRAGEngineWithConfig creates a new vector RAG engine with custom
// configuration. vectorStore and embedder are mandatory; zero-valued chunking
// and retrieval settings are replaced with defaults (1000/200 chunking, K=4).
func NewVectorRAGEngineWithConfig(llm rag.LLMInterface, embedder rag.Embedder, vectorStore rag.VectorStore, config rag.VectorRAGConfig) (*VectorRAGEngine, error) {
	if vectorStore == nil {
		return nil, fmt.Errorf("vector store is required")
	}
	if embedder == nil {
		return nil, fmt.Errorf("embedder is required")
	}

	// Fill in defaults for any unset configuration values.
	if config.ChunkSize == 0 {
		config.ChunkSize = 1000
	}
	if config.ChunkOverlap == 0 {
		config.ChunkOverlap = 200
	}
	if config.RetrieverConfig.K == 0 {
		config.RetrieverConfig.K = 4
	}

	// Wire a retriever over the store so the base engine can search it.
	adapter := &vectorStoreRetrieverAdapter{
		vectorStore: vectorStore,
		embedder:    embedder,
		topK:        config.RetrieverConfig.K,
	}
	base := rag.NewBaseEngine(adapter, embedder, &rag.Config{
		VectorRAG: &config,
	})

	engine := &VectorRAGEngine{
		vectorStore: vectorStore,
		embedder:    embedder,
		llm:         llm,
		config:      config,
		baseEngine:  base,
		metrics:     &rag.Metrics{},
	}
	return engine, nil
}
// Query performs a vector RAG query: it retrieves the top-K similar
// documents, optionally reranks them, and assembles a QueryResult containing
// the retrieved sources, their concatenated context, and a confidence equal
// to the mean search score.
//
// NOTE(review): the non-empty result path leaves Answer unset — no LLM
// generation happens here; confirm that callers generate answers elsewhere.
func (v *VectorRAGEngine) Query(ctx context.Context, query string) (*rag.QueryResult, error) {
	startTime := time.Now()

	// Perform similarity search with the engine's configured K.
	searchResults, err := v.SimilaritySearchWithScores(ctx, query, v.config.RetrieverConfig.K)
	if err != nil {
		return nil, fmt.Errorf("vector search failed: %w", err)
	}

	// Apply reranking if enabled (skipped for MMR, which already handles
	// diversity). A reranking failure is non-fatal: keep the original
	// results. The previous version expressed this with an empty if-branch
	// (flagged by staticcheck SA9003); this form makes the intent explicit.
	if v.config.EnableReranking && v.config.RetrieverConfig.SearchType != "mmr" {
		if reranked, rerankErr := v.rerankResults(ctx, query, searchResults); rerankErr == nil {
			searchResults = reranked
		}
	}

	if len(searchResults) == 0 {
		return &rag.QueryResult{
			Query:      query,
			Answer:     "No relevant information found.",
			Sources:    []rag.Document{},
			Context:    "",
			Confidence: 0.0,
			Metadata: map[string]any{
				"engine_type": "vector_rag",
				"search_type": v.config.RetrieverConfig.SearchType,
			},
		}, nil
	}

	// Extract documents from search results.
	docs := make([]rag.Document, len(searchResults))
	for i, result := range searchResults {
		docs[i] = result.Document
	}

	// Build context from retrieved documents and compute score-based
	// confidence.
	contextStr := v.buildContext(searchResults)
	confidence := v.calculateConfidence(searchResults)
	responseTime := time.Since(startTime)

	return &rag.QueryResult{
		Query:        query,
		Sources:      docs,
		Context:      contextStr,
		Confidence:   confidence,
		ResponseTime: responseTime,
		Metadata: map[string]any{
			"engine_type":    "vector_rag",
			"search_type":    v.config.RetrieverConfig.SearchType,
			"num_results":    len(searchResults),
			"avg_score":      confidence,
			"reranking_used": v.config.EnableReranking,
		},
	}, nil
}
// QueryWithConfig performs a vector RAG query with custom configuration.
// A nil config falls back to the engine's own retrieval config.
//
// NOTE(review): when config is nil this aliases &v.config.RetrieverConfig —
// any later mutation through the pointer would change engine state; confirm
// callers never mutate it. Also note the ordering: threshold filtering runs
// before MMR, and reranking errors are deliberately ignored.
func (v *VectorRAGEngine) QueryWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) (*rag.QueryResult, error) {
	if config == nil {
		config = &v.config.RetrieverConfig
	}
	startTime := time.Now()
	// Perform similarity search with custom config. SearchWithFilter is used
	// unconditionally here (with a possibly-nil filter), unlike the adapter's
	// RetrieveWithConfig which switches on filter presence.
	searchResults, err := v.vectorStore.SearchWithFilter(
		ctx,
		v.embedQuery(ctx, query),
		config.K,
		config.Filter,
	)
	if err != nil {
		return nil, fmt.Errorf("vector search failed: %w", err)
	}
	// Filter by score threshold (a zero threshold keeps everything, since
	// scores are compared with >=).
	filteredResults := make([]rag.DocumentSearchResult, 0)
	for _, result := range searchResults {
		if result.Score >= config.ScoreThreshold {
			filteredResults = append(filteredResults, result)
		}
	}
	// Apply different search strategies
	switch config.SearchType {
	case "mmr":
		filteredResults = v.applyMMR(filteredResults, config.K)
	case "hybrid":
		// Hybrid search would combine vector and keyword search
		// For now, fall back to similarity search
	}
	// Apply reranking if enabled; rerank errors are intentionally dropped
	// (best effort — original results are kept on failure).
	if v.config.EnableReranking {
		filteredResults, _ = v.rerankResults(ctx, query, filteredResults)
	}
	if len(filteredResults) == 0 {
		return &rag.QueryResult{
			Query:      query,
			Answer:     "No relevant information found.",
			Sources:    []rag.Document{},
			Context:    "",
			Confidence: 0.0,
		},
			nil
	}
	// Extract documents from search results
	docs := make([]rag.Document, len(filteredResults))
	for i, result := range filteredResults {
		docs[i] = result.Document
	}
	// Build context from retrieved documents
	contextStr := v.buildContext(filteredResults)
	// Calculate confidence based on search scores
	confidence := v.calculateConfidence(filteredResults)
	responseTime := time.Since(startTime)
	return &rag.QueryResult{
		Query:        query,
		Sources:      docs,
		Context:      contextStr,
		Confidence:   confidence,
		ResponseTime: responseTime,
		Metadata: map[string]any{
			"engine_type":     "vector_rag",
			"search_type":     config.SearchType,
			"num_results":     len(filteredResults),
			"avg_score":       confidence,
			"reranking_used":  v.config.EnableReranking,
			"score_threshold": config.ScoreThreshold,
			"filters_applied": config.Filter != nil,
		},
	}, nil
}
// AddDocuments splits the given documents into chunks, embeds each chunk, and
// stores the chunks in the vector store. Indexing latency and the input
// document count are recorded in the engine metrics.
func (v *VectorRAGEngine) AddDocuments(ctx context.Context, docs []rag.Document) error {
	startTime := time.Now()

	// Split every document into chunks using the configured sizes.
	// Renamed from "splitter" to avoid shadowing the imported splitter
	// package inside this function.
	textSplitter := splitter.NewSimpleTextSplitter(v.config.ChunkSize, v.config.ChunkOverlap)
	processedDocs := make([]rag.Document, 0, len(docs))
	for _, doc := range docs {
		// Split document into chunks
		chunks := textSplitter.SplitDocuments([]rag.Document{doc})
		processedDocs = append(processedDocs, chunks...)
	}

	// Generate an embedding for every chunk before storage.
	for i := range processedDocs {
		embedding, err := v.embedder.EmbedDocument(ctx, processedDocs[i].Content)
		if err != nil {
			return fmt.Errorf("failed to embed document %s: %w", processedDocs[i].ID, err)
		}
		processedDocs[i].Embedding = embedding
	}

	// Persist all chunks in a single call.
	if err := v.vectorStore.Add(ctx, processedDocs); err != nil {
		return fmt.Errorf("failed to add documents to vector store: %w", err)
	}

	// Update metrics. TotalDocuments counts input documents, not chunks.
	v.metrics.IndexingLatency = time.Since(startTime)
	v.metrics.TotalDocuments += int64(len(docs))
	return nil
}
// DeleteDocument removes a single document from the vector store by ID.
func (v *VectorRAGEngine) DeleteDocument(ctx context.Context, docID string) error {
	return v.vectorStore.Delete(ctx, []string{docID})
}
// UpdateDocument re-embeds the document's content and updates it in the
// vector store.
func (v *VectorRAGEngine) UpdateDocument(ctx context.Context, doc rag.Document) error {
	// Generate a fresh embedding, since the content may have changed.
	embedding, err := v.embedder.EmbedDocument(ctx, doc.Content)
	if err != nil {
		return fmt.Errorf("failed to embed document %s: %w", doc.ID, err)
	}
	doc.Embedding = embedding
	return v.vectorStore.Update(ctx, []rag.Document{doc})
}
// SimilaritySearch performs similarity search without generation, returning
// only the matched documents (scores are discarded).
func (v *VectorRAGEngine) SimilaritySearch(ctx context.Context, query string, k int) ([]rag.Document, error) {
	hits, err := v.SimilaritySearchWithScores(ctx, query, k)
	if err != nil {
		return nil, err
	}
	docs := make([]rag.Document, 0, len(hits))
	for _, hit := range hits {
		docs = append(docs, hit.Document)
	}
	return docs, nil
}
// SimilaritySearchWithScores performs similarity search with scores: the
// query is embedded (via embedQuery, which never fails — see its note) and
// the k nearest store results are returned unmodified.
func (v *VectorRAGEngine) SimilaritySearchWithScores(ctx context.Context, query string, k int) ([]rag.DocumentSearchResult, error) {
	queryEmbedding := v.embedQuery(ctx, query)
	return v.vectorStore.Search(ctx, queryEmbedding, k)
}
// embedQuery embeds a query using the configured embedder.
//
// NOTE(review): embedding failures are swallowed here — a zero vector of the
// embedder's dimension is returned instead, which silently degrades search
// quality. Also note the query is embedded with EmbedDocument; confirm the
// embedder has no separate query-embedding mode that should be used.
func (v *VectorRAGEngine) embedQuery(ctx context.Context, query string) []float32 {
	embedding, err := v.embedder.EmbedDocument(ctx, query)
	if err != nil {
		// Return empty embedding if embedding fails
		return make([]float32, v.embedder.GetDimension())
	}
	return embedding
}
// buildContext builds the context string from search results: one numbered,
// score-annotated section per document, with select metadata fields (title,
// source, url) included when present.
func (v *VectorRAGEngine) buildContext(results []rag.DocumentSearchResult) string {
	if len(results) == 0 {
		return ""
	}

	var b strings.Builder
	for idx, res := range results {
		doc := res.Document
		fmt.Fprintf(&b, "Document %d (Score: %.4f):\n", idx+1, res.Score)

		// Surface key metadata fields when they exist.
		if doc.Metadata != nil {
			if title, ok := doc.Metadata["title"]; ok {
				fmt.Fprintf(&b, "Title: %v\n", title)
			}
			if source, ok := doc.Metadata["source"]; ok {
				fmt.Fprintf(&b, "Source: %v\n", source)
			}
			if url, ok := doc.Metadata["url"]; ok {
				fmt.Fprintf(&b, "URL: %v\n", url)
			}
		}
		fmt.Fprintf(&b, "Content: %s\n\n", doc.Content)
	}
	return b.String()
}
// calculateConfidence calculates confidence as the arithmetic mean of the
// search result scores (0.0 when there are no results).
func (v *VectorRAGEngine) calculateConfidence(results []rag.DocumentSearchResult) float64 {
	n := len(results)
	if n == 0 {
		return 0.0
	}
	var sum float64
	for _, r := range results {
		sum += r.Score
	}
	return sum / float64(n)
}
// rerankResults reranks search results using the configured reranker.
// Currently a placeholder: the input is returned unchanged and no error is
// produced; ctx and query are unused until a real reranking model is wired
// in.
func (v *VectorRAGEngine) rerankResults(ctx context.Context, query string, results []rag.DocumentSearchResult) ([]rag.DocumentSearchResult, error) {
	// This is a placeholder for reranking
	// In a real implementation, this would use a reranking model or algorithm
	return results, nil
}
// applyMMR applies Maximal Marginal Relevance to search results, greedily
// selecting up to k results that balance relevance against diversity. The
// input is returned unchanged when it already fits within k.
func (v *VectorRAGEngine) applyMMR(results []rag.DocumentSearchResult, k int) []rag.DocumentSearchResult {
	if len(results) <= k {
		return results
	}

	// Always seed with the highest-scoring result.
	selected := make([]rag.DocumentSearchResult, 0, k)
	selected = append(selected, results[0])

	// Copy the remaining candidates so the in-place removal below cannot
	// clobber the caller's slice (results[1:] shares its backing array).
	candidates := append([]rag.DocumentSearchResult(nil), results[1:]...)

	for len(selected) < k && len(candidates) > 0 {
		// Find the candidate with the highest MMR score. bestScore starts at
		// -Inf so candidates whose MMR score is negative can still win; the
		// previous 0.0 seed silently degraded selection to "always take the
		// first candidate" whenever every MMR score was negative.
		bestIdx := 0
		bestScore := math.Inf(-1)
		for i, candidate := range candidates {
			// Relevance is the original search score.
			relevance := candidate.Score

			// Maximal similarity to any already-selected document.
			maxSimilarity := 0.0
			for _, chosen := range selected {
				if sim := v.calculateSimilarity(candidate.Document, chosen.Document); sim > maxSimilarity {
					maxSimilarity = sim
				}
			}

			// MMR score: lambda * relevance - (1-lambda) * maxSimilarity
			lambda := 0.5 // balance between relevance and diversity
			mmrScore := lambda*relevance - (1-lambda)*maxSimilarity
			if mmrScore > bestScore {
				bestScore = mmrScore
				bestIdx = i
			}
		}

		// Move the best candidate into the selection.
		selected = append(selected, candidates[bestIdx])
		candidates = append(candidates[:bestIdx], candidates[bestIdx+1:]...)
	}
	return selected
}
// calculateSimilarity calculates similarity between two documents
func (v *VectorRAGEngine) calculateSimilarity(doc1, doc2 rag.Document) float64 {
// Simple cosine similarity if embeddings are available
if len(doc1.Embedding) > 0 && len(doc2.Embedding) > 0 {
return cosineSimilarity(doc1.Embedding, doc2.Embedding)
}
// Fallback to Jaccard similarity on content
return jaccardSimilarity(doc1.Content, doc2.Content)
}
// cosineSimilarity calculates cosine similarity between two embeddings
func cosineSimilarity(a, b []float32) float64 {
if len(a) != len(b) {
return 0.0
}
var dotProduct, normA, normB float32
for i := range a {
dotProduct += a[i] * b[i]
normA += a[i] * a[i]
normB += b[i] * b[i]
}
if normA == 0 || normB == 0 {
return 0.0
}
return float64(dotProduct / (float32(math.Sqrt(float64(normA))) * float32(math.Sqrt(float64(normB)))))
}
// jaccardSimilarity calculates Jaccard similarity between two texts
func jaccardSimilarity(a, b string) float64 {
setA := make(map[string]bool)
setB := make(map[string]bool)
// Create sets of words
wordsA := strings.Fields(strings.ToLower(a))
wordsB := strings.Fields(strings.ToLower(b))
for _, word := range wordsA {
setA[word] = true
}
for _, word := range wordsB {
setB[word] = true
}
// Calculate Jaccard similarity
intersection := 0
for word := range setA {
if setB[word] {
intersection++
}
}
union := len(setA) + len(setB) - intersection
if union == 0 {
return 1.0
}
return float64(intersection) / float64(union)
}
// GetVectorStore returns the underlying vector store for advanced operations
func (v *VectorRAGEngine) GetVectorStore() rag.VectorStore {
return v.vectorStore
}
// GetMetrics returns the current metrics
func (v *VectorRAGEngine) GetMetrics() *rag.Metrics {
return v.metrics
}
// GetStats returns vector store statistics
func (v *VectorRAGEngine) GetStats(ctx context.Context) (*rag.VectorStoreStats, error) {
return v.vectorStore.GetStats(ctx)
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/engine/graph.go | rag/engine/graph.go | package engine
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"unicode"
"github.com/smallnest/langgraphgo/rag"
)
// GraphRAGEngine implements GraphRAG functionality with knowledge graphs
type GraphRAGEngine struct {
config rag.GraphRAGConfig
knowledgeGraph rag.KnowledgeGraph
embedder rag.Embedder
llm rag.LLMInterface
baseEngine *rag.BaseEngine
metrics *rag.Metrics
}
// NewGraphRAGEngine creates a new GraphRAG engine
func NewGraphRAGEngine(config rag.GraphRAGConfig, llm rag.LLMInterface, embedder rag.Embedder, kg rag.KnowledgeGraph) (*GraphRAGEngine, error) {
if kg == nil {
return nil, fmt.Errorf("knowledge graph is required")
}
// Set default extraction prompt if not provided
if config.ExtractionPrompt == "" {
config.ExtractionPrompt = DefaultExtractionPrompt
}
// Set default entity types if not provided
if len(config.EntityTypes) == 0 {
config.EntityTypes = DefaultEntityTypes
}
// Set default max depth if not provided
if config.MaxDepth == 0 {
config.MaxDepth = 3
}
baseEngine := rag.NewBaseEngine(nil, embedder, &rag.Config{
GraphRAG: &config,
})
return &GraphRAGEngine{
config: config,
knowledgeGraph: kg,
embedder: embedder,
llm: llm,
baseEngine: baseEngine,
metrics: &rag.Metrics{},
}, nil
}
// Query performs a GraphRAG query
func (g *GraphRAGEngine) Query(ctx context.Context, query string) (*rag.QueryResult, error) {
return g.QueryWithConfig(ctx, query, &rag.RetrievalConfig{
K: 5,
ScoreThreshold: 0.3,
SearchType: "graph",
IncludeScores: true,
})
}
// QueryWithConfig performs a GraphRAG query with custom configuration
func (g *GraphRAGEngine) QueryWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) (*rag.QueryResult, error) {
startTime := time.Now()
// Extract entities from the query
queryEntities, err := g.extractEntities(ctx, query)
if err != nil {
return nil, fmt.Errorf("failed to extract entities from query: %w", err)
}
// Build graph query
graphQuery := rag.GraphQuery{
Limit: config.K,
Filters: config.Filter,
}
// Add extracted entities to the query
if len(queryEntities) > 0 {
graphQuery.EntityTypes = []string{queryEntities[0].Type}
}
// Perform graph search
graphResult, err := g.knowledgeGraph.Query(ctx, &graphQuery)
if err != nil {
return nil, fmt.Errorf("failed to perform graph search: %w", err)
}
// Convert graph results to documents
docs := g.graphResultsToDocuments(graphResult)
// If no entities were found, fall back to entity search
if len(docs) == 0 && len(queryEntities) > 0 {
docs, err = g.entityBasedSearch(ctx, queryEntities, config.K)
if err != nil {
return nil, fmt.Errorf("failed entity-based search: %w", err)
}
}
// Build context from graph results
contextStr := g.buildGraphContext(graphResult, queryEntities)
// Calculate confidence based on entity matches and relationships
confidence := g.calculateGraphConfidence(graphResult, queryEntities)
responseTime := time.Since(startTime)
return &rag.QueryResult{
Query: query,
Sources: docs,
Context: contextStr,
Confidence: confidence,
ResponseTime: responseTime,
Metadata: map[string]any{
"engine_type": "graph_rag",
"entities_found": len(graphResult.Entities),
"relationships": len(graphResult.Relationships),
"paths_found": len(graphResult.Paths),
"graph_query": graphQuery,
"extraction_time": responseTime,
},
}, nil
}
// AddDocuments adds documents to the knowledge graph
func (g *GraphRAGEngine) AddDocuments(ctx context.Context, docs []rag.Document) error {
startTime := time.Now()
for _, doc := range docs {
// Extract entities from the document
entities, err := g.extractEntities(ctx, doc.Content)
if err != nil {
return fmt.Errorf("failed to extract entities from document %s: %w", doc.ID, err)
}
// Extract relationships between entities
relationships, err := g.extractRelationships(ctx, doc.Content, entities)
if err != nil {
return fmt.Errorf("failed to extract relationships from document %s: %w", doc.ID, err)
}
// Add entities to the knowledge graph
for _, entity := range entities {
if err := g.knowledgeGraph.AddEntity(ctx, entity); err != nil {
return fmt.Errorf("failed to add entity %s: %w", entity.ID, err)
}
}
// Add relationships to the knowledge graph
for _, rel := range relationships {
if err := g.knowledgeGraph.AddRelationship(ctx, rel); err != nil {
return fmt.Errorf("failed to add relationship %s: %w", rel.ID, err)
}
}
}
g.metrics.IndexingLatency = time.Since(startTime)
g.metrics.TotalDocuments += int64(len(docs))
return nil
}
// DeleteDocument removes entities and relationships associated with a document
func (g *GraphRAGEngine) DeleteDocument(ctx context.Context, docID string) error {
// This would require tracking which entities/relationships belong to which documents
// For now, this is a placeholder implementation
return fmt.Errorf("document deletion not implemented for GraphRAG engine")
}
// UpdateDocument updates a document in the knowledge graph
func (g *GraphRAGEngine) UpdateDocument(ctx context.Context, doc rag.Document) error {
// Delete old entities and relationships, then add new ones
if err := g.DeleteDocument(ctx, doc.ID); err != nil {
return err
}
return g.AddDocuments(ctx, []rag.Document{doc})
}
// SimilaritySearch performs entity-based similarity search
func (g *GraphRAGEngine) SimilaritySearch(ctx context.Context, query string, k int) ([]rag.Document, error) {
queryEntities, err := g.extractEntities(ctx, query)
if err != nil {
return nil, err
}
return g.entityBasedSearch(ctx, queryEntities, k)
}
// SimilaritySearchWithScores performs entity-based similarity search with scores
func (g *GraphRAGEngine) SimilaritySearchWithScores(ctx context.Context, query string, k int) ([]rag.DocumentSearchResult, error) {
docs, err := g.SimilaritySearch(ctx, query, k)
if err != nil {
return nil, err
}
results := make([]rag.DocumentSearchResult, len(docs))
for i, doc := range docs {
results[i] = rag.DocumentSearchResult{
Document: doc,
Score: 1.0, // GraphRAG doesn't provide traditional similarity scores
}
}
return results, nil
}
// extractEntities extracts entities from text using the LLM
func (g *GraphRAGEngine) extractEntities(ctx context.Context, text string) ([]*rag.Entity, error) {
prompt := fmt.Sprintf(g.config.ExtractionPrompt, text, strings.Join(g.config.EntityTypes, ", "))
response, err := g.llm.Generate(ctx, prompt)
if err != nil {
return nil, err
}
var extractionResult EntityExtractionResult
if err := json.Unmarshal([]byte(response), &extractionResult); err != nil {
// Try to extract entities manually if JSON parsing fails
return g.manualEntityExtraction(ctx, text), nil
}
// Convert extracted entities to Entity structs
entities := make([]*rag.Entity, len(extractionResult.Entities))
for i, extracted := range extractionResult.Entities {
entity := &rag.Entity{
ID: extracted.Name,
Type: extracted.Type,
Name: extracted.Name,
Properties: extracted.Properties,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
entities[i] = entity
}
return entities, nil
}
// extractRelationships extracts relationships between entities using the LLM
func (g *GraphRAGEngine) extractRelationships(ctx context.Context, text string, entities []*rag.Entity) ([]*rag.Relationship, error) {
if len(entities) < 2 {
return nil, nil
}
// Create a prompt for relationship extraction
entityList := make([]string, len(entities))
for i, entity := range entities {
entityList[i] = fmt.Sprintf("%s (%s)", entity.Name, entity.Type)
}
prompt := fmt.Sprintf(RelationshipExtractionPrompt, text, strings.Join(entityList, ", "))
response, err := g.llm.Generate(ctx, prompt)
if err != nil {
return nil, err
}
var extractionResult RelationshipExtractionResult
if err := json.Unmarshal([]byte(response), &extractionResult); err != nil {
return g.manualRelationshipExtraction(ctx, text, entities), nil
}
// Convert extracted relationships to Relationship structs
relationships := make([]*rag.Relationship, len(extractionResult.Relationships))
for i, extracted := range extractionResult.Relationships {
relationships[i] = &rag.Relationship{
ID: fmt.Sprintf("%s_%s_%s", extracted.Source, extracted.Type, extracted.Target),
Source: extracted.Source,
Target: extracted.Target,
Type: extracted.Type,
Properties: extracted.Properties,
CreatedAt: time.Now(),
}
}
return relationships, nil
}
// entityBasedSearch performs search based on entities
func (g *GraphRAGEngine) entityBasedSearch(ctx context.Context, entities []*rag.Entity, k int) ([]rag.Document, error) {
if len(entities) == 0 {
return []rag.Document{}, nil
}
// Use the first entity as the starting point for graph traversal
relatedEntities, err := g.knowledgeGraph.GetRelatedEntities(ctx, entities[0].ID, 1)
if err != nil {
return nil, err
}
// Convert related entities to documents
docs := make([]rag.Document, 0, len(relatedEntities))
count := 0
for _, entity := range relatedEntities {
if count >= k {
break
}
// Create a document from the entity
content := fmt.Sprintf("Entity: %s\nType: %s\nDescription: %v",
entity.Name, entity.Type, entity.Properties["description"])
doc := rag.Document{
ID: entity.ID,
Content: content,
Metadata: map[string]any{
"entity_type": entity.Type,
"properties": entity.Properties,
"source": "knowledge_graph",
},
CreatedAt: entity.CreatedAt,
UpdatedAt: entity.UpdatedAt,
}
docs = append(docs, doc)
count++
}
return docs, nil
}
// graphResultsToDocuments converts graph query results to documents
func (g *GraphRAGEngine) graphResultsToDocuments(result *rag.GraphQueryResult) []rag.Document {
docs := make([]rag.Document, 0, len(result.Entities))
for _, entity := range result.Entities {
content := fmt.Sprintf("Entity: %s\nType: %s\n", entity.Name, entity.Type)
if entity.Properties != nil {
if desc, ok := entity.Properties["description"]; ok {
content += fmt.Sprintf("Description: %v\n", desc)
}
}
doc := rag.Document{
ID: entity.ID,
Content: content,
Metadata: map[string]any{
"entity_type": entity.Type,
"properties": entity.Properties,
"source": "knowledge_graph",
},
CreatedAt: entity.CreatedAt,
UpdatedAt: entity.UpdatedAt,
}
docs = append(docs, doc)
}
return docs
}
// buildGraphContext builds context string from graph results
func (g *GraphRAGEngine) buildGraphContext(result *rag.GraphQueryResult, queryEntities []*rag.Entity) string {
if len(result.Entities) == 0 {
return "No relevant entities found in the knowledge graph."
}
var contextStr strings.Builder
contextStr.WriteString("Knowledge Graph Information:\n\n")
// Add entities
contextStr.WriteString("Relevant Entities:\n")
for _, entity := range result.Entities {
contextStr.WriteString(fmt.Sprintf("- %s (%s): %v\n", entity.Name, entity.Type, entity.Properties))
}
// Add relationships
if len(result.Relationships) > 0 {
contextStr.WriteString("\nRelationships:\n")
for _, rel := range result.Relationships {
contextStr.WriteString(fmt.Sprintf("- %s -> %s (%s)\n",
rel.Source, rel.Target, rel.Type))
}
}
// Add paths
if len(result.Paths) > 0 {
contextStr.WriteString("\nEntity Paths:\n")
for i, path := range result.Paths {
pathStr := make([]string, len(path))
for j, entity := range path {
pathStr[j] = fmt.Sprintf("%s(%s)", entity.Name, entity.Type)
}
contextStr.WriteString(fmt.Sprintf("Path %d: %s\n", i+1, strings.Join(pathStr, " -> ")))
}
}
return contextStr.String()
}
// calculateGraphConfidence calculates confidence based on graph results
func (g *GraphRAGEngine) calculateGraphConfidence(result *rag.GraphQueryResult, queryEntities []*rag.Entity) float64 {
if len(result.Entities) == 0 {
return 0.0
}
// Base confidence from number of entities found
entityConfidence := float64(len(result.Entities)) / 10.0
if entityConfidence > 1.0 {
entityConfidence = 1.0
}
// Boost confidence if query entities were matched
if len(queryEntities) > 0 {
matchedEntities := 0
for _, queryEntity := range queryEntities {
for _, foundEntity := range result.Entities {
if queryEntity.ID == foundEntity.ID || queryEntity.Name == foundEntity.Name {
matchedEntities++
break
}
}
}
entityConfidence += float64(matchedEntities) / float64(len(queryEntities)) * 0.3
}
// Consider relationships count (since store.Relationship doesn't have confidence)
relConfidence := 0.0
if len(result.Relationships) > 0 {
relConfidence = float64(len(result.Relationships)) * 0.1 // Give some weight for having relationships
}
totalConfidence := entityConfidence + relConfidence
if totalConfidence > 1.0 {
totalConfidence = 1.0
}
return totalConfidence
}
// manualEntityExtraction provides a fallback for entity extraction
func (g *GraphRAGEngine) manualEntityExtraction(ctx context.Context, text string) []*rag.Entity {
// Simple keyword-based entity extraction as fallback
// In a real implementation, this would use more sophisticated NLP
entities := make([]*rag.Entity, 0)
// Look for capitalized words (potential entities)
words := strings.FieldsSeq(text)
for word := range words {
if len(word) > 2 && unicode.IsUpper(rune(word[0])) {
entity := &rag.Entity{
ID: word,
Type: "UNKNOWN",
Name: word,
Properties: map[string]any{
"description": fmt.Sprintf("Entity extracted from text: %s", word),
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
entities = append(entities, entity)
}
}
return entities
}
// manualRelationshipExtraction provides a fallback for relationship extraction
func (g *GraphRAGEngine) manualRelationshipExtraction(ctx context.Context, text string, entities []*rag.Entity) []*rag.Relationship {
// Simple co-occurrence based relationship extraction as fallback
relationships := make([]*rag.Relationship, 0)
// If entities appear close together in text, assume a relationship
for i, entity1 := range entities {
for j, entity2 := range entities {
if i >= j {
continue
}
relationship := &rag.Relationship{
ID: fmt.Sprintf("%s_related_to_%s", entity1.ID, entity2.ID),
Source: entity1.ID,
Target: entity2.ID,
Type: "RELATED_TO",
Properties: map[string]any{},
CreatedAt: time.Now(),
}
relationships = append(relationships, relationship)
}
}
return relationships
}
// GetKnowledgeGraph returns the underlying knowledge graph for advanced operations
func (g *GraphRAGEngine) GetKnowledgeGraph() rag.KnowledgeGraph {
return g.knowledgeGraph
}
// GetMetrics returns the current metrics
func (g *GraphRAGEngine) GetMetrics() *rag.Metrics {
return g.metrics
}
// Constants for default prompts and entity types
const (
DefaultExtractionPrompt = `
Extract entities from the following text. Focus on these entity types: %s.
Return a JSON response with this structure:
{
"entities": [
{
"name": "entity_name",
"type": "entity_type",
"description": "brief description",
"properties": {}
}
]
}
Text: %s
`
RelationshipExtractionPrompt = `
Extract relationships between the following entities from this text.
Consider relationships like: works_with, located_in, created_by, part_of, related_to, etc.
Return a JSON response with this structure:
{
"relationships": [
{
"source": "entity1_name",
"target": "entity2_name",
"type": "relationship_type",
"properties": {},
"confidence": 0.9
}
]
}
Text: %s
Entities: %s
`
)
// DefaultEntityTypes contains commonly used entity types
var DefaultEntityTypes = []string{
"PERSON",
"ORGANIZATION",
"LOCATION",
"DATE",
"PRODUCT",
"EVENT",
"CONCEPT",
"TECHNOLOGY",
}
// Supporting structs for JSON parsing
type EntityExtractionResult struct {
Entities []ExtractedEntity `json:"entities"`
}
type ExtractedEntity struct {
Name string `json:"name"`
Type string `json:"type"`
Description string `json:"description"`
Properties map[string]any `json:"properties"`
}
type RelationshipExtractionResult struct {
Relationships []ExtractedRelationship `json:"relationships"`
}
type ExtractedRelationship struct {
Source string `json:"source"`
Target string `json:"target"`
Type string `json:"type"`
Properties map[string]any `json:"properties"`
Confidence float64 `json:"confidence"`
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/engine/common_test.go | rag/engine/common_test.go | package engine
import (
"context"
"github.com/smallnest/langgraphgo/rag"
)
type mockRetriever struct {
docs []rag.Document
}
func (m *mockRetriever) Retrieve(ctx context.Context, query string) ([]rag.Document, error) {
return m.docs, nil
}
func (m *mockRetriever) RetrieveWithK(ctx context.Context, query string, k int) ([]rag.Document, error) {
return m.docs, nil
}
func (m *mockRetriever) RetrieveWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) ([]rag.DocumentSearchResult, error) {
res := make([]rag.DocumentSearchResult, len(m.docs))
for i, d := range m.docs {
res[i] = rag.DocumentSearchResult{Document: d, Score: 0.9}
}
return res, nil
}
type mockEmbedder struct{}
func (m *mockEmbedder) EmbedDocument(ctx context.Context, text string) ([]float32, error) {
return []float32{0.1}, nil
}
func (m *mockEmbedder) EmbedDocuments(ctx context.Context, texts []string) ([][]float32, error) {
return [][]float32{{0.1}}, nil
}
func (m *mockEmbedder) GetDimension() int { return 1 }
type mockLLM struct{}
func (m *mockLLM) Generate(ctx context.Context, prompt string) (string, error) {
return `{"entities": [{"name": "e1", "type": "person"}]}`, nil
}
func (m *mockLLM) GenerateWithConfig(ctx context.Context, prompt string, config map[string]any) (string, error) {
return `{"entities": [{"name": "e1", "type": "person"}]}`, nil
}
func (m *mockLLM) GenerateWithSystem(ctx context.Context, system, prompt string) (string, error) {
return `{"entities": [{"name": "e1", "type": "person"}]}`, nil
}
type mockVectorStore struct {
docs []rag.Document
}
func (m *mockVectorStore) Add(ctx context.Context, docs []rag.Document) error { return nil }
func (m *mockVectorStore) Search(ctx context.Context, q []float32, k int) ([]rag.DocumentSearchResult, error) {
res := make([]rag.DocumentSearchResult, len(m.docs))
for i, d := range m.docs {
res[i] = rag.DocumentSearchResult{Document: d, Score: 0.9}
}
return res, nil
}
func (m *mockVectorStore) SearchWithFilter(ctx context.Context, q []float32, k int, f map[string]any) ([]rag.DocumentSearchResult, error) {
return m.Search(ctx, q, k)
}
func (m *mockVectorStore) Delete(ctx context.Context, ids []string) error { return nil }
func (m *mockVectorStore) Update(ctx context.Context, docs []rag.Document) error { return nil }
func (m *mockVectorStore) GetStats(ctx context.Context) (*rag.VectorStoreStats, error) {
return &rag.VectorStoreStats{}, nil
}
func (m *mockVectorStore) Close() error { return nil }
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/engine/graph_test.go | rag/engine/graph_test.go | package engine
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
type mockKG struct {
entities []*rag.Entity
}
func (m *mockKG) Query(ctx context.Context, q *rag.GraphQuery) (*rag.GraphQueryResult, error) {
return &rag.GraphQueryResult{Entities: m.entities}, nil
}
func (m *mockKG) AddEntity(ctx context.Context, e *rag.Entity) error { return nil }
func (m *mockKG) AddRelationship(ctx context.Context, r *rag.Relationship) error { return nil }
func (m *mockKG) GetRelatedEntities(ctx context.Context, id string, d int) ([]*rag.Entity, error) {
return m.entities, nil
}
func (m *mockKG) GetEntity(ctx context.Context, id string) (*rag.Entity, error) {
if len(m.entities) > 0 && m.entities[0].ID == id {
return m.entities[0], nil
}
return nil, nil
}
func TestGraphRAGEngine(t *testing.T) {
ctx := context.Background()
llm := &mockLLM{}
kg := &mockKG{entities: []*rag.Entity{{ID: "e1", Name: "e1", Type: "person"}}}
embedder := &mockEmbedder{}
e, err := NewGraphRAGEngine(rag.GraphRAGConfig{}, llm, embedder, kg)
assert.NoError(t, err)
assert.NotNil(t, e)
t.Run("Query", func(t *testing.T) {
res, err := e.Query(ctx, "e1")
assert.NoError(t, err)
assert.NotNil(t, res)
})
t.Run("SimilaritySearch", func(t *testing.T) {
docs, err := e.SimilaritySearch(ctx, "e1", 1)
assert.NoError(t, err)
assert.NotEmpty(t, docs)
})
t.Run("AddDocuments", func(t *testing.T) {
docs := []rag.Document{{ID: "d1", Content: "e1 knows e2"}}
err := e.AddDocuments(ctx, docs)
assert.NoError(t, err)
})
t.Run("Context and Confidence", func(t *testing.T) {
qr := &rag.GraphQueryResult{
Entities: []*rag.Entity{{Name: "e1", Type: "p"}},
Relationships: []*rag.Relationship{{Source: "e1", Target: "e2", Type: "k"}},
}
ctxStr := e.buildGraphContext(qr, nil)
assert.NotEmpty(t, ctxStr)
conf := e.calculateGraphConfidence(qr, nil)
assert.Greater(t, conf, 0.0)
})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/engine/lightrag_test.go | rag/engine/lightrag_test.go | package engine
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/store"
)
// MockLLM implements the LLMInterface for testing
type MockLLM struct{}
func (m *MockLLM) Generate(ctx context.Context, prompt string) (string, error) {
return `{
"entities": [
{
"id": "test_entity_1",
"name": "Test Entity",
"type": "TEST",
"description": "A test entity",
"properties": {}
}
]
}`, nil
}
func (m *MockLLM) GenerateWithConfig(ctx context.Context, prompt string, config map[string]any) (string, error) {
return m.Generate(ctx, prompt)
}
func (m *MockLLM) GenerateWithSystem(ctx context.Context, system, prompt string) (string, error) {
return m.Generate(ctx, prompt)
}
func TestNewLightRAGEngine(t *testing.T) {
_ = context.Background()
config := rag.LightRAGConfig{
Mode: "hybrid",
ChunkSize: 512,
ChunkOverlap: 50,
MaxEntitiesPerChunk: 20,
EntityExtractionThreshold: 0.5,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
t.Fatalf("Failed to create knowledge graph: %v", err)
}
// Test without vector store
engine, err := NewLightRAGEngine(config, llm, embedder, kg, nil)
if err != nil {
t.Fatalf("Failed to create LightRAG engine: %v", err)
}
if engine == nil {
t.Fatal("Expected non-nil engine")
}
// Test default values
if engine.config.Mode != "hybrid" {
t.Errorf("Expected mode 'hybrid', got '%s'", engine.config.Mode)
}
if engine.config.ChunkSize != 512 {
t.Errorf("Expected chunk size 512, got %d", engine.config.ChunkSize)
}
}
func TestLightRAGEngine_NaiveRetrieval(t *testing.T) {
ctx := context.Background()
config := rag.LightRAGConfig{
Mode: "naive",
ChunkSize: 512,
ChunkOverlap: 50,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
vectorStore := store.NewInMemoryVectorStore(embedder)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
t.Fatalf("Failed to create knowledge graph: %v", err)
}
engine, err := NewLightRAGEngine(config, llm, embedder, kg, vectorStore)
if err != nil {
t.Fatalf("Failed to create LightRAG engine: %v", err)
}
// Add test documents
docs := []rag.Document{
{
ID: "doc1",
Content: "This is a test document about artificial intelligence and machine learning.",
Metadata: map[string]any{
"source": "test.txt",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
{
ID: "doc2",
Content: "This document discusses neural networks and deep learning algorithms.",
Metadata: map[string]any{
"source": "test2.txt",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
err = engine.AddDocuments(ctx, docs)
if err != nil {
t.Fatalf("Failed to add documents: %v", err)
}
// Test naive retrieval
result, err := engine.Query(ctx, "artificial intelligence")
if err != nil {
t.Fatalf("Failed to query: %v", err)
}
if result == nil {
t.Fatal("Expected non-nil result")
}
if result.Query != "artificial intelligence" {
t.Errorf("Expected query 'artificial intelligence', got '%s'", result.Query)
}
if len(result.Sources) == 0 {
t.Error("Expected at least one source")
}
if result.Metadata["mode"] != "naive" {
t.Errorf("Expected mode 'naive', got '%v'", result.Metadata["mode"])
}
}
func TestLightRAGEngine_LocalRetrieval(t *testing.T) {
ctx := context.Background()
config := rag.LightRAGConfig{
Mode: "local",
LocalConfig: rag.LocalRetrievalConfig{
TopK: 10,
MaxHops: 2,
IncludeDescriptions: true,
},
ChunkSize: 512,
ChunkOverlap: 50,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
t.Fatalf("Failed to create knowledge graph: %v", err)
}
vectorStore := store.NewInMemoryVectorStore(embedder)
engine, err := NewLightRAGEngine(config, llm, embedder, kg, vectorStore)
if err != nil {
t.Fatalf("Failed to create LightRAG engine: %v", err)
}
// Add test documents
docs := []rag.Document{
{
ID: "doc1",
Content: "Elon Musk is the CEO of Tesla and SpaceX. He is known for his work in electric vehicles and space exploration.",
Metadata: map[string]any{
"source": "biography.txt",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
err = engine.AddDocuments(ctx, docs)
if err != nil {
t.Fatalf("Failed to add documents: %v", err)
}
// Test local retrieval
result, err := engine.Query(ctx, "Who is Elon Musk?")
if err != nil {
t.Fatalf("Failed to query: %v", err)
}
if result == nil {
t.Fatal("Expected non-nil result")
}
if result.Metadata["mode"] != "local" {
t.Errorf("Expected mode 'local', got '%v'", result.Metadata["mode"])
}
}
func TestLightRAGEngine_GlobalRetrieval(t *testing.T) {
ctx := context.Background()
config := rag.LightRAGConfig{
Mode: "global",
GlobalConfig: rag.GlobalRetrievalConfig{
MaxCommunities: 5,
IncludeHierarchy: false,
MaxHierarchyDepth: 3,
},
EnableCommunityDetection: true,
ChunkSize: 512,
ChunkOverlap: 50,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
t.Fatalf("Failed to create knowledge graph: %v", err)
}
vectorStore := store.NewInMemoryVectorStore(embedder)
engine, err := NewLightRAGEngine(config, llm, embedder, kg, vectorStore)
if err != nil {
t.Fatalf("Failed to create LightRAG engine: %v", err)
}
// Add test documents
docs := []rag.Document{
{
ID: "doc1",
Content: "Python is a programming language used for web development, data science, and machine learning.",
Metadata: map[string]any{
"source": "python.txt",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
err = engine.AddDocuments(ctx, docs)
if err != nil {
t.Fatalf("Failed to add documents: %v", err)
}
// Test global retrieval
result, err := engine.Query(ctx, "What is Python used for?")
if err != nil {
t.Fatalf("Failed to query: %v", err)
}
if result == nil {
t.Fatal("Expected non-nil result")
}
if result.Metadata["mode"] != "global" {
t.Errorf("Expected mode 'global', got '%v'", result.Metadata["mode"])
}
}
func TestLightRAGEngine_HybridRetrieval(t *testing.T) {
ctx := context.Background()
config := rag.LightRAGConfig{
Mode: "hybrid",
HybridConfig: rag.HybridRetrievalConfig{
LocalWeight: 0.5,
GlobalWeight: 0.5,
FusionMethod: "rrf",
RFFK: 60,
},
LocalConfig: rag.LocalRetrievalConfig{
TopK: 10,
MaxHops: 2,
},
GlobalConfig: rag.GlobalRetrievalConfig{
MaxCommunities: 5,
},
ChunkSize: 512,
ChunkOverlap: 50,
EnableCommunityDetection: true,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
t.Fatalf("Failed to create knowledge graph: %v", err)
}
vectorStore := store.NewInMemoryVectorStore(embedder)
engine, err := NewLightRAGEngine(config, llm, embedder, kg, vectorStore)
if err != nil {
t.Fatalf("Failed to create LightRAG engine: %v", err)
}
// Add test documents
docs := []rag.Document{
{
ID: "doc1",
Content: "Go is a statically typed, compiled programming language designed at Google.",
Metadata: map[string]any{
"source": "go.txt",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
err = engine.AddDocuments(ctx, docs)
if err != nil {
t.Fatalf("Failed to add documents: %v", err)
}
// Test hybrid retrieval
result, err := engine.Query(ctx, "Tell me about Go programming language")
if err != nil {
t.Fatalf("Failed to query: %v", err)
}
if result == nil {
t.Fatal("Expected non-nil result")
}
if result.Metadata["mode"] != "hybrid" {
t.Errorf("Expected mode 'hybrid', got '%v'", result.Metadata["mode"])
}
if result.Metadata["fusion_method"] != "rrf" {
t.Errorf("Expected fusion method 'rrf', got '%v'", result.Metadata["fusion_method"])
}
}
func TestLightRAGEngine_SplitDocument(t *testing.T) {
_ = context.Background()
config := rag.LightRAGConfig{
Mode: "hybrid",
ChunkSize: 100,
ChunkOverlap: 20,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
t.Fatalf("Failed to create knowledge graph: %v", err)
}
engine, err := NewLightRAGEngine(config, llm, embedder, kg, nil)
if err != nil {
t.Fatalf("Failed to create LightRAG engine: %v", err)
}
// Create a document that will be split into multiple chunks
doc := rag.Document{
ID: "test_doc",
Content: strings.Repeat("This is a test sentence. ", 20),
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
chunks := engine.splitDocument(doc)
if len(chunks) < 2 {
t.Errorf("Expected at least 2 chunks, got %d", len(chunks))
}
// Verify chunk IDs are unique
ids := make(map[string]bool)
for _, chunk := range chunks {
if ids[chunk.ID] {
t.Errorf("Duplicate chunk ID: %s", chunk.ID)
}
ids[chunk.ID] = true
// Verify metadata
if chunk.Metadata["source_doc"] != "test_doc" {
t.Errorf("Expected source_doc 'test_doc', got '%v'", chunk.Metadata["source_doc"])
}
}
}
func TestLightRAGEngine_SimilaritySearch(t *testing.T) {
ctx := context.Background()
config := rag.LightRAGConfig{
Mode: "naive",
ChunkSize: 512,
ChunkOverlap: 50,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
vectorStore := store.NewInMemoryVectorStore(embedder)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
t.Fatalf("Failed to create knowledge graph: %v", err)
}
engine, err := NewLightRAGEngine(config, llm, embedder, kg, vectorStore)
if err != nil {
t.Fatalf("Failed to create LightRAG engine: %v", err)
}
docs := []rag.Document{
{
ID: "doc1",
Content: "Test document one",
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
err = engine.AddDocuments(ctx, docs)
if err != nil {
t.Fatalf("Failed to add documents: %v", err)
}
// Test similarity search
results, err := engine.SimilaritySearch(ctx, "test", 5)
if err != nil {
t.Fatalf("Failed to perform similarity search: %v", err)
}
if len(results) == 0 {
t.Error("Expected at least one result")
}
// Test similarity search with scores
scoreResults, err := engine.SimilaritySearchWithScores(ctx, "test", 5)
if err != nil {
t.Fatalf("Failed to perform similarity search with scores: %v", err)
}
if len(scoreResults) == 0 {
t.Error("Expected at least one scored result")
}
}
func TestLightRAGEngine_GetMetrics(t *testing.T) {
_ = context.Background()
config := rag.LightRAGConfig{
Mode: "hybrid",
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
t.Fatalf("Failed to create knowledge graph: %v", err)
}
engine, err := NewLightRAGEngine(config, llm, embedder, kg, nil)
if err != nil {
t.Fatalf("Failed to create LightRAG engine: %v", err)
}
metrics := engine.GetMetrics()
if metrics == nil {
t.Fatal("Expected non-nil metrics")
}
// Initial metrics should be zero
if metrics.TotalQueries != 0 {
t.Errorf("Expected 0 total queries, got %d", metrics.TotalQueries)
}
if metrics.TotalDocuments != 0 {
t.Errorf("Expected 0 total documents, got %d", metrics.TotalDocuments)
}
}
func TestLightRAGEngine_GetConfig(t *testing.T) {
_ = context.Background()
config := rag.LightRAGConfig{
Mode: "hybrid",
ChunkSize: 1024,
ChunkOverlap: 100,
MaxEntitiesPerChunk: 30,
EntityExtractionThreshold: 0.7,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
t.Fatalf("Failed to create knowledge graph: %v", err)
}
engine, err := NewLightRAGEngine(config, llm, embedder, kg, nil)
if err != nil {
t.Fatalf("Failed to create LightRAG engine: %v", err)
}
retrievedConfig := engine.GetConfig()
if retrievedConfig.Mode != config.Mode {
t.Errorf("Expected mode '%s', got '%s'", config.Mode, retrievedConfig.Mode)
}
if retrievedConfig.ChunkSize != config.ChunkSize {
t.Errorf("Expected chunk size %d, got %d", config.ChunkSize, retrievedConfig.ChunkSize)
}
}
// Benchmark tests
func BenchmarkLightRAGEngine_AddDocuments(b *testing.B) {
ctx := context.Background()
config := rag.LightRAGConfig{
Mode: "hybrid",
ChunkSize: 512,
ChunkOverlap: 50,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
vectorStore := store.NewInMemoryVectorStore(embedder)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
b.Fatalf("Failed to create knowledge graph: %v", err)
}
engine, err := NewLightRAGEngine(config, llm, embedder, kg, vectorStore)
if err != nil {
b.Fatalf("Failed to create LightRAG engine: %v", err)
}
docs := make([]rag.Document, 100)
for i := range 100 {
docs[i] = rag.Document{
ID: fmt.Sprintf("doc%d", i),
Content: strings.Repeat("Test content ", 100),
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
}
for b.Loop() {
_ = engine.AddDocuments(ctx, docs)
}
}
func BenchmarkLightRAGEngine_Query(b *testing.B) {
ctx := context.Background()
config := rag.LightRAGConfig{
Mode: "hybrid",
ChunkSize: 512,
ChunkOverlap: 50,
}
llm := &MockLLM{}
embedder := store.NewMockEmbedder(128)
vectorStore := store.NewInMemoryVectorStore(embedder)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
b.Fatalf("Failed to create knowledge graph: %v", err)
}
engine, err := NewLightRAGEngine(config, llm, embedder, kg, vectorStore)
if err != nil {
b.Fatalf("Failed to create LightRAG engine: %v", err)
}
docs := make([]rag.Document, 100)
for i := range 100 {
docs[i] = rag.Document{
ID: fmt.Sprintf("doc%d", i),
Content: strings.Repeat("Test content ", 100),
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
}
_ = engine.AddDocuments(ctx, docs)
for b.Loop() {
_, _ = engine.Query(ctx, "test query")
}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/engine/vector_test.go | rag/engine/vector_test.go | package engine
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
func TestVectorRAGEngine(t *testing.T) {
ctx := context.Background()
llm := &mockLLM{}
store := &mockVectorStore{docs: []rag.Document{{Content: "c1"}}}
embedder := &mockEmbedder{}
e, err := NewVectorRAGEngine(llm, embedder, store, 1)
assert.NoError(t, err)
assert.NotNil(t, e)
t.Run("Query", func(t *testing.T) {
res, err := e.Query(ctx, "test")
assert.NoError(t, err)
assert.NotEmpty(t, res.Context)
})
t.Run("Operations", func(t *testing.T) {
err := e.AddDocuments(ctx, []rag.Document{{Content: "new"}})
assert.NoError(t, err)
assert.NoError(t, e.DeleteDocument(ctx, "1"))
assert.NoError(t, e.UpdateDocument(ctx, rag.Document{}))
})
t.Run("Similarity Search", func(t *testing.T) {
docs, err := e.SimilaritySearch(ctx, "test", 1)
assert.NoError(t, err)
assert.Len(t, docs, 1)
})
t.Run("QueryWithConfig", func(t *testing.T) {
config := &rag.RetrievalConfig{
K: 1,
SearchType: "mmr",
}
res, err := e.QueryWithConfig(ctx, "test", config)
assert.NoError(t, err)
assert.Len(t, res.Sources, 1)
})
t.Run("Query with Reranking", func(t *testing.T) {
config := rag.VectorRAGConfig{
EnableReranking: true,
RetrieverConfig: rag.RetrievalConfig{K: 1},
}
e2, _ := NewVectorRAGEngineWithConfig(llm, embedder, store, config)
res, err := e2.Query(ctx, "test")
assert.NoError(t, err)
assert.NotNil(t, res)
})
t.Run("Calculate Similarity", func(t *testing.T) {
d1 := rag.Document{Content: "word1 word2"}
d2 := rag.Document{Content: "word1 word3"}
sim := e.calculateSimilarity(d1, d2)
assert.Greater(t, sim, 0.0)
})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/engine/engine_test.go | rag/engine/engine_test.go | package engine
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
func TestBaseEngine(t *testing.T) {
ctx := context.Background()
retriever := &mockRetriever{docs: []rag.Document{{ID: "1", Content: "c1"}}}
embedder := &mockEmbedder{}
e := rag.NewBaseEngine(retriever, embedder, nil)
assert.NotNil(t, e)
t.Run("Query", func(t *testing.T) {
res, err := e.Query(ctx, "test")
assert.NoError(t, err)
assert.NotEmpty(t, res.Context)
})
t.Run("SimilaritySearch", func(t *testing.T) {
docs, err := e.SimilaritySearch(ctx, "test", 1)
assert.NoError(t, err)
assert.Len(t, docs, 1)
res, err := e.SimilaritySearchWithScores(ctx, "test", 1)
assert.NoError(t, err)
assert.Len(t, res, 1)
})
t.Run("Engine Operations", func(t *testing.T) {
assert.Error(t, e.AddDocuments(ctx, nil))
assert.Error(t, e.DeleteDocument(ctx, "1"))
assert.Error(t, e.UpdateDocument(ctx, rag.Document{}))
})
t.Run("Stats", func(t *testing.T) {
assert.NotNil(t, e.GetMetrics())
e.ResetMetrics()
})
}
func TestCompositeEngine(t *testing.T) {
ctx := context.Background()
retriever := &mockRetriever{docs: []rag.Document{{ID: "1", Content: "c1"}}}
embedder := &mockEmbedder{}
engine1 := rag.NewBaseEngine(retriever, embedder, nil)
comp := rag.NewCompositeEngine([]rag.Engine{engine1}, nil)
t.Run("Composite Query", func(t *testing.T) {
res, err := comp.Query(ctx, "test")
assert.NoError(t, err)
assert.Len(t, res.Sources, 1)
res2, _ := comp.QueryWithConfig(ctx, "test", nil)
assert.Len(t, res2.Sources, 1)
})
t.Run("Composite Operations", func(t *testing.T) {
// BaseEngine returns errors for these, so Composite should too
assert.Error(t, comp.AddDocuments(ctx, []rag.Document{{}}))
assert.Error(t, comp.DeleteDocument(ctx, "1"))
assert.Error(t, comp.UpdateDocument(ctx, rag.Document{}))
})
t.Run("Composite Search", func(t *testing.T) {
docs, err := comp.SimilaritySearch(ctx, "test", 1)
assert.NoError(t, err)
assert.Len(t, docs, 1)
res, err := comp.SimilaritySearchWithScores(ctx, "test", 1)
assert.NoError(t, err)
assert.Len(t, res, 1)
})
}
func TestAggregators(t *testing.T) {
res1 := &rag.QueryResult{
Confidence: 0.5,
Sources: []rag.Document{{ID: "1"}},
Metadata: make(map[string]any),
}
res2 := &rag.QueryResult{
Confidence: 0.8,
Sources: []rag.Document{{ID: "2"}},
Metadata: make(map[string]any),
}
t.Run("DefaultAggregator", func(t *testing.T) {
agg := rag.DefaultAggregator([]*rag.QueryResult{res1, res2})
assert.Equal(t, 0.8, agg.Confidence)
assert.Len(t, agg.Sources, 2)
})
t.Run("WeightedAggregator", func(t *testing.T) {
wAgg := rag.WeightedAggregator([]float64{1.0, 0.1})([]*rag.QueryResult{res1, res2})
assert.Equal(t, 0.5, wAgg.Confidence)
})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/engine/lightrag.go | rag/engine/lightrag.go | package engine
import (
"context"
"fmt"
"slices"
"strings"
"sync"
"time"
"github.com/smallnest/langgraphgo/rag"
)
// LightRAGEngine implements LightRAG functionality
// LightRAG combines low-level semantic chunks with high-level graph structures
// It supports four retrieval modes: naive, local, global, and hybrid
type LightRAGEngine struct {
config rag.LightRAGConfig
knowledgeGraph rag.KnowledgeGraph
embedder rag.Embedder
llm rag.LLMInterface
vectorStore rag.VectorStore
chunkCache map[string][]rag.Document
communityCache map[string]*rag.Community
cacheMutex sync.RWMutex
metrics *rag.Metrics
baseEngine *rag.BaseEngine
}
// NewLightRAGEngine creates a new LightRAG engine
func NewLightRAGEngine(
config rag.LightRAGConfig,
llm rag.LLMInterface,
embedder rag.Embedder,
kg rag.KnowledgeGraph,
vectorStore rag.VectorStore,
) (*LightRAGEngine, error) {
if kg == nil {
return nil, fmt.Errorf("knowledge graph is required")
}
if embedder == nil {
return nil, fmt.Errorf("embedder is required")
}
if llm == nil {
return nil, fmt.Errorf("llm is required")
}
// Set default values
if config.Mode == "" {
config.Mode = "hybrid"
}
if config.ChunkSize == 0 {
config.ChunkSize = 512
}
if config.ChunkOverlap == 0 {
config.ChunkOverlap = 50
}
if config.LocalConfig.TopK == 0 {
config.LocalConfig.TopK = 10
}
if config.LocalConfig.MaxHops == 0 {
config.LocalConfig.MaxHops = 2
}
if config.GlobalConfig.MaxCommunities == 0 {
config.GlobalConfig.MaxCommunities = 5
}
if config.MaxEntitiesPerChunk == 0 {
config.MaxEntitiesPerChunk = 20
}
if config.HybridConfig.LocalWeight == 0 {
config.HybridConfig.LocalWeight = 0.5
}
if config.HybridConfig.GlobalWeight == 0 {
config.HybridConfig.GlobalWeight = 0.5
}
if config.HybridConfig.RFFK == 0 {
config.HybridConfig.RFFK = 60
}
baseEngine := rag.NewBaseEngine(nil, embedder, &rag.Config{
LightRAG: &config,
})
return &LightRAGEngine{
config: config,
knowledgeGraph: kg,
embedder: embedder,
llm: llm,
vectorStore: vectorStore,
chunkCache: make(map[string][]rag.Document),
communityCache: make(map[string]*rag.Community),
metrics: &rag.Metrics{},
baseEngine: baseEngine,
}, nil
}
// Query performs a LightRAG query with the configured mode
func (l *LightRAGEngine) Query(ctx context.Context, query string) (*rag.QueryResult, error) {
return l.QueryWithConfig(ctx, query, &rag.RetrievalConfig{
K: 5,
ScoreThreshold: 0.3,
SearchType: l.config.Mode,
IncludeScores: true,
})
}
// QueryWithConfig performs a LightRAG query with custom configuration
func (l *LightRAGEngine) QueryWithConfig(ctx context.Context, query string, config *rag.RetrievalConfig) (*rag.QueryResult, error) {
startTime := time.Now()
mode := config.SearchType
if mode == "" {
mode = l.config.Mode
}
var result *rag.QueryResult
var err error
// Route to appropriate retrieval mode
switch mode {
case "naive":
result, err = l.naiveRetrieval(ctx, query, config)
case "local":
result, err = l.localRetrieval(ctx, query, config)
case "global":
result, err = l.globalRetrieval(ctx, query, config)
case "hybrid":
result, err = l.hybridRetrieval(ctx, query, config)
default:
return nil, fmt.Errorf("unsupported retrieval mode: %s (supported: naive, local, global, hybrid)", mode)
}
if err != nil {
return nil, err
}
// Update metrics
l.metrics.TotalQueries++
l.metrics.LastQueryTime = time.Now()
latency := time.Since(startTime)
l.metrics.AverageLatency = time.Duration((int64(l.metrics.AverageLatency)*l.metrics.TotalQueries + int64(latency)) / (l.metrics.TotalQueries + 1))
result.ResponseTime = latency
return result, nil
}
// naiveRetrieval performs simple retrieval without graph structure
func (l *LightRAGEngine) naiveRetrieval(ctx context.Context, query string, config *rag.RetrievalConfig) (*rag.QueryResult, error) {
if l.vectorStore == nil {
return nil, fmt.Errorf("vector store is required for naive retrieval")
}
// Generate query embedding
queryEmbedding, err := l.embedder.EmbedDocument(ctx, query)
if err != nil {
return nil, fmt.Errorf("failed to generate query embedding: %w", err)
}
// Search vector store
searchResults, err := l.vectorStore.Search(ctx, queryEmbedding, config.K)
if err != nil {
return nil, fmt.Errorf("failed to search vector store: %w", err)
}
// Convert to documents
docs := make([]rag.Document, len(searchResults))
for i, result := range searchResults {
docs[i] = result.Document
}
// Build context
contextStr := l.buildNaiveContext(searchResults)
return &rag.QueryResult{
Query: query,
Sources: docs,
Context: contextStr,
Confidence: l.calculateNaiveConfidence(searchResults),
Metadata: map[string]any{
"mode": "naive",
"num_results": len(searchResults),
"avg_score": l.avgScore(searchResults),
},
}, nil
}
// localRetrieval performs local mode retrieval
// Local mode retrieves relevant entities and their relationships within a localized context
func (l *LightRAGEngine) localRetrieval(ctx context.Context, query string, config *rag.RetrievalConfig) (*rag.QueryResult, error) {
// Extract entities from query
queryEntities, err := l.extractEntities(ctx, query)
if err != nil {
return nil, fmt.Errorf("failed to extract entities: %w", err)
}
// Build entity context
entityDocs := make([]rag.Document, 0)
seenEntities := make(map[string]bool)
for _, queryEntity := range queryEntities {
// Traverse the knowledge graph to find related entities
relatedDocs, err := l.traverseEntities(ctx, queryEntity.ID, l.config.LocalConfig.MaxHops, seenEntities)
if err != nil {
continue
}
entityDocs = append(entityDocs, relatedDocs...)
}
// If we have a vector store, supplement with vector search
if l.vectorStore != nil && len(entityDocs) < config.K {
queryEmbedding, err := l.embedder.EmbedDocument(ctx, query)
if err == nil {
vectorResults, _ := l.vectorStore.Search(ctx, queryEmbedding, config.K-len(entityDocs))
for _, result := range vectorResults {
if len(entityDocs) >= config.K {
break
}
entityDocs = append(entityDocs, result.Document)
}
}
}
// Limit results
if len(entityDocs) > config.K {
entityDocs = entityDocs[:config.K]
}
// Build context
contextStr := l.buildLocalContext(queryEntities, entityDocs)
return &rag.QueryResult{
Query: query,
Sources: entityDocs,
Context: contextStr,
Confidence: l.calculateLocalConfidence(queryEntities, entityDocs),
Metadata: map[string]any{
"mode": "local",
"query_entities": len(queryEntities),
"retrieved_entities": len(entityDocs),
"max_hops": l.config.LocalConfig.MaxHops,
},
}, nil
}
// globalRetrieval performs global mode retrieval
// Global mode retrieves information from community-level summaries
func (l *LightRAGEngine) globalRetrieval(ctx context.Context, query string, config *rag.RetrievalConfig) (*rag.QueryResult, error) {
// Extract entities from query
queryEntities, err := l.extractEntities(ctx, query)
if err != nil {
return nil, fmt.Errorf("failed to extract entities: %w", err)
}
// Find relevant communities
communities, err := l.findRelevantCommunities(ctx, query, queryEntities)
if err != nil {
return nil, fmt.Errorf("failed to find relevant communities: %w", err)
}
// Limit to max communities
if len(communities) > l.config.GlobalConfig.MaxCommunities {
communities = communities[:l.config.GlobalConfig.MaxCommunities]
}
// Build community documents
communityDocs := make([]rag.Document, len(communities))
for i, community := range communities {
content := fmt.Sprintf("Community: %s\nSummary: %s\nEntities: %s",
community.Title,
community.Summary,
strings.Join(community.Entities, ", "))
communityDocs[i] = rag.Document{
ID: community.ID,
Content: content,
Metadata: map[string]any{
"community_level": community.Level,
"num_entities": len(community.Entities),
"score": community.Score,
},
}
}
// Build context
contextStr := l.buildGlobalContext(communities)
return &rag.QueryResult{
Query: query,
Sources: communityDocs,
Context: contextStr,
Confidence: l.calculateGlobalConfidence(communities),
Metadata: map[string]any{
"mode": "global",
"num_communities": len(communities),
"query_entities": len(queryEntities),
"include_hierarchy": l.config.GlobalConfig.IncludeHierarchy,
},
}, nil
}
// hybridRetrieval combines local and global retrieval results
func (l *LightRAGEngine) hybridRetrieval(ctx context.Context, query string, config *rag.RetrievalConfig) (*rag.QueryResult, error) {
// Perform local and global retrieval
localResult, err := l.localRetrieval(ctx, query, config)
if err != nil {
return nil, fmt.Errorf("local retrieval failed: %w", err)
}
globalResult, err := l.globalRetrieval(ctx, query, config)
if err != nil {
return nil, fmt.Errorf("global retrieval failed: %w", err)
}
// Fuse results based on fusion method
var fusedDocs []rag.Document
var fusedScores []float64
switch l.config.HybridConfig.FusionMethod {
case "rrf":
fusedDocs, fusedScores = l.reciprocalRankFusion(localResult, globalResult)
case "weighted":
fusedDocs, fusedScores = l.weightedFusion(localResult, globalResult)
default:
fusedDocs, fusedScores = l.reciprocalRankFusion(localResult, globalResult)
}
// Limit results
if len(fusedDocs) > config.K {
fusedDocs = fusedDocs[:config.K]
fusedScores = fusedScores[:config.K]
}
// Build context
contextStr := l.buildHybridContext(localResult, globalResult, fusedDocs)
// Calculate combined confidence
combinedConfidence := (localResult.Confidence * l.config.HybridConfig.LocalWeight) +
(globalResult.Confidence * l.config.HybridConfig.GlobalWeight)
// Build metadata with scores
metadata := map[string]any{
"mode": "hybrid",
"fusion_method": l.config.HybridConfig.FusionMethod,
"local_weight": l.config.HybridConfig.LocalWeight,
"global_weight": l.config.HybridConfig.GlobalWeight,
"local_confidence": localResult.Confidence,
"global_confidence": globalResult.Confidence,
"local_count": len(localResult.Sources),
"global_count": len(globalResult.Sources),
}
// Add fused scores to metadata if available
if fusedScores != nil {
metadata["fused_scores"] = fusedScores
}
return &rag.QueryResult{
Query: query,
Sources: fusedDocs,
Context: contextStr,
Confidence: combinedConfidence,
Metadata: metadata,
}, nil
}
// AddDocuments adds documents to the LightRAG system
func (l *LightRAGEngine) AddDocuments(ctx context.Context, docs []rag.Document) error {
startTime := time.Now()
for _, doc := range docs {
// Split document into chunks
chunks := l.splitDocument(doc)
// Cache chunks
l.cacheMutex.Lock()
l.chunkCache[doc.ID] = chunks
l.cacheMutex.Unlock()
// Add chunks to vector store if available
if l.vectorStore != nil {
if err := l.vectorStore.Add(ctx, chunks); err != nil {
return fmt.Errorf("failed to add chunks to vector store: %w", err)
}
}
// Extract entities and relationships from each chunk
for _, chunk := range chunks {
entities, err := l.extractEntities(ctx, chunk.Content)
if err != nil {
continue
}
// Add entities to knowledge graph
for _, entity := range entities {
if err := l.knowledgeGraph.AddEntity(ctx, entity); err != nil {
return fmt.Errorf("failed to add entity: %w", err)
}
}
// Extract and add relationships
relationships, err := l.extractRelationships(ctx, chunk.Content, entities)
if err != nil {
continue
}
for _, rel := range relationships {
if err := l.knowledgeGraph.AddRelationship(ctx, rel); err != nil {
return fmt.Errorf("failed to add relationship: %w", err)
}
}
}
}
// Build communities if enabled
if l.config.EnableCommunityDetection {
if err := l.buildCommunities(ctx); err != nil {
return fmt.Errorf("failed to build communities: %w", err)
}
}
l.metrics.TotalDocuments += int64(len(docs))
l.metrics.IndexingLatency = time.Since(startTime)
return nil
}
// DeleteDocument removes a document from the system
func (l *LightRAGEngine) DeleteDocument(ctx context.Context, docID string) error {
l.cacheMutex.Lock()
delete(l.chunkCache, docID)
l.cacheMutex.Unlock()
if l.vectorStore != nil {
return l.vectorStore.Delete(ctx, []string{docID})
}
return fmt.Errorf("document deletion not fully implemented for LightRAG")
}
// UpdateDocument updates a document in the system
func (l *LightRAGEngine) UpdateDocument(ctx context.Context, doc rag.Document) error {
if err := l.DeleteDocument(ctx, doc.ID); err != nil {
return err
}
return l.AddDocuments(ctx, []rag.Document{doc})
}
// SimilaritySearch performs similarity search
func (l *LightRAGEngine) SimilaritySearch(ctx context.Context, query string, k int) ([]rag.Document, error) {
result, err := l.Query(ctx, query)
if err != nil {
return nil, err
}
return result.Sources, nil
}
// SimilaritySearchWithScores performs similarity search with scores
func (l *LightRAGEngine) SimilaritySearchWithScores(ctx context.Context, query string, k int) ([]rag.DocumentSearchResult, error) {
docs, err := l.SimilaritySearch(ctx, query, k)
if err != nil {
return nil, err
}
results := make([]rag.DocumentSearchResult, len(docs))
for i, doc := range docs {
results[i] = rag.DocumentSearchResult{
Document: doc,
Score: 1.0 - float64(i)/float64(len(docs)), // Simple ranking
}
}
return results, nil
}
// splitDocument splits a document into chunks
func (l *LightRAGEngine) splitDocument(doc rag.Document) []rag.Document {
chunks := make([]rag.Document, 0)
content := doc.Content
chunkSize := l.config.ChunkSize
overlap := l.config.ChunkOverlap
for i := 0; i < len(content); {
end := min(i+chunkSize, len(content))
chunk := rag.Document{
ID: fmt.Sprintf("%s_chunk_%d", doc.ID, len(chunks)),
Content: content[i:end],
Metadata: map[string]any{
"source_doc": doc.ID,
"chunk_index": len(chunks),
"metadata": doc.Metadata,
},
CreatedAt: doc.CreatedAt,
UpdatedAt: doc.UpdatedAt,
}
chunks = append(chunks, chunk)
i += (chunkSize - overlap)
}
return chunks
}
// extractEntities extracts entities from text
func (l *LightRAGEngine) extractEntities(ctx context.Context, text string) ([]*rag.Entity, error) {
prompt := l.getEntityExtractionPrompt(text)
response, err := l.llm.Generate(ctx, prompt)
if err != nil {
return nil, err
}
return l.parseEntityExtraction(response)
}
// extractRelationships extracts relationships between entities
func (l *LightRAGEngine) extractRelationships(ctx context.Context, text string, entities []*rag.Entity) ([]*rag.Relationship, error) {
if len(entities) < 2 {
return nil, nil
}
prompt := l.getRelationshipExtractionPrompt(text, entities)
response, err := l.llm.Generate(ctx, prompt)
if err != nil {
return nil, err
}
return l.parseRelationshipExtraction(response)
}
// traverseEntities traverses the knowledge graph to find related entities
func (l *LightRAGEngine) traverseEntities(ctx context.Context, entityID string, maxHops int, seen map[string]bool) ([]rag.Document, error) {
docs := make([]rag.Document, 0)
if maxHops <= 0 || seen[entityID] {
return docs, nil
}
seen[entityID] = true
// Get related entities
relatedEntities, err := l.knowledgeGraph.GetRelatedEntities(ctx, entityID, 1)
if err != nil {
return docs, err
}
for _, entity := range relatedEntities {
if !seen[entity.ID] {
// Create document from entity
content := fmt.Sprintf("Entity: %s\nType: %s", entity.Name, entity.Type)
if l.config.LocalConfig.IncludeDescriptions {
if desc, ok := entity.Properties["description"]; ok {
content += fmt.Sprintf("\nDescription: %v", desc)
}
}
doc := rag.Document{
ID: entity.ID,
Content: content,
Metadata: map[string]any{
"entity_type": entity.Type,
"properties": entity.Properties,
"source": "local_traversal",
},
}
docs = append(docs, doc)
// Recursively traverse
moreDocs, _ := l.traverseEntities(ctx, entity.ID, maxHops-1, seen)
docs = append(docs, moreDocs...)
}
}
return docs, nil
}
// findRelevantCommunities finds communities relevant to the query
func (l *LightRAGEngine) findRelevantCommunities(ctx context.Context, query string, queryEntities []*rag.Entity) ([]*rag.Community, error) {
communities := make([]*rag.Community, 0)
l.cacheMutex.RLock()
defer l.cacheMutex.RUnlock()
// For each query entity, find its community
for _, entity := range queryEntities {
for _, community := range l.communityCache {
// Check if entity is in this community
if slices.Contains(community.Entities, entity.ID) {
communities = append(communities, community)
}
}
}
return communities, nil
}
// buildCommunities builds communities using community detection
func (l *LightRAGEngine) buildCommunities(ctx context.Context) error {
// This is a simplified implementation
// In a production system, you would use proper community detection algorithms
// like Louvain or Leiden
l.cacheMutex.Lock()
defer l.cacheMutex.Unlock()
// Create a default community
community := &rag.Community{
ID: "community_0",
Level: 0,
Title: "Default Community",
Summary: "All entities grouped together",
Entities: make([]string, 0),
Properties: make(map[string]any),
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
// Get all entities from the knowledge graph
// This is a placeholder - in real implementation, you'd query the KG
// to get all entities and group them properly
l.communityCache["community_0"] = community
return nil
}
// reciprocalRankFusion fuses results using RRF
func (l *LightRAGEngine) reciprocalRankFusion(localResult, globalResult *rag.QueryResult) ([]rag.Document, []float64) {
k := float64(l.config.HybridConfig.RFFK)
scores := make(map[string]float64)
docs := make(map[string]rag.Document)
// Process local results
for i, doc := range localResult.Sources {
score := 1.0 / (k + float64(i+1))
if s, ok := scores[doc.ID]; ok {
scores[doc.ID] = s + score
} else {
scores[doc.ID] = score
docs[doc.ID] = doc
}
}
// Process global results
for i, doc := range globalResult.Sources {
score := 1.0 / (k + float64(i+1))
if s, ok := scores[doc.ID]; ok {
scores[doc.ID] = s + score
} else {
scores[doc.ID] = score
docs[doc.ID] = doc
}
}
// Sort by score
type docScore struct {
doc rag.Document
score float64
}
sortedDocs := make([]docScore, 0, len(scores))
for id, score := range scores {
sortedDocs = append(sortedDocs, docScore{doc: docs[id], score: score})
}
// Simple sort
for i := 0; i < len(sortedDocs); i++ {
for j := i + 1; j < len(sortedDocs); j++ {
if sortedDocs[j].score > sortedDocs[i].score {
sortedDocs[i], sortedDocs[j] = sortedDocs[j], sortedDocs[i]
}
}
}
resultDocs := make([]rag.Document, len(sortedDocs))
resultScores := make([]float64, len(sortedDocs))
for i, ds := range sortedDocs {
resultDocs[i] = ds.doc
resultScores[i] = ds.score
}
return resultDocs, resultScores
}
// weightedFusion fuses results using weighted scores
func (l *LightRAGEngine) weightedFusion(localResult, globalResult *rag.QueryResult) ([]rag.Document, []float64) {
scores := make(map[string]float64)
docs := make(map[string]rag.Document)
// Process local results
for i, doc := range localResult.Sources {
score := (1.0 - float64(i)/float64(len(localResult.Sources))) * l.config.HybridConfig.LocalWeight
if s, ok := scores[doc.ID]; ok {
scores[doc.ID] = s + score
} else {
scores[doc.ID] = score
docs[doc.ID] = doc
}
}
// Process global results
for i, doc := range globalResult.Sources {
score := (1.0 - float64(i)/float64(len(globalResult.Sources))) * l.config.HybridConfig.GlobalWeight
if s, ok := scores[doc.ID]; ok {
scores[doc.ID] = s + score
} else {
scores[doc.ID] = score
docs[doc.ID] = doc
}
}
// Sort by score
type docScore struct {
doc rag.Document
score float64
}
sortedDocs := make([]docScore, 0, len(scores))
for id, score := range scores {
sortedDocs = append(sortedDocs, docScore{doc: docs[id], score: score})
}
// Simple sort
for i := 0; i < len(sortedDocs); i++ {
for j := i + 1; j < len(sortedDocs); j++ {
if sortedDocs[j].score > sortedDocs[i].score {
sortedDocs[i], sortedDocs[j] = sortedDocs[j], sortedDocs[i]
}
}
}
resultDocs := make([]rag.Document, len(sortedDocs))
resultScores := make([]float64, len(sortedDocs))
for i, ds := range sortedDocs {
resultDocs[i] = ds.doc
resultScores[i] = ds.score
}
return resultDocs, resultScores
}
// Context building methods
func (l *LightRAGEngine) buildNaiveContext(results []rag.DocumentSearchResult) string {
var sb strings.Builder
sb.WriteString("Retrieved Context:\n\n")
for i, result := range results {
sb.WriteString(fmt.Sprintf("[%d] %s\n", i+1, result.Document.Content))
sb.WriteString(fmt.Sprintf(" Score: %.4f\n\n", result.Score))
}
return sb.String()
}
func (l *LightRAGEngine) buildLocalContext(entities []*rag.Entity, docs []rag.Document) string {
var sb strings.Builder
sb.WriteString("Local Retrieval Context:\n\n")
sb.WriteString("Query Entities:\n")
for _, entity := range entities {
sb.WriteString(fmt.Sprintf("- %s (%s)\n", entity.Name, entity.Type))
}
sb.WriteString("\n")
sb.WriteString("Related Information:\n")
for i, doc := range docs {
sb.WriteString(fmt.Sprintf("[%d] %s\n\n", i+1, doc.Content))
}
return sb.String()
}
func (l *LightRAGEngine) buildGlobalContext(communities []*rag.Community) string {
var sb strings.Builder
sb.WriteString("Global Retrieval Context:\n\n")
for i, community := range communities {
sb.WriteString(fmt.Sprintf("Community %d: %s\n", i+1, community.Title))
sb.WriteString(fmt.Sprintf("Summary: %s\n", community.Summary))
sb.WriteString(fmt.Sprintf("Entities: %s\n", strings.Join(community.Entities, ", ")))
sb.WriteString("\n")
}
return sb.String()
}
func (l *LightRAGEngine) buildHybridContext(localResult, globalResult *rag.QueryResult, fusedDocs []rag.Document) string {
var sb strings.Builder
sb.WriteString("Hybrid Retrieval Context:\n\n")
sb.WriteString("=== Local Results ===\n")
sb.WriteString(localResult.Context)
sb.WriteString("\n")
sb.WriteString("=== Global Results ===\n")
sb.WriteString(globalResult.Context)
sb.WriteString("\n")
sb.WriteString("=== Fused Results ===\n")
for i, doc := range fusedDocs {
sb.WriteString(fmt.Sprintf("[%d] %s\n\n", i+1, doc.Content))
}
return sb.String()
}
// Confidence calculation methods
func (l *LightRAGEngine) calculateNaiveConfidence(results []rag.DocumentSearchResult) float64 {
if len(results) == 0 {
return 0.0
}
return l.avgScore(results)
}
func (l *LightRAGEngine) calculateLocalConfidence(entities []*rag.Entity, docs []rag.Document) float64 {
if len(entities) == 0 {
return 0.0
}
entityFactor := float64(len(entities)) / 10.0
if entityFactor > 1.0 {
entityFactor = 1.0
}
docFactor := float64(len(docs)) / 20.0
if docFactor > 1.0 {
docFactor = 1.0
}
return (entityFactor + docFactor) / 2.0
}
func (l *LightRAGEngine) calculateGlobalConfidence(communities []*rag.Community) float64 {
if len(communities) == 0 {
return 0.0
}
return float64(len(communities)) / float64(l.config.GlobalConfig.MaxCommunities)
}
// Helper methods
func (l *LightRAGEngine) avgScore(results []rag.DocumentSearchResult) float64 {
if len(results) == 0 {
return 0.0
}
sum := 0.0
for _, result := range results {
sum += result.Score
}
return sum / float64(len(results))
}
// getEntityExtractionPrompt returns the prompt for entity extraction.
// A custom template registered under the "entity_extraction" key takes
// precedence; it must contain exactly one %s verb for the input text. The
// default template requests JSON-structured entities, capped at
// l.config.MaxEntitiesPerChunk.
func (l *LightRAGEngine) getEntityExtractionPrompt(text string) string {
	if customPrompt, ok := l.config.PromptTemplates["entity_extraction"]; ok {
		return fmt.Sprintf(customPrompt, text)
	}
	// Default template: %d is the entity cap, %s the chunk text.
	return fmt.Sprintf(`Extract entities from the following text. Focus on important entities like:
- People (PERSON)
- Organizations (ORGANIZATION)
- Locations (LOCATION)
- Products/Technologies (PRODUCT)
- Concepts (CONCEPT)
Return a JSON response with this structure:
{
"entities": [
{
"id": "unique_id",
"name": "entity_name",
"type": "entity_type",
"description": "brief_description",
"properties": {}
}
]
}
Limit to %d most important entities.
Text: %s`, l.config.MaxEntitiesPerChunk, text)
}
// getRelationshipExtractionPrompt returns the prompt for relationship
// extraction between the given entities.
// A custom template registered under "relationship_extraction" takes
// precedence; it must contain two %s verbs (text, entity list). The default
// template requests JSON-structured relationships.
func (l *LightRAGEngine) getRelationshipExtractionPrompt(text string, entities []*rag.Entity) string {
	// Build the "Name (TYPE)" enumeration once; the previous implementation
	// duplicated this loop in both branches.
	entityList := make([]string, len(entities))
	for i, e := range entities {
		entityList[i] = fmt.Sprintf("%s (%s)", e.Name, e.Type)
	}
	entityStr := strings.Join(entityList, ", ")
	if customPrompt, ok := l.config.PromptTemplates["relationship_extraction"]; ok {
		return fmt.Sprintf(customPrompt, text, entityStr)
	}
	return fmt.Sprintf(`Extract relationships between the following entities from the text.
Consider relationship types like: RELATED_TO, PART_OF, WORKS_WITH, LOCATED_IN, CREATED_BY, etc.
Entities: %s
Return a JSON response with this structure:
{
"relationships": [
{
"source": "entity1_name",
"target": "entity2_name",
"type": "relationship_type",
"properties": {},
"confidence": 0.9
}
]
}
Text: %s`, entityStr, text)
}
// parseEntityExtraction converts the LLM's entity-extraction response into
// entities. This is a placeholder implementation: it ignores the response
// text and returns a single stub entity.
// NOTE(review): production code should JSON-decode the response instead.
func (l *LightRAGEngine) parseEntityExtraction(response string) ([]*rag.Entity, error) {
	now := time.Now()
	stub := &rag.Entity{
		ID:   "entity_1",
		Type: "UNKNOWN",
		Name: "Extracted Entity",
		Properties: map[string]any{
			"description": "Entity extracted from text",
		},
		CreatedAt: now,
		UpdatedAt: now,
	}
	return []*rag.Entity{stub}, nil
}
// parseRelationshipExtraction converts the LLM's relationship-extraction
// response into relationships. This is a placeholder implementation: it
// ignores the response text and returns a single stub relationship.
// NOTE(review): production code should JSON-decode the response instead.
func (l *LightRAGEngine) parseRelationshipExtraction(response string) ([]*rag.Relationship, error) {
	stub := &rag.Relationship{
		ID:         "rel_1",
		Source:     "entity_1",
		Target:     "entity_2",
		Type:       "RELATED_TO",
		Properties: make(map[string]any),
		Confidence: 0.8,
		CreatedAt:  time.Now(),
	}
	return []*rag.Relationship{stub}, nil
}
// GetMetrics returns the current metrics.
// The returned pointer is the engine's live metrics object, not a copy.
func (l *LightRAGEngine) GetMetrics() *rag.Metrics {
	return l.metrics
}

// GetConfig returns the current configuration (by value; mutating the
// returned struct does not affect the engine).
func (l *LightRAGEngine) GetConfig() rag.LightRAGConfig {
	return l.config
}

// GetKnowledgeGraph returns the underlying knowledge graph.
func (l *LightRAGEngine) GetKnowledgeGraph() rag.KnowledgeGraph {
	return l.knowledgeGraph
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/loader/static.go | rag/loader/static.go | package loader
import (
"context"
"maps"
"github.com/smallnest/langgraphgo/rag"
)
// StaticDocumentLoader loads documents from a static list.
type StaticDocumentLoader struct {
	Documents []rag.Document // the fixed set of documents returned by Load
}

// NewStaticDocumentLoader creates a new StaticDocumentLoader wrapping the
// given slice (the slice is stored, not copied).
func NewStaticDocumentLoader(documents []rag.Document) *StaticDocumentLoader {
	return &StaticDocumentLoader{
		Documents: documents,
	}
}
// Load returns the static list of documents unchanged (no copy is made).
func (l *StaticDocumentLoader) Load(ctx context.Context) ([]rag.Document, error) {
	return l.Documents, nil
}
// LoadWithMetadata returns the static documents with the given metadata
// merged into each document's metadata (supplied keys overwrite existing
// ones). A nil metadata map returns the stored slice unchanged.
//
// Each returned document gets its own metadata map: the originals' maps are
// cloned before merging, so the loader's stored documents are never mutated.
// (The previous implementation wrote the extra metadata into the shared
// underlying maps, silently modifying l.Documents.)
func (l *StaticDocumentLoader) LoadWithMetadata(ctx context.Context, metadata map[string]any) ([]rag.Document, error) {
	if metadata == nil {
		return l.Documents, nil
	}
	docs := make([]rag.Document, len(l.Documents))
	for i, doc := range l.Documents {
		newDoc := doc
		// Clone rather than alias: doc.Metadata is shared with l.Documents.
		merged := make(map[string]any, len(doc.Metadata)+len(metadata))
		maps.Copy(merged, doc.Metadata)
		maps.Copy(merged, metadata)
		newDoc.Metadata = merged
		docs[i] = newDoc
	}
	return docs, nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/loader/static_test.go | rag/loader/static_test.go | package loader
import (
"context"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
// TestStaticDocumentLoader verifies that a static loader returns its
// documents as-is and that LoadWithMetadata attaches the extra keys.
func TestStaticDocumentLoader(t *testing.T) {
	ctx := context.Background()
	docs := []rag.Document{
		{ID: "1", Content: "static 1"},
		{ID: "2", Content: "static 2"},
	}
	loader := NewStaticDocumentLoader(docs)
	t.Run("Basic Load", func(t *testing.T) {
		loaded, err := loader.Load(ctx)
		assert.NoError(t, err)
		// Load returns the stored slice unchanged.
		assert.Equal(t, docs, loaded)
	})
	t.Run("Load with Metadata", func(t *testing.T) {
		loaded, err := loader.LoadWithMetadata(ctx, map[string]any{"extra": "meta"})
		assert.NoError(t, err)
		assert.Len(t, loaded, 2)
		// The supplied metadata must appear on each returned document.
		assert.Equal(t, "meta", loaded[0].Metadata["extra"])
	})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/loader/text.go | rag/loader/text.go | package loader
import (
"bufio"
"context"
"fmt"
"io"
"maps"
"os"
"strings"
"github.com/smallnest/langgraphgo/rag"
)
// TextLoader loads documents from text files (one document per file).
type TextLoader struct {
	filePath      string         // path of the file to read
	encoding      string         // declared encoding (informational; bytes are read as-is)
	metadata      map[string]any // base metadata attached to the loaded document
	lineSeparator string         // line separator (not consulted by the whole-file Load path)
}

// TextLoaderOption configures the TextLoader.
type TextLoaderOption func(*TextLoader)

// WithEncoding sets the text encoding.
func WithEncoding(encoding string) TextLoaderOption {
	return func(l *TextLoader) {
		l.encoding = encoding
	}
}

// WithMetadata merges additional metadata into the loader's defaults
// (later keys overwrite earlier ones, including "source"/"type").
func WithMetadata(metadata map[string]any) TextLoaderOption {
	return func(l *TextLoader) {
		maps.Copy(l.metadata, metadata)
	}
}

// WithLineSeparator sets the line separator.
func WithLineSeparator(separator string) TextLoaderOption {
	return func(l *TextLoader) {
		l.lineSeparator = separator
	}
}
// NewTextLoader creates a TextLoader for the given file and applies the
// options. Defaults: UTF-8 encoding, "\n" line separator, and metadata
// pre-populated with the source path and a "text" type tag (options may
// override these defaults).
func NewTextLoader(filePath string, opts ...TextLoaderOption) rag.DocumentLoader {
	loader := &TextLoader{
		filePath:      filePath,
		encoding:      "utf-8",
		lineSeparator: "\n",
		metadata: map[string]any{
			"source": filePath,
			"type":   "text",
		},
	}
	// Options run after the defaults so they can overwrite them.
	for _, apply := range opts {
		apply(loader)
	}
	return loader
}
// Load loads the file as a single document, using the loader's own metadata.
func (l *TextLoader) Load(ctx context.Context) ([]rag.Document, error) {
	return l.LoadWithMetadata(ctx, l.metadata)
}
// LoadWithMetadata reads the whole file into a single document, merging the
// loader's default metadata with the supplied map (supplied keys win).
func (l *TextLoader) LoadWithMetadata(ctx context.Context, metadata map[string]any) ([]rag.Document, error) {
	merged := make(map[string]any)
	maps.Copy(merged, l.metadata)
	maps.Copy(merged, metadata)

	f, err := os.Open(l.filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to open file %s: %w", l.filePath, err)
	}
	defer f.Close()

	data, err := io.ReadAll(f)
	if err != nil {
		return nil, fmt.Errorf("failed to read file %s: %w", l.filePath, err)
	}

	return []rag.Document{{
		ID:       l.generateDocumentID(),
		Content:  string(data),
		Metadata: merged,
	}}, nil
}
// generateDocumentID derives a stable document ID from the file path.
func (l *TextLoader) generateDocumentID() string {
	return "text_" + l.filePath
}
// TextByLinesLoader loads a text file as one document per non-empty line.
type TextByLinesLoader struct {
	filePath string         // path of the file to read
	metadata map[string]any // base metadata attached to every line document
}

// NewTextByLinesLoader creates a new TextByLinesLoader.
//
// The provided metadata (which may be nil) is copied rather than stored
// directly, so the loader's bookkeeping keys ("source", "type") never leak
// back into the caller's map. (The previous implementation wrote those keys
// into the map the caller passed in.)
func NewTextByLinesLoader(filePath string, metadata map[string]any) rag.DocumentLoader {
	meta := make(map[string]any, len(metadata)+2)
	maps.Copy(meta, metadata) // maps.Copy is a no-op for a nil source
	meta["source"] = filePath
	meta["type"] = "text_lines"
	return &TextByLinesLoader{
		filePath: filePath,
		metadata: meta,
	}
}
// Load loads the file split by lines, using the loader's own metadata.
func (l *TextByLinesLoader) Load(ctx context.Context) ([]rag.Document, error) {
	return l.LoadWithMetadata(ctx, l.metadata)
}
// LoadWithMetadata reads the file line by line and emits one document per
// non-empty (after trimming) line. Each document carries the merged
// metadata plus its zero-based "line_number"; note the counter only advances
// on emitted lines, so skipped blank lines do not consume numbers.
func (l *TextByLinesLoader) LoadWithMetadata(ctx context.Context, metadata map[string]any) ([]rag.Document, error) {
	merged := make(map[string]any)
	maps.Copy(merged, l.metadata)
	maps.Copy(merged, metadata)

	f, err := os.Open(l.filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to open file %s: %w", l.filePath, err)
	}
	defer f.Close()

	var documents []rag.Document
	lineNumber := 0
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		text := strings.TrimSpace(scanner.Text())
		if text == "" {
			continue // skip blank lines without consuming a line number
		}
		meta := make(map[string]any)
		maps.Copy(meta, merged)
		meta["line_number"] = lineNumber
		documents = append(documents, rag.Document{
			ID:       fmt.Sprintf("%s_line_%d", l.filePath, lineNumber),
			Content:  text,
			Metadata: meta,
		})
		lineNumber++
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("error reading file %s: %w", l.filePath, err)
	}
	return documents, nil
}
// TextByParagraphsLoader loads a text file as one document per paragraph.
type TextByParagraphsLoader struct {
	filePath        string         // path of the file to read
	metadata        map[string]any // base metadata attached to every paragraph document
	paragraphMarker string         // delimiter between paragraphs (default "\n\n")
}

// TextByParagraphsLoaderOption configures the TextByParagraphsLoader.
type TextByParagraphsLoaderOption func(*TextByParagraphsLoader)

// WithParagraphMarker sets the paragraph delimiter.
func WithParagraphMarker(marker string) TextByParagraphsLoaderOption {
	return func(l *TextByParagraphsLoader) {
		l.paragraphMarker = marker
	}
}

// NewTextByParagraphsLoader creates a new TextByParagraphsLoader.
// The default marker is a blank line ("\n\n"); metadata is pre-populated
// with the source path and a "text_paragraphs" type tag.
func NewTextByParagraphsLoader(filePath string, opts ...TextByParagraphsLoaderOption) rag.DocumentLoader {
	l := &TextByParagraphsLoader{
		filePath:        filePath,
		metadata:        make(map[string]any),
		paragraphMarker: "\n\n",
	}
	l.metadata["source"] = filePath
	l.metadata["type"] = "text_paragraphs"
	for _, opt := range opts {
		opt(l)
	}
	return l
}
// Load loads the file split by paragraphs, using the loader's own metadata.
func (l *TextByParagraphsLoader) Load(ctx context.Context) ([]rag.Document, error) {
	return l.LoadWithMetadata(ctx, l.metadata)
}
// LoadWithMetadata reads the whole file and emits one document per
// non-empty (after trimming) paragraph, split on the configured marker.
// Each document carries the merged metadata plus its "paragraph_number",
// which is the index in the raw split (blank paragraphs still consume
// numbers even though they are skipped).
func (l *TextByParagraphsLoader) LoadWithMetadata(ctx context.Context, metadata map[string]any) ([]rag.Document, error) {
	merged := make(map[string]any)
	maps.Copy(merged, l.metadata)
	maps.Copy(merged, metadata)

	raw, err := os.ReadFile(l.filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to read file %s: %w", l.filePath, err)
	}

	var documents []rag.Document
	for idx, part := range strings.Split(string(raw), l.paragraphMarker) {
		text := strings.TrimSpace(part)
		if text == "" {
			continue // skip empty paragraphs
		}
		meta := make(map[string]any)
		maps.Copy(meta, merged)
		meta["paragraph_number"] = idx
		documents = append(documents, rag.Document{
			ID:       fmt.Sprintf("%s_paragraph_%d", l.filePath, idx),
			Content:  text,
			Metadata: meta,
		})
	}
	return documents, nil
}
// TextByChaptersLoader loads a text file as one document per chapter, where
// a chapter starts at any line containing the chapter pattern.
type TextByChaptersLoader struct {
	filePath       string         // path of the file to read
	metadata       map[string]any // base metadata attached to every chapter document
	chapterPattern string         // substring that marks a chapter heading (default "Chapter")
}

// TextByChaptersLoaderOption configures the TextByChaptersLoader.
type TextByChaptersLoaderOption func(*TextByChaptersLoader)

// WithChapterPattern sets the substring that identifies chapter headings.
func WithChapterPattern(pattern string) TextByChaptersLoaderOption {
	return func(l *TextByChaptersLoader) {
		l.chapterPattern = pattern
	}
}

// NewTextByChaptersLoader creates a new TextByChaptersLoader.
// The default heading pattern is "Chapter"; metadata is pre-populated with
// the source path and a "text_chapters" type tag.
func NewTextByChaptersLoader(filePath string, opts ...TextByChaptersLoaderOption) rag.DocumentLoader {
	l := &TextByChaptersLoader{
		filePath:       filePath,
		metadata:       make(map[string]any),
		chapterPattern: "Chapter",
	}
	l.metadata["source"] = filePath
	l.metadata["type"] = "text_chapters"
	for _, opt := range opts {
		opt(l)
	}
	return l
}
// Load loads the file split by chapters, using the loader's own metadata.
func (l *TextByChaptersLoader) Load(ctx context.Context) ([]rag.Document, error) {
	return l.LoadWithMetadata(ctx, l.metadata)
}
// LoadWithMetadata reads the file and emits one document per chapter. A new
// chapter starts at any line whose trimmed text contains chapterPattern;
// text before the first heading (if any) becomes its own untitled chapter.
// Each document carries the merged metadata plus "chapter_number" (1-based,
// in emission order) and "chapter_title" (the trimmed heading line).
//
// Fixes over the previous implementation: the flush logic was duplicated
// for the loop body and the trailing chapter, and chapter numbers were off
// by one (the first emitted chapter was numbered 2 because the counter was
// incremented before the chapter was saved).
func (l *TextByChaptersLoader) LoadWithMetadata(ctx context.Context, metadata map[string]any) ([]rag.Document, error) {
	combinedMetadata := make(map[string]any)
	maps.Copy(combinedMetadata, l.metadata)
	maps.Copy(combinedMetadata, metadata)

	content, err := os.ReadFile(l.filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to read file %s: %w", l.filePath, err)
	}

	var documents []rag.Document
	var currentChapter strings.Builder
	var chapterTitle string

	// flush appends the accumulated chapter as a document, numbered by its
	// position in the output; empty accumulations are silently dropped.
	flush := func() {
		chapterContent := strings.TrimSpace(currentChapter.String())
		if chapterContent == "" {
			return
		}
		chapterNumber := len(documents) + 1
		chapterMetadata := make(map[string]any)
		maps.Copy(chapterMetadata, combinedMetadata)
		chapterMetadata["chapter_number"] = chapterNumber
		chapterMetadata["chapter_title"] = chapterTitle
		documents = append(documents, rag.Document{
			ID:       fmt.Sprintf("%s_chapter_%d", l.filePath, chapterNumber),
			Content:  chapterContent,
			Metadata: chapterMetadata,
		})
	}

	for _, line := range strings.Split(string(content), "\n") {
		trimmedLine := strings.TrimSpace(line)
		if strings.Contains(trimmedLine, l.chapterPattern) {
			// New heading: emit whatever was accumulated, then restart.
			flush()
			currentChapter.Reset()
			chapterTitle = trimmedLine
		}
		currentChapter.WriteString(line + "\n")
	}
	flush() // emit the trailing chapter
	return documents, nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/loader/text_test.go | rag/loader/text_test.go | package loader
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestTextLoader verifies whole-file loading and metadata merging.
func TestTextLoader(t *testing.T) {
	ctx := context.Background()
	content := "Line 1\nLine 2\nLine 3"
	tmpDir := t.TempDir()
	tmpFile := filepath.Join(tmpDir, "test.txt")
	err := os.WriteFile(tmpFile, []byte(content), 0644)
	assert.NoError(t, err)
	t.Run("Basic Load", func(t *testing.T) {
		loader := NewTextLoader(tmpFile)
		docs, err := loader.Load(ctx)
		assert.NoError(t, err)
		// Whole-file loading yields exactly one document.
		assert.Len(t, docs, 1)
		assert.Equal(t, content, docs[0].Content)
		assert.Equal(t, tmpFile, docs[0].Metadata["source"])
	})
	t.Run("Load with Metadata", func(t *testing.T) {
		loader := NewTextLoader(tmpFile, WithMetadata(map[string]any{"author": "test"}))
		docs, err := loader.Load(ctx)
		assert.NoError(t, err)
		assert.Equal(t, "test", docs[0].Metadata["author"])
	})
}

// TestTextByLinesLoader verifies that blank and whitespace-only lines are
// skipped when splitting by lines.
func TestTextByLinesLoader(t *testing.T) {
	ctx := context.Background()
	content := "Line 1\n\nLine 2\n \nLine 3"
	tmpDir := t.TempDir()
	tmpFile := filepath.Join(tmpDir, "test_lines.txt")
	err := os.WriteFile(tmpFile, []byte(content), 0644)
	assert.NoError(t, err)
	loader := NewTextByLinesLoader(tmpFile, nil)
	docs, err := loader.Load(ctx)
	assert.NoError(t, err)
	assert.Len(t, docs, 3) // Empty lines should be skipped
	assert.Equal(t, "Line 1", docs[0].Content)
	assert.Equal(t, "Line 2", docs[1].Content)
	assert.Equal(t, "Line 3", docs[2].Content)
}

// TestTextByChaptersLoader verifies chapter splitting on a heading pattern.
func TestTextByChaptersLoader(t *testing.T) {
	ctx := context.Background()
	content := "Chapter 1\nContent 1\nChapter 2\nContent 2"
	tmpDir := t.TempDir()
	tmpFile := filepath.Join(tmpDir, "test_chapters.txt")
	err := os.WriteFile(tmpFile, []byte(content), 0644)
	assert.NoError(t, err)
	loader := NewTextByChaptersLoader(tmpFile, WithChapterPattern("Chapter"))
	docs, err := loader.Load(ctx)
	assert.NoError(t, err)
	assert.Len(t, docs, 2)
	// Each chapter document includes its heading line.
	assert.Contains(t, docs[0].Content, "Chapter 1")
	assert.Contains(t, docs[1].Content, "Chapter 2")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/splitter/splitter_test.go | rag/splitter/splitter_test.go | package splitter
import (
"strings"
"testing"
"github.com/smallnest/langgraphgo/rag"
"github.com/stretchr/testify/assert"
)
// TestRecursiveCharacterTextSplitter exercises fixed-size splitting,
// separator-driven splitting, and the chunk metadata added by
// SplitDocuments.
func TestRecursiveCharacterTextSplitter(t *testing.T) {
	t.Run("Basic splitting", func(t *testing.T) {
		s := NewRecursiveCharacterTextSplitter(
			WithChunkSize(10),
			WithChunkOverlap(0),
		)
		text := "1234567890abcdefghij"
		chunks := s.SplitText(text)
		assert.Len(t, chunks, 2)
		assert.Equal(t, "1234567890", chunks[0])
		assert.Equal(t, "abcdefghij", chunks[1])
	})
	t.Run("Split with separators", func(t *testing.T) {
		s := NewRecursiveCharacterTextSplitter(
			WithChunkSize(10),
			WithChunkOverlap(0),
			WithSeparators([]string{"\n"}),
		)
		text := "part1\npart2\npart3"
		chunks := s.SplitText(text)
		assert.Len(t, chunks, 3)
		assert.Equal(t, "part1", chunks[0])
		assert.Equal(t, "part2", chunks[1])
		assert.Equal(t, "part3", chunks[2])
	})
	t.Run("Split documents", func(t *testing.T) {
		s := NewRecursiveCharacterTextSplitter(
			WithChunkSize(10),
			WithChunkOverlap(2),
		)
		doc := rag.Document{
			ID:       "doc1",
			Content:  "123456789012345",
			Metadata: map[string]any{"key": "val"},
		}
		chunks := s.SplitDocuments([]rag.Document{doc})
		assert.NotEmpty(t, chunks)
		// Every chunk must reference its parent and its position.
		for i, chunk := range chunks {
			assert.Equal(t, "doc1", chunk.Metadata["parent_id"])
			assert.Equal(t, i, chunk.Metadata["chunk_index"])
			assert.Equal(t, len(chunks), chunk.Metadata["chunk_total"])
		}
	})
}

// TestCharacterTextSplitter checks separator-based split and round-trip join.
func TestCharacterTextSplitter(t *testing.T) {
	s := NewCharacterTextSplitter(
		WithCharacterSeparator("|"),
		WithCharacterChunkSize(5),
		WithCharacterChunkOverlap(0),
	)
	text := "abc|def|ghi"
	chunks := s.SplitText(text)
	assert.Len(t, chunks, 3)
	assert.Equal(t, "abc", chunks[0])
	assert.Equal(t, "def", chunks[1])
	joined := s.JoinText(chunks)
	assert.Equal(t, "abc|def|ghi", joined)
}

// TestTokenTextSplitter checks whitespace-token splitting for text and docs.
func TestTokenTextSplitter(t *testing.T) {
	s := NewTokenTextSplitter(5, 0, nil)
	text := "one two three four five six seven eight"
	chunks := s.SplitText(text)
	assert.Len(t, chunks, 2)
	assert.Equal(t, "one two three four five", chunks[0])
	doc := rag.Document{ID: "tok1", Content: text}
	docChunks := s.SplitDocuments([]rag.Document{doc})
	assert.Len(t, docChunks, 2)
}

// TestRecursiveCharacterJoin checks the zero-overlap join path.
func TestRecursiveCharacterJoin(t *testing.T) {
	s := NewRecursiveCharacterTextSplitter(WithChunkOverlap(0))
	joined := s.JoinText([]string{"a", "b"})
	assert.Equal(t, "a b", joined)
}
// SimpleTextSplitter tests

// TestSimpleTextSplitter covers construction defaults, chunking edge cases
// (short text, exact size, overlap, tiny chunks, empty input), joining, and
// the per-chunk metadata produced by SplitDocuments.
func TestSimpleTextSplitter(t *testing.T) {
	t.Run("NewSimpleTextSplitter", func(t *testing.T) {
		s := NewSimpleTextSplitter(100, 10)
		assert.NotNil(t, s)
		simple := s.(*SimpleTextSplitter)
		assert.Equal(t, 100, simple.ChunkSize)
		assert.Equal(t, 10, simple.ChunkOverlap)
		assert.Equal(t, "\n\n", simple.Separator)
	})
	t.Run("SplitText short text", func(t *testing.T) {
		s := NewSimpleTextSplitter(100, 10)
		text := "Short text"
		chunks := s.SplitText(text)
		assert.Len(t, chunks, 1)
		assert.Equal(t, "Short text", chunks[0])
	})
	t.Run("SplitText exact size", func(t *testing.T) {
		s := NewSimpleTextSplitter(10, 0)
		text := "1234567890" // exactly 10 chars
		chunks := s.SplitText(text)
		assert.Len(t, chunks, 1)
		assert.Equal(t, "1234567890", chunks[0])
	})
	t.Run("SplitText multiple chunks", func(t *testing.T) {
		s := NewSimpleTextSplitter(10, 0)
		text := "1234567890abcdefghijklmnop"
		chunks := s.SplitText(text)
		assert.Len(t, chunks, 3)
		assert.Equal(t, "1234567890", chunks[0])
		assert.Equal(t, "abcdefghij", chunks[1])
		assert.Equal(t, "klmnop", chunks[2])
	})
	t.Run("SplitText with separator", func(t *testing.T) {
		s := NewSimpleTextSplitter(20, 0)
		// Default separator is "\n\n"
		text := "First paragraph\n\nSecond paragraph here"
		chunks := s.SplitText(text)
		// Text is 43 chars, chunk size is 20
		// It will split into multiple chunks
		assert.Greater(t, len(chunks), 1)
		assert.Contains(t, chunks[0], "First paragraph")
	})
	t.Run("SplitText with overlap", func(t *testing.T) {
		s := NewSimpleTextSplitter(20, 5)
		text := "12345678901234567890abcdefghijklmnopqrstuvwxyz"
		chunks := s.SplitText(text)
		assert.Greater(t, len(chunks), 1)
		// Check that there is overlap between consecutive chunks
		if len(chunks) > 1 {
			// The last 5 chars of chunk 0 should appear in chunk 1
			endOfFirst := chunks[0][len(chunks[0])-5:]
			assert.Contains(t, chunks[1], endOfFirst)
		}
	})
	t.Run("SplitText empty string", func(t *testing.T) {
		s := NewSimpleTextSplitter(100, 10)
		chunks := s.SplitText("")
		// Empty input yields a single empty chunk, not an empty slice.
		assert.Len(t, chunks, 1)
		assert.Equal(t, "", chunks[0])
	})
	t.Run("SplitText with very small chunk size", func(t *testing.T) {
		s := NewSimpleTextSplitter(3, 0)
		text := "abcdefgh"
		chunks := s.SplitText(text)
		assert.Len(t, chunks, 3)
		assert.Equal(t, "abc", chunks[0])
		assert.Equal(t, "def", chunks[1])
		assert.Equal(t, "gh", chunks[2])
	})
	t.Run("SplitText overlap prevents getting stuck", func(t *testing.T) {
		// Overlap close to chunk size must still make forward progress.
		s := NewSimpleTextSplitter(10, 8)
		text := "12345678901234567890"
		chunks := s.SplitText(text)
		assert.Greater(t, len(chunks), 1)
	})
	t.Run("JoinText empty chunks", func(t *testing.T) {
		s := NewSimpleTextSplitter(100, 10)
		joined := s.JoinText([]string{})
		assert.Equal(t, "", joined)
	})
	t.Run("JoinText single chunk", func(t *testing.T) {
		s := NewSimpleTextSplitter(100, 10)
		joined := s.JoinText([]string{"single"})
		assert.Equal(t, "single", joined)
	})
	t.Run("JoinText multiple chunks", func(t *testing.T) {
		s := NewSimpleTextSplitter(100, 10)
		joined := s.JoinText([]string{"first", "second", "third"})
		assert.Equal(t, "first second third", joined)
	})
	t.Run("SplitDocuments single document", func(t *testing.T) {
		s := NewSimpleTextSplitter(20, 5)
		doc := rag.Document{
			ID:       "doc1",
			Content:  "This is a test document that should be split into multiple chunks for testing",
			Metadata: map[string]any{"source": "test"},
		}
		chunks := s.SplitDocuments([]rag.Document{doc})
		assert.Greater(t, len(chunks), 1)
		// Verify metadata
		for i, chunk := range chunks {
			assert.Equal(t, "doc1", chunk.ID)
			assert.Equal(t, "test", chunk.Metadata["source"])
			assert.Equal(t, i, chunk.Metadata["chunk_index"])
			assert.Equal(t, len(chunks), chunk.Metadata["total_chunks"])
		}
	})
	t.Run("SplitDocuments multiple documents", func(t *testing.T) {
		s := NewSimpleTextSplitter(15, 0)
		docs := []rag.Document{
			{ID: "doc1", Content: "First document content"},
			{ID: "doc2", Content: "Second document here"},
		}
		chunks := s.SplitDocuments(docs)
		assert.Greater(t, len(chunks), 2)
		// First doc chunks should have doc1 as parent_id
		doc1Chunks := 0
		doc2Chunks := 0
		for _, chunk := range chunks {
			if chunk.ID == "doc1" {
				doc1Chunks++
			} else if chunk.ID == "doc2" {
				doc2Chunks++
			}
		}
		assert.Greater(t, doc1Chunks, 0)
		assert.Greater(t, doc2Chunks, 0)
	})
	t.Run("SplitText with custom separator", func(t *testing.T) {
		s := &SimpleTextSplitter{
			ChunkSize:    15,
			ChunkOverlap: 0,
			Separator:    "||",
		}
		text := "Part1||Part2||Part3"
		chunks := s.SplitText(text)
		// Text is 21 chars, chunk size is 15
		// Will split: first chunk to "Part1||Part2||" then second
		assert.GreaterOrEqual(t, len(chunks), 1)
		assert.Contains(t, chunks[0], "Part1")
	})
	t.Run("SplitText trims whitespace", func(t *testing.T) {
		s := NewSimpleTextSplitter(10, 0)
		text := "1234567890 abcdefghij "
		chunks := s.SplitText(text)
		assert.Greater(t, len(chunks), 1)
		// Chunks should be trimmed
		for _, chunk := range chunks {
			assert.Equal(t, strings.TrimSpace(chunk), chunk)
		}
	})
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/splitter/recursive.go | rag/splitter/recursive.go | package splitter
import (
"fmt"
"maps"
"strings"
"unicode"
"github.com/smallnest/langgraphgo/rag"
)
// RecursiveCharacterTextSplitter recursively splits text while keeping
// related pieces together: it tries each separator in order, only falling
// back to the next (finer) separator for pieces that are still too large.
type RecursiveCharacterTextSplitter struct {
	separators   []string         // tried in order, coarsest first; "" means split by raw characters
	chunkSize    int              // maximum chunk length as measured by lengthFunc
	chunkOverlap int              // desired overlap between consecutive chunks
	lengthFunc   func(string) int // length measure (defaults to byte length)
}

// RecursiveCharacterTextSplitterOption configures the RecursiveCharacterTextSplitter.
type RecursiveCharacterTextSplitterOption func(*RecursiveCharacterTextSplitter)

// WithChunkSize sets the chunk size for the splitter.
func WithChunkSize(size int) RecursiveCharacterTextSplitterOption {
	return func(s *RecursiveCharacterTextSplitter) {
		s.chunkSize = size
	}
}

// WithChunkOverlap sets the chunk overlap for the splitter.
func WithChunkOverlap(overlap int) RecursiveCharacterTextSplitterOption {
	return func(s *RecursiveCharacterTextSplitter) {
		s.chunkOverlap = overlap
	}
}

// WithSeparators sets the custom separators for the splitter (coarsest first).
func WithSeparators(separators []string) RecursiveCharacterTextSplitterOption {
	return func(s *RecursiveCharacterTextSplitter) {
		s.separators = separators
	}
}

// WithLengthFunction sets a custom length function (e.g. a token counter).
func WithLengthFunction(fn func(string) int) RecursiveCharacterTextSplitterOption {
	return func(s *RecursiveCharacterTextSplitter) {
		s.lengthFunc = fn
	}
}
// NewRecursiveCharacterTextSplitter builds a splitter with defaults of
// 1000-char chunks, 200-char overlap, paragraph/line/word/character
// separators, and byte-length measurement, then applies the given options.
func NewRecursiveCharacterTextSplitter(opts ...RecursiveCharacterTextSplitterOption) rag.TextSplitter {
	splitter := &RecursiveCharacterTextSplitter{
		separators:   []string{"\n\n", "\n", " ", ""},
		chunkSize:    1000,
		chunkOverlap: 200,
		lengthFunc:   func(text string) int { return len(text) },
	}
	for _, apply := range opts {
		apply(splitter)
	}
	return splitter
}
// SplitText splits text into chunks, starting from the coarsest separator.
func (s *RecursiveCharacterTextSplitter) SplitText(text string) []string {
	return s.splitTextRecursive(text, s.separators)
}
// SplitDocuments splits each document's content and wraps every chunk in a
// new document. Chunk documents inherit the parent's metadata plus
// chunk_index, chunk_total, and parent_id, carry IDs of the form
// "<parentID>_chunk_<i>", and keep the parent's timestamps.
func (s *RecursiveCharacterTextSplitter) SplitDocuments(docs []rag.Document) []rag.Document {
	out := make([]rag.Document, 0)
	for _, parent := range docs {
		pieces := s.SplitText(parent.Content)
		for idx, piece := range pieces {
			// Fresh metadata map per chunk so chunks never share state.
			meta := make(map[string]any)
			maps.Copy(meta, parent.Metadata)
			meta["chunk_index"] = idx
			meta["chunk_total"] = len(pieces)
			meta["parent_id"] = parent.ID
			out = append(out, rag.Document{
				ID:        fmt.Sprintf("%s_chunk_%d", parent.ID, idx),
				Content:   piece,
				Metadata:  meta,
				CreatedAt: parent.CreatedAt,
				UpdatedAt: parent.UpdatedAt,
			})
		}
	}
	return out
}
// JoinText reassembles chunks into a single space-joined string.
//
// With no configured overlap the chunks are joined as-is. Otherwise the
// first chunkOverlap characters of every chunk after the first are assumed
// to duplicate the previous chunk's tail and are dropped. An empty chunk
// list yields "" (the previous implementation indexed chunks[0]
// unconditionally and panicked on an empty slice when an overlap was set).
func (s *RecursiveCharacterTextSplitter) JoinText(chunks []string) string {
	if len(chunks) == 0 {
		return ""
	}
	if s.chunkOverlap == 0 {
		return strings.Join(chunks, " ")
	}
	var result strings.Builder
	result.WriteString(chunks[0])
	for _, chunk := range chunks[1:] {
		// Drop the leading overlap that repeats the previous chunk's tail.
		if len(chunk) > s.chunkOverlap {
			chunk = chunk[s.chunkOverlap:]
		}
		result.WriteString(" " + chunk)
	}
	return result.String()
}
// splitTextRecursive recursively splits text using the provided separators.
// Text already within chunkSize is returned whole. Otherwise the text is
// split on the first separator; pieces that are still too large recurse
// with the remaining (finer) separators, and the resulting pieces are
// re-merged up to chunkSize. With no separators left, raw character
// windows are used.
func (s *RecursiveCharacterTextSplitter) splitTextRecursive(text string, separators []string) []string {
	if s.lengthFunc(text) <= s.chunkSize {
		return []string{text}
	}
	if len(separators) == 0 {
		// No more separators, split by character
		return s.splitByCharacter(text)
	}
	separator := separators[0]
	remainingSeparators := separators[1:]
	splits := s.splitTextHelper(text, separator)
	// Filter out empty splits (whitespace-only pieces are dropped entirely)
	var finalSplits []string
	for _, split := range splits {
		if strings.TrimSpace(split) != "" {
			finalSplits = append(finalSplits, split)
		}
	}
	// Now further split the splits that are too large
	var goodSplits []string
	for _, split := range finalSplits {
		if s.lengthFunc(split) <= s.chunkSize {
			goodSplits = append(goodSplits, split)
		} else {
			// If split is still too large, recursively split with next separator
			otherSplits := s.splitTextRecursive(split, remainingSeparators)
			goodSplits = append(goodSplits, otherSplits...)
		}
	}
	return s.mergeSplits(goodSplits)
}
// splitTextHelper splits text by a separator; the empty separator means
// fixed-size character windows.
func (s *RecursiveCharacterTextSplitter) splitTextHelper(text, separator string) []string {
	if separator == "" {
		return s.splitByCharacter(text)
	}
	return strings.Split(text, separator)
}
// splitByCharacter slices text into windows of chunkSize bytes, advancing
// by chunkSize-chunkOverlap each step so consecutive windows share
// chunkOverlap bytes. A non-positive step (chunkOverlap >= chunkSize) is
// clamped to 1 to guarantee forward progress; the previous implementation
// looped forever in that configuration.
// NOTE(review): windows are byte-based and may cut multi-byte UTF-8 runes.
func (s *RecursiveCharacterTextSplitter) splitByCharacter(text string) []string {
	step := s.chunkSize - s.chunkOverlap
	if step <= 0 {
		step = 1
	}
	var splits []string
	for i := 0; i < len(text); i += step {
		end := i + s.chunkSize
		if end > len(text) {
			end = len(text)
		}
		splits = append(splits, text[i:end])
	}
	return splits
}
// mergeSplits greedily packs consecutive splits into chunks: splits are
// joined with "\n\n" as long as the combined length stays within chunkSize,
// then a new chunk is started. If an overlap is configured, applyOverlap
// post-processes the merged chunks.
func (s *RecursiveCharacterTextSplitter) mergeSplits(splits []string) []string {
	var merged []string
	var current string
	for _, split := range splits {
		// If current is empty, start with this split
		if current == "" {
			current = split
			continue
		}
		// Check if adding this split would exceed chunk size
		proposed := current + "\n\n" + split
		if s.lengthFunc(proposed) <= s.chunkSize {
			current = proposed
		} else {
			// Add current to merged and start new with split
			merged = append(merged, current)
			current = split
		}
	}
	// Add the last chunk
	if current != "" {
		merged = append(merged, current)
	}
	// Apply overlap
	if s.chunkOverlap > 0 && len(merged) > 1 {
		merged = s.applyOverlap(merged)
	}
	return merged
}
// applyOverlap deduplicates overlap between consecutive chunks: for each
// chunk after the first, any prefix that repeats the previous chunk's tail
// (as detected by findOverlap) is stripped. Chunks with no detected overlap
// pass through unchanged.
func (s *RecursiveCharacterTextSplitter) applyOverlap(chunks []string) []string {
	var overlapped []string
	for i, chunk := range chunks {
		if i == 0 {
			overlapped = append(overlapped, chunk)
			continue
		}
		prevChunk := chunks[i-1]
		overlap := s.findOverlap(prevChunk, chunk)
		if overlap != "" {
			// Remove overlap from the beginning of the current chunk
			chunk = strings.TrimPrefix(chunk, overlap)
			chunk = strings.TrimSpace(chunk)
		}
		overlapped = append(overlapped, chunk)
	}
	return overlapped
}
// findOverlap finds the longest suffix of text1 (up to chunkOverlap bytes)
// that matches a prefix of text2, comparing with surrounding whitespace
// trimmed. It returns the matching untrimmed prefix of text2, or "" when no
// overlap is found.
func (s *RecursiveCharacterTextSplitter) findOverlap(text1, text2 string) string {
	maxOverlap := min(s.chunkOverlap, len(text1), len(text2))
	// Try the longest candidate first and shrink until a match is found.
	for overlap := maxOverlap; overlap > 0; overlap-- {
		text1End := text1[len(text1)-overlap:]
		text2Start := text2[:overlap]
		// Normalize whitespace for comparison
		text1End = strings.TrimSpace(text1End)
		text2Start = strings.TrimSpace(text2Start)
		if text1End == text2Start {
			return text2[:overlap]
		}
	}
	return ""
}
// min returns the smallest of the given values, or 0 when called with no
// arguments. This package-level helper shadows the Go 1.21 builtin but
// additionally tolerates an empty argument list.
func min(values ...int) int {
	if len(values) == 0 {
		return 0
	}
	result := values[0]
	for _, v := range values {
		if v < result {
			result = v
		}
	}
	return result
}
// CharacterTextSplitter splits text by character count
type CharacterTextSplitter struct {
	separator    string           // boundary used by splitBySeparator; "" falls back to raw character windows
	chunkSize    int              // maximum chunk length as measured by lengthFunc
	chunkOverlap int              // characters shared between consecutive raw-character chunks
	lengthFunc   func(string) int // length metric; defaults to byte length (len) in NewCharacterTextSplitter
}

// CharacterTextSplitterOption configures the CharacterTextSplitter
type CharacterTextSplitterOption func(*CharacterTextSplitter)
// WithCharacterSeparator overrides the separator string used when splitting.
func WithCharacterSeparator(separator string) CharacterTextSplitterOption {
	return func(cs *CharacterTextSplitter) { cs.separator = separator }
}

// WithCharacterChunkSize overrides the maximum chunk size.
func WithCharacterChunkSize(size int) CharacterTextSplitterOption {
	return func(cs *CharacterTextSplitter) { cs.chunkSize = size }
}

// WithCharacterChunkOverlap overrides the overlap between consecutive chunks.
func WithCharacterChunkOverlap(overlap int) CharacterTextSplitterOption {
	return func(cs *CharacterTextSplitter) { cs.chunkOverlap = overlap }
}
// NewCharacterTextSplitter builds a CharacterTextSplitter with defaults
// (newline separator, 1000-char chunks, 200-char overlap, byte-length
// metric) and then applies the supplied options in order.
func NewCharacterTextSplitter(opts ...CharacterTextSplitterOption) rag.TextSplitter {
	splitter := &CharacterTextSplitter{
		separator:    "\n",
		chunkSize:    1000,
		chunkOverlap: 200,
		lengthFunc:   func(text string) int { return len(text) },
	}
	for _, apply := range opts {
		apply(splitter)
	}
	return splitter
}
// SplitText splits text into chunks: on the configured separator when one
// is set, otherwise by raw character windows.
func (s *CharacterTextSplitter) SplitText(text string) []string {
	if s.separator == "" {
		return s.splitByCharacterCount(text)
	}
	return s.splitBySeparator(text)
}
// SplitDocuments splits each document's content and wraps the pieces in new
// documents. Chunk documents get IDs of the form "<parentID>_chunk_<i>",
// inherit the parent's metadata and timestamps, and record their position
// via the "chunk_index", "chunk_total" and "parent_id" metadata keys.
func (s *CharacterTextSplitter) SplitDocuments(docs []rag.Document) []rag.Document {
	result := make([]rag.Document, 0)
	for _, doc := range docs {
		pieces := s.SplitText(doc.Content)
		for idx, piece := range pieces {
			meta := make(map[string]any, len(doc.Metadata)+3)
			maps.Copy(meta, doc.Metadata)
			meta["chunk_index"] = idx
			meta["chunk_total"] = len(pieces)
			meta["parent_id"] = doc.ID
			result = append(result, rag.Document{
				ID:        fmt.Sprintf("%s_chunk_%d", doc.ID, idx),
				Content:   piece,
				Metadata:  meta,
				CreatedAt: doc.CreatedAt,
				UpdatedAt: doc.UpdatedAt,
			})
		}
	}
	return result
}
// JoinText joins text chunks back together using the configured separator.
// When the separator is empty this degenerates to plain concatenation, so a
// single strings.Join covers both cases of the original branching.
func (s *CharacterTextSplitter) JoinText(chunks []string) string {
	return strings.Join(chunks, s.separator)
}
// splitBySeparator splits text on the configured separator and then greedily
// packs consecutive pieces back together while the joined result (including
// the separator) still fits within chunkSize. A piece larger than chunkSize
// is emitted as its own oversized chunk.
func (s *CharacterTextSplitter) splitBySeparator(text string) []string {
	if s.separator == "" {
		return s.splitByCharacterCount(text)
	}
	pieces := strings.Split(text, s.separator)
	var chunks []string
	current := ""
	for _, piece := range pieces {
		fits := s.lengthFunc(current)+s.lengthFunc(piece)+len(s.separator) <= s.chunkSize
		switch {
		case fits && current != "":
			current += s.separator + piece
		case fits:
			current = piece
		default:
			// Flush the accumulated chunk and start a new one.
			if current != "" {
				chunks = append(chunks, current)
			}
			current = piece
		}
	}
	if current != "" {
		chunks = append(chunks, current)
	}
	return chunks
}
// splitByCharacterCount splits text into fixed-size byte windows of at most
// chunkSize, advancing by chunkSize-chunkOverlap between windows.
//
// Fix: the original advanced by chunkSize-chunkOverlap unconditionally, so a
// configuration with chunkOverlap >= chunkSize (or chunkSize <= 0) made the
// loop step by zero or a negative amount and spin forever. We now guard both
// degenerate configurations: a non-positive chunkSize returns the text as a
// single chunk, and a non-positive step falls back to disjoint windows.
func (s *CharacterTextSplitter) splitByCharacterCount(text string) []string {
	if s.chunkSize <= 0 {
		// Cannot make progress with a non-positive window; return as-is.
		return []string{text}
	}
	step := s.chunkSize - s.chunkOverlap
	if step <= 0 {
		// overlap >= chunkSize would never advance; use disjoint chunks.
		step = s.chunkSize
	}
	var chunks []string
	for i := 0; i < len(text); i += step {
		end := min(i+s.chunkSize, len(text))
		chunks = append(chunks, text[i:end])
	}
	return chunks
}
// TokenTextSplitter splits text by token count
type TokenTextSplitter struct {
	chunkSize    int       // maximum number of tokens per chunk
	chunkOverlap int       // tokens repeated between consecutive chunks
	tokenizer    Tokenizer // pluggable tokenization strategy
}

// Tokenizer interface for different tokenization strategies
type Tokenizer interface {
	// Encode splits text into tokens.
	Encode(text string) []string
	// Decode reassembles tokens back into text.
	Decode(tokens []string) string
}
// DefaultTokenizer is a simple word-based tokenizer
type DefaultTokenizer struct{}

// Encode tokenizes text into words (maximal runs of non-whitespace runes).
//
// Fix: the original hand-rolled scan built each word with per-character
// string concatenation (quadratic per word) and applied a redundant
// rune(char) conversion inside a range loop. strings.FieldsFunc with
// unicode.IsSpace implements the identical splitting semantics.
func (t *DefaultTokenizer) Encode(text string) []string {
	return strings.FieldsFunc(text, unicode.IsSpace)
}

// Decode detokenizes words back to text, joining with single spaces.
// Original inter-word whitespace is not recoverable.
func (t *DefaultTokenizer) Decode(tokens []string) string {
	return strings.Join(tokens, " ")
}
// NewTokenTextSplitter creates a TokenTextSplitter. A nil tokenizer falls
// back to the word-based DefaultTokenizer.
func NewTokenTextSplitter(chunkSize, chunkOverlap int, tokenizer Tokenizer) rag.TextSplitter {
	tk := tokenizer
	if tk == nil {
		tk = &DefaultTokenizer{}
	}
	return &TokenTextSplitter{
		chunkSize:    chunkSize,
		chunkOverlap: chunkOverlap,
		tokenizer:    tk,
	}
}
// SplitText splits text into chunks of at most chunkSize tokens, advancing
// by chunkSize-chunkOverlap tokens between windows and decoding each window
// back to a string. Text that fits in one window is returned unchanged.
//
// Fix: the original advanced by chunkSize-chunkOverlap unconditionally, so
// chunkOverlap >= chunkSize made the loop spin forever, and chunkSize <= 0
// could compute an end index before the start. Both degenerate
// configurations are now guarded.
func (s *TokenTextSplitter) SplitText(text string) []string {
	tokens := s.tokenizer.Encode(text)
	if s.chunkSize <= 0 || len(tokens) <= s.chunkSize {
		return []string{text}
	}
	step := s.chunkSize - s.chunkOverlap
	if step <= 0 {
		// An overlap >= chunkSize would never advance; fall back to
		// non-overlapping windows instead of looping forever.
		step = s.chunkSize
	}
	var chunks []string
	for i := 0; i < len(tokens); i += step {
		end := i + s.chunkSize
		if end > len(tokens) {
			end = len(tokens)
		}
		chunks = append(chunks, s.tokenizer.Decode(tokens[i:end]))
	}
	return chunks
}
// SplitDocuments splits each document's content into token-based chunks.
// Chunk documents get IDs of the form "<parentID>_chunk_<i>", inherit the
// parent's metadata and timestamps, and record their position via the
// "chunk_index", "chunk_total" and "parent_id" metadata keys.
func (s *TokenTextSplitter) SplitDocuments(docs []rag.Document) []rag.Document {
	out := make([]rag.Document, 0)
	for _, doc := range docs {
		pieces := s.SplitText(doc.Content)
		total := len(pieces)
		for idx, piece := range pieces {
			meta := make(map[string]any, len(doc.Metadata)+3)
			maps.Copy(meta, doc.Metadata)
			meta["chunk_index"] = idx
			meta["chunk_total"] = total
			meta["parent_id"] = doc.ID
			out = append(out, rag.Document{
				ID:        fmt.Sprintf("%s_chunk_%d", doc.ID, idx),
				Content:   piece,
				Metadata:  meta,
				CreatedAt: doc.CreatedAt,
				UpdatedAt: doc.UpdatedAt,
			})
		}
	}
	return out
}
// JoinText joins text chunks back together
//
// Chunks are rejoined with single spaces (the same convention as
// DefaultTokenizer.Decode); original whitespace is not recoverable.
func (s *TokenTextSplitter) JoinText(chunks []string) string {
	return strings.Join(chunks, " ")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/rag/splitter/simple.go | rag/splitter/simple.go | package splitter
import (
"maps"
"strings"
"github.com/smallnest/langgraphgo/rag"
)
// SimpleTextSplitter splits text into chunks of a given size
type SimpleTextSplitter struct {
	ChunkSize    int    // maximum chunk length in bytes
	ChunkOverlap int    // bytes stepped back between consecutive chunks
	Separator    string // preferred break point searched for inside each window
}
// NewSimpleTextSplitter creates a SimpleTextSplitter that prefers breaking
// chunks at paragraph boundaries ("\n\n").
func NewSimpleTextSplitter(chunkSize, chunkOverlap int) rag.TextSplitter {
	s := &SimpleTextSplitter{Separator: "\n\n"}
	s.ChunkSize = chunkSize
	s.ChunkOverlap = chunkOverlap
	return s
}
// SplitText splits text into chunks
//
// Thin exported wrapper over the internal splitText implementation.
func (s *SimpleTextSplitter) SplitText(text string) []string {
	return s.splitText(text)
}
// SplitDocuments splits documents into smaller chunks.
//
// Unlike the other splitters in this package, every chunk keeps the parent
// document's ID and timestamps; position is recorded via the "chunk_index"
// and "total_chunks" metadata keys on a copied metadata map.
func (s *SimpleTextSplitter) SplitDocuments(documents []rag.Document) []rag.Document {
	var result []rag.Document
	for _, doc := range documents {
		pieces := s.splitText(doc.Content)
		for idx, piece := range pieces {
			meta := make(map[string]any, len(doc.Metadata)+2)
			maps.Copy(meta, doc.Metadata)
			meta["chunk_index"] = idx
			meta["total_chunks"] = len(pieces)
			result = append(result, rag.Document{
				ID:        doc.ID,
				Content:   piece,
				Metadata:  meta,
				CreatedAt: doc.CreatedAt,
				UpdatedAt: doc.UpdatedAt,
			})
		}
	}
	return result
}
// JoinText joins text chunks back together with single spaces. Because
// splitText trims whitespace and may overlap chunks, the original text is
// not reconstructed exactly.
func (s *SimpleTextSplitter) JoinText(chunks []string) string {
	switch len(chunks) {
	case 0:
		return ""
	case 1:
		return chunks[0]
	default:
		return strings.Join(chunks, " ")
	}
}
// splitText chops text into chunks of at most ChunkSize bytes, preferring to
// end each chunk just after the last Separator found inside the window, and
// stepping back ChunkOverlap bytes between consecutive chunks. Emitted
// chunks are whitespace-trimmed; text that already fits is returned as a
// single untrimmed chunk.
func (s *SimpleTextSplitter) splitText(text string) []string {
	if len(text) <= s.ChunkSize {
		return []string{text}
	}
	var chunks []string
	start := 0
	for start < len(text) {
		end := start + s.ChunkSize
		if end > len(text) {
			end = len(text)
		}
		// Try to break at a separator
		// NOTE(review): lastSep > 0 (not >= 0) ignores a separator at the
		// very start of the window — presumably to avoid emitting an empty
		// chunk; confirm this is intentional.
		if end < len(text) {
			lastSep := strings.LastIndex(text[start:end], s.Separator)
			if lastSep > 0 {
				end = start + lastSep + len(s.Separator)
			}
		}
		chunks = append(chunks, strings.TrimSpace(text[start:end]))
		nextStart := end - s.ChunkOverlap
		if nextStart <= start {
			// If overlap would cause us to get stuck or move backwards (because the chunk was small),
			// just move forward to the end of the current chunk.
			nextStart = end
		}
		start = max(nextStart, 0)
	}
	return chunks
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/conditional_routing/main.go | examples/conditional_routing/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
)
// Task models a routed work item: an identifier, a priority class used for
// routing, and a free-form payload.
//
// NOTE(review): main below builds plain map states instead of using this
// type; Task appears to be illustrative only — confirm before removing.
type Task struct {
	ID       string
	Priority string
	Content  string
}
// main builds a router graph with one terminal handler per priority class,
// compiles it, and runs three sample tasks (high, low, medium priority)
// through it, printing which handler each task reached.
//
// Improvement over the original: the three near-identical handler node
// registrations and the three copy-pasted invoke/print sections are folded
// into table-driven loops; printed output is unchanged.
func main() {
	g := graph.NewStateGraph[map[string]any]()

	// Router is a passthrough node; the routing decision lives on the
	// conditional edge registered below.
	g.AddNode("router", "router", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		return state, nil
	})

	// One terminal handler per priority class. Each returns a fresh state
	// map carrying only its status marker, then flows to END.
	handlers := []struct{ node, status string }{
		{"urgent_handler", "handled_urgent"},
		{"normal_handler", "handled_normal"},
		{"batch_handler", "handled_batch"},
	}
	for _, h := range handlers {
		status := h.status // captured per iteration for the closure
		g.AddNode(h.node, h.node, func(ctx context.Context, state map[string]any) (map[string]any, error) {
			return map[string]any{"status": status}, nil
		})
		g.AddEdge(h.node, graph.END)
	}

	g.SetEntryPoint("router")

	// Route on the "priority" field: high -> urgent, low -> batch,
	// anything else (including missing/non-string) -> normal.
	g.AddConditionalEdge("router", func(ctx context.Context, state map[string]any) string {
		priority, _ := state["priority"].(string)
		switch priority {
		case "high":
			return "urgent_handler"
		case "low":
			return "batch_handler"
		default:
			return "normal_handler"
		}
	})

	runnable, err := g.Compile()
	if err != nil {
		log.Fatal(err)
	}

	// Demo tasks, run in order; a blank line precedes every section after
	// the first, matching the original output exactly.
	cases := []struct {
		label string
		state map[string]any
	}{
		{"High Priority Task", map[string]any{"id": "1", "priority": "high", "content": "System down"}},
		{"Low Priority Task", map[string]any{"id": "2", "priority": "low", "content": "Update docs"}},
		{"Normal Priority Task", map[string]any{"id": "3", "priority": "medium", "content": "Bug fix"}},
	}
	for i, tc := range cases {
		if i > 0 {
			fmt.Println()
		}
		fmt.Printf("--- %s ---\n", tc.label)
		result, err := runnable.Invoke(context.Background(), tc.state)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("Result: %s\n", result["status"])
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/configuration/main.go | examples/configuration/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
)
// main demonstrates per-invocation configuration: a single-node graph whose
// node reads a "limit" value from the Config attached to the context by
// InvokeWithConfig, falling back to a default of 5.
//
// Fixes over the original: the errors returned by InvokeWithConfig were
// silently discarded (`res, _ :=`); they are now checked, and the two
// duplicated run sections are folded into one loop (output unchanged).
func main() {
	g := graph.NewStateGraph[map[string]any]()

	g.AddNode("process", "process", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		// Pull the runtime configuration that InvokeWithConfig attached
		// to the context.
		config := graph.GetConfig(ctx)
		limit := 5 // default when no config (or no int "limit") is supplied
		if config != nil && config.Configurable != nil {
			if val, ok := config.Configurable["limit"].(int); ok {
				limit = val
			}
		}
		fmt.Printf("Processing with limit: %d\n", limit)
		state["processed"] = true
		state["limit_used"] = limit
		return state, nil
	})
	g.SetEntryPoint("process")
	g.AddEdge("process", graph.END)

	runnable, err := g.Compile()
	if err != nil {
		log.Fatal(err)
	}

	// Run the same graph twice with different configured limits.
	for _, limit := range []int{3, 10} {
		cfg := &graph.Config{
			Configurable: map[string]any{"limit": limit},
		}
		res, err := runnable.InvokeWithConfig(context.Background(), map[string]any{"input": "start"}, cfg)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("Result with limit %d: %v\n", limit, res)
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/ptc_basic/main.go | examples/ptc_basic/main.go | package main
import (
"context"
"fmt"
"log"
"math"
"strconv"
"strings"
"github.com/smallnest/langgraphgo/ptc"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
"github.com/tmc/langchaingo/tools"
)
// CalculatorTool performs arithmetic operations
type CalculatorTool struct{}
func (t CalculatorTool) Name() string {
return "calculator"
}
func (t CalculatorTool) Description() string {
return `Performs arithmetic calculations.
Input: A JSON string with the operation and numbers:
{"operation": "add|subtract|multiply|divide|power|sqrt", "a": number, "b": number (optional for sqrt)}
Examples:
- {"operation": "add", "a": 5, "b": 3} -> 8
- {"operation": "multiply", "a": 4, "b": 7} -> 28
- {"operation": "sqrt", "a": 16} -> 4`
}
func (t CalculatorTool) Call(ctx context.Context, input string) (string, error) {
// Parse input
var params struct {
Operation string `json:"operation"`
A float64 `json:"a"`
B float64 `json:"b"`
}
// Simple JSON parsing
input = strings.TrimSpace(input)
if strings.Contains(input, "operation") {
// Extract operation
if strings.Contains(input, `"add"`) {
params.Operation = "add"
} else if strings.Contains(input, `"subtract"`) {
params.Operation = "subtract"
} else if strings.Contains(input, `"multiply"`) {
params.Operation = "multiply"
} else if strings.Contains(input, `"divide"`) {
params.Operation = "divide"
} else if strings.Contains(input, `"power"`) {
params.Operation = "power"
} else if strings.Contains(input, `"sqrt"`) {
params.Operation = "sqrt"
}
// Extract numbers
parts := strings.Split(input, ",")
for _, part := range parts {
if strings.Contains(part, `"a"`) {
numParts := strings.Split(part, ":")
if len(numParts) > 1 {
numStr := numParts[1]
numStr = strings.Trim(strings.TrimSpace(numStr), "}")
if num, err := strconv.ParseFloat(numStr, 64); err == nil {
params.A = num
}
}
}
if strings.Contains(part, `"b"`) {
numParts := strings.Split(part, ":")
if len(numParts) > 1 {
numStr := numParts[1]
numStr = strings.Trim(strings.TrimSpace(numStr), "}")
if num, err := strconv.ParseFloat(numStr, 64); err == nil {
params.B = num
}
}
}
}
}
var result float64
switch params.Operation {
case "add":
result = params.A + params.B
case "subtract":
result = params.A - params.B
case "multiply":
result = params.A * params.B
case "divide":
if params.B == 0 {
return "", fmt.Errorf("division by zero")
}
result = params.A / params.B
case "power":
result = math.Pow(params.A, params.B)
case "sqrt":
result = math.Sqrt(params.A)
default:
return "", fmt.Errorf("unknown operation: %s", params.Operation)
}
return fmt.Sprintf("%.2f", result), nil
}
// WeatherTool returns canned weather information for a location.
type WeatherTool struct{}

// Name returns the tool identifier exposed to the agent.
func (t WeatherTool) Name() string {
	return "get_weather"
}

// Description documents the expected input format for the LLM.
func (t WeatherTool) Description() string {
	return `Gets current weather for a location.
Input: A JSON string with the city name:
{"city": "city_name"}
Example: {"city": "San Francisco"}
Returns: Weather information as a JSON string with temperature and conditions.`
}

// Call extracts the city name via simple substring scanning and returns a
// mock weather report as JSON. Unrecognized cities get 72 degrees.
func (t WeatherTool) Call(ctx context.Context, input string) (string, error) {
	city := "Unknown"
	if !strings.Contains(input, "city") {
		// Bare city name, possibly quoted/braced.
		city = strings.Trim(input, `"{} `)
	} else if pieces := strings.Split(input, ":"); len(pieces) > 1 {
		city = strings.Trim(strings.TrimSpace(pieces[1]), `"{}`)
	}

	// Mock temperatures for a few known cities; everything else is 72.
	temperature := 72
	switch city {
	case "San Francisco":
		temperature = 68
	case "New York":
		temperature = 55
	case "London":
		temperature = 52
	case "Tokyo":
		temperature = 70
	case "Paris":
		temperature = 58
	}
	return fmt.Sprintf(`{"city": "%s", "temperature": %d, "conditions": "Sunny", "humidity": 65}`, city, temperature), nil
}
// DataProcessorTool pretends to process data arrays; it is a mock that
// dispatches on which operation keyword appears in the input.
type DataProcessorTool struct{}

// Name returns the tool identifier exposed to the agent.
func (t DataProcessorTool) Name() string {
	return "process_data"
}

// Description documents the expected input format for the LLM.
func (t DataProcessorTool) Description() string {
	return `Processes and filters data arrays.
Input: A JSON string with operation and data:
{"operation": "sum|average|max|min|count", "data": [1, 2, 3, ...]}
Examples:
- {"operation": "sum", "data": [1, 2, 3, 4, 5]} -> 15
- {"operation": "average", "data": [10, 20, 30]} -> 20
- {"operation": "max", "data": [5, 2, 9, 1]} -> 9`
}

// Call returns canned results keyed on the first operation keyword found in
// the input ("sum" is checked before "average" before "max"); anything else
// yields "10".
func (t DataProcessorTool) Call(ctx context.Context, input string) (string, error) {
	switch {
	case strings.Contains(input, "sum"):
		return "15", nil
	case strings.Contains(input, "average"):
		return "20.5", nil
	case strings.Contains(input, "max"):
		return "42", nil
	default:
		return "10", nil
	}
}
// main wires the three mock tools into a PTC (Programmatic Tool Calling)
// agent backed by an OpenAI model, runs one multi-step query, prints the
// full message transcript, and echoes the latest AI message as the answer.
//
// Requires a configured OpenAI environment (openai.New reads its standard
// env vars); any setup or invocation failure exits via log.Fatalf.
func main() {
	// NOTE(review): go vet flags Println with an explicit trailing \n
	// (Println appends its own); kept as-is to preserve the exact output.
	fmt.Println("=== Programmatic Tool Calling (PTC) Example ===\n")
	fmt.Println("This example demonstrates how PTC allows LLMs to generate")
	fmt.Println("code that calls tools programmatically, reducing API round-trips.\n")
	// Create model (supports any LLM that implements llms.Model)
	model, err := openai.New()
	if err != nil {
		log.Fatalf("Failed to create model: %v", err)
	}
	// Define tools
	toolList := []tools.Tool{
		CalculatorTool{},
		WeatherTool{},
		DataProcessorTool{},
	}
	// Create PTC agent
	agent, err := ptc.CreatePTCAgent(ptc.PTCAgentConfig{
		Model:         model,
		Tools:         toolList,
		Language:      ptc.LanguagePython, // Python is recommended (better LLM support)
		MaxIterations: 5,
	})
	if err != nil {
		log.Fatalf("Failed to create agent: %v", err)
	}
	// Run a query that benefits from PTC
	// This query requires multiple tool calls that can be done programmatically
	query := `What's the weather in San Francisco and New York?
Calculate the average of their temperatures, then multiply by 2.`
	fmt.Printf("Query: %s\n\n", query)
	fmt.Println("Processing... (The LLM will generate code to call tools)")
	fmt.Println(strings.Repeat("-", 60))
	result, err := agent.Invoke(context.Background(), map[string]any{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart(query)},
			},
		},
	})
	if err != nil {
		log.Fatalf("Error: %v", err)
	}
	// Print result
	// NOTE(review): this type assertion panics if the result carries no
	// "messages" slice — acceptable for an example, not for library code.
	messages := result["messages"].([]llms.MessageContent)
	fmt.Println("\n" + strings.Repeat("-", 60))
	fmt.Println("Execution Complete!")
	fmt.Println(strings.Repeat("-", 60))
	// Show all messages for transparency
	fmt.Println("\nAll Messages:")
	for i, msg := range messages {
		// Map the message role to a display label; unknown roles fall
		// through with an empty label (no default case).
		var role string
		switch msg.Role {
		case llms.ChatMessageTypeHuman:
			role = "User"
		case llms.ChatMessageTypeAI:
			role = "AI"
		case llms.ChatMessageTypeTool:
			role = "Tool"
		case llms.ChatMessageTypeSystem:
			role = "System"
		}
		fmt.Printf("\n[%d] %s:\n", i+1, role)
		for _, part := range msg.Parts {
			if textPart, ok := part.(llms.TextContent); ok {
				text := textPart.Text
				// Truncate long messages for readability
				if len(text) > 500 {
					fmt.Printf("%s...\n(truncated, total %d chars)\n", text[:500], len(text))
				} else {
					fmt.Println(text)
				}
			}
		}
	}
	// Extract and display final answer
	fmt.Println("\n" + strings.Repeat("=", 60))
	fmt.Println("FINAL ANSWER:")
	fmt.Println(strings.Repeat("=", 60))
	// Walk backwards to the most recent AI message and print its text parts.
	for i := len(messages) - 1; i >= 0; i-- {
		if messages[i].Role == llms.ChatMessageTypeAI {
			for _, part := range messages[i].Parts {
				if textPart, ok := part.(llms.TextContent); ok {
					fmt.Println(textPart.Text)
				}
			}
			break
		}
	}
	fmt.Println(strings.Repeat("=", 60))
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/visualization/main.go | examples/visualization/main.go | package main
import (
"context"
"fmt"
"github.com/smallnest/langgraphgo/graph"
)
// main assembles a linear seven-stage pipeline graph, compiles it, and
// prints Mermaid and ASCII renderings of its structure.
//
// Improvement over the original: seven copy-pasted AddNode closures and six
// hand-written AddEdge calls are replaced by a table of stages iterated in
// order; each node still returns a fresh map per invocation.
func main() {
	g := graph.NewStateGraph[map[string]any]()

	// Pipeline stages in execution order. Each node just reports a marker
	// map; the example is about visualizing structure, not node logic.
	stages := []struct {
		name   string
		result map[string]any
	}{
		{"validate_input", map[string]any{"valid": true}},
		{"fetch_data", map[string]any{"data": "raw"}},
		{"transform", map[string]any{"data": "transformed"}},
		{"enrich", map[string]any{"data": "enriched"}},
		{"validate_output", map[string]any{"output_valid": true}},
		{"save", map[string]any{"saved": true}},
		{"notify", map[string]any{"notified": true}},
	}
	for _, st := range stages {
		result := st.result
		g.AddNode(st.name, st.name, func(ctx context.Context, state map[string]any) (map[string]any, error) {
			// Return a fresh copy per invocation, matching the original
			// behavior of allocating the result inside each node.
			out := make(map[string]any, len(result))
			for k, v := range result {
				out[k] = v
			}
			return out, nil
		})
	}

	// Chain the stages linearly and terminate at END.
	g.SetEntryPoint(stages[0].name)
	for i := 0; i+1 < len(stages); i++ {
		g.AddEdge(stages[i].name, stages[i+1].name)
	}
	g.AddEdge(stages[len(stages)-1].name, graph.END)

	runnable, err := g.Compile()
	if err != nil {
		panic(err)
	}

	// Render the compiled graph in both supported formats.
	exporter := graph.GetGraphForRunnable(runnable)
	fmt.Println("=== Mermaid Diagram ===")
	fmt.Println(exporter.DrawMermaid())
	fmt.Println("\n=== ASCII Diagram ===")
	fmt.Println(exporter.DrawASCII())
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/lightrag_advanced/main.go | examples/lightrag_advanced/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"strings"
"time"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/engine"
"github.com/smallnest/langgraphgo/rag/store"
"github.com/tmc/langchaingo/llms/openai"
)
// OpenAILLMAdapter wraps langchaingo's openai.LLM to implement
// rag.LLMInterface.
type OpenAILLMAdapter struct {
	llm *openai.LLM // underlying langchaingo client
}

// NewOpenAILLMAdapter wraps an existing openai.LLM client.
func NewOpenAILLMAdapter(baseLLM *openai.LLM) *OpenAILLMAdapter {
	return &OpenAILLMAdapter{llm: baseLLM}
}

// Generate sends the prompt to the underlying model and returns its reply.
func (a *OpenAILLMAdapter) Generate(ctx context.Context, prompt string) (string, error) {
	return a.llm.Call(ctx, prompt)
}

// GenerateWithConfig generates a completion for prompt. The config map is
// currently ignored; the call delegates straight to Generate.
func (a *OpenAILLMAdapter) GenerateWithConfig(ctx context.Context, prompt string, config map[string]any) (string, error) {
	return a.Generate(ctx, prompt)
}

// GenerateWithSystem prepends the system prompt, separated by a blank line,
// then delegates to Generate.
func (a *OpenAILLMAdapter) GenerateWithSystem(ctx context.Context, system, prompt string) (string, error) {
	combined := system + "\n\n" + prompt
	return a.Generate(ctx, combined)
}
// MockLLM implements rag.LLMInterface for demonstration without API keys.
// Every method returns the same canned entity-extraction JSON so the
// example can run fully offline.
type MockLLM struct{}

// Generate returns a fixed entity-extraction response regardless of prompt.
func (m *MockLLM) Generate(ctx context.Context, prompt string) (string, error) {
	response := `{
"entities": [
{
"id": "entity_1",
"name": "AI",
"type": "CONCEPT",
"description": "Artificial Intelligence",
"properties": {"field": "technology"}
}
]
}`
	return response, nil
}

// GenerateWithConfig ignores config and returns the canned response.
func (m *MockLLM) GenerateWithConfig(ctx context.Context, prompt string, config map[string]any) (string, error) {
	return m.Generate(ctx, prompt)
}

// GenerateWithSystem ignores the system prompt and returns the canned response.
func (m *MockLLM) GenerateWithSystem(ctx context.Context, system, prompt string) (string, error) {
	return m.Generate(ctx, prompt)
}
// main is an end-to-end tour of the LightRAG engine: it picks a real OpenAI
// LLM when OPENAI_API_KEY looks usable (falling back to MockLLM), builds
// in-memory knowledge-graph and vector stores, indexes sample documents,
// compares the four retrieval modes and two fusion methods, exercises graph
// traversal and document add/update, then prints aggregate metrics.
func main() {
	ctx := context.Background()
	// Check if OpenAI API key is set, not empty, and looks valid
	apiKey := os.Getenv("OPENAI_API_KEY")
	useOpenAI := apiKey != "" && len(apiKey) > 10 // Basic validation
	var llm rag.LLMInterface
	if useOpenAI {
		// Use real OpenAI LLM with explicit token
		baseLLM, err := openai.New(
			openai.WithToken(apiKey),
		)
		if err != nil {
			// Degrade gracefully to the mock rather than aborting the demo.
			log.Printf("Failed to create OpenAI LLM: %v", err)
			log.Println("Falling back to Mock LLM")
			llm = &MockLLM{}
		} else {
			llm = NewOpenAILLMAdapter(baseLLM)
			fmt.Println("Using OpenAI LLM for entity extraction")
		}
	} else {
		// API key not set or invalid
		if apiKey != "" && len(apiKey) <= 10 {
			fmt.Println("Warning: OPENAI_API_KEY appears to be invalid (too short)")
		}
		fmt.Println("Using Mock LLM for demonstration")
		fmt.Println("Note: Set a valid OPENAI_API_KEY environment variable to use real OpenAI LLM")
		fmt.Println()
		llm = &MockLLM{}
	}
	// Create embedder (mock, 128-dim vectors)
	embedder := store.NewMockEmbedder(128)
	// Create knowledge graph (in-memory)
	kg, err := store.NewKnowledgeGraph("memory://")
	if err != nil {
		log.Fatalf("Failed to create knowledge graph: %v", err)
	}
	// Create vector store (in-memory)
	vectorStore := store.NewInMemoryVectorStore(embedder)
	fmt.Println("=== LightRAG Advanced Example ===")
	fmt.Println("This example demonstrates advanced features of LightRAG including:")
	fmt.Println("- Custom prompt templates")
	fmt.Println("- Community detection")
	fmt.Println("- Different fusion methods")
	fmt.Println("- Performance comparison between modes")
	fmt.Println()
	// Configure LightRAG with advanced options
	config := rag.LightRAGConfig{
		Mode:                      "hybrid",
		ChunkSize:                 512,
		ChunkOverlap:              50,
		MaxEntitiesPerChunk:       20,
		EntityExtractionThreshold: 0.5,
		Temperature:               0.7,
		// Local retrieval configuration
		LocalConfig: rag.LocalRetrievalConfig{
			TopK:                15,
			MaxHops:             3,
			IncludeDescriptions: true,
			EntityWeight:        0.8,
		},
		// Global retrieval configuration
		GlobalConfig: rag.GlobalRetrievalConfig{
			MaxCommunities:    10,
			IncludeHierarchy:  true,
			MaxHierarchyDepth: 5,
			CommunityWeight:   0.7,
		},
		// Hybrid retrieval configuration
		HybridConfig: rag.HybridRetrievalConfig{
			LocalWeight:  0.6,
			GlobalWeight: 0.4,
			FusionMethod: "rrf",
			RFFK:         60,
		},
		// Enable community detection
		EnableCommunityDetection:    true,
		CommunityDetectionAlgorithm: "louvain",
		// Custom prompt templates
		PromptTemplates: map[string]string{
			"entity_extraction": `Extract key entities from the following text.
Focus on: %s
Return JSON:
{
"entities": [
{
"id": "unique_id",
"name": "entity_name",
"type": "PERSON|ORGANIZATION|PRODUCT|CONCEPT|TECHNOLOGY",
"description": "brief description",
"properties": {"importance": "high|medium|low"}
}
]
}
Text: %s`,
			"relationship_extraction": `Extract relationships between these entities: %s
From text: %s
Return JSON:
{
"relationships": [
{
"source": "entity1",
"target": "entity2",
"type": "RELATED_TO|PART_OF|USES|COMPETES_WITH",
"confidence": 0.9
}
]
}`,
		},
	}
	// Create LightRAG engine
	lightrag, err := engine.NewLightRAGEngine(config, llm, embedder, kg, vectorStore)
	if err != nil {
		log.Fatalf("Failed to create LightRAG engine: %v", err)
	}
	// Sample documents about AI and Machine Learning
	documents := createSampleDocuments()
	fmt.Println("Indexing documents...")
	startTime := time.Now()
	err = lightrag.AddDocuments(ctx, documents)
	if err != nil {
		log.Fatalf("Failed to add documents: %v", err)
	}
	indexDuration := time.Since(startTime)
	fmt.Printf("Indexed %d documents in %v\n\n", len(documents), indexDuration)
	// Demonstrate different retrieval modes
	// NOTE(review): go vet flags Println with an explicit trailing \n
	// (Println appends its own); kept as-is to preserve the exact output.
	fmt.Println("=== Retrieval Mode Comparison ===\n")
	testQuery := "How do transformer models work and what are their applications?"
	// Test each mode
	modes := []struct {
		name string
		mode string
	}{
		{"Naive", "naive"},
		{"Local", "local"},
		{"Global", "global"},
		{"Hybrid", "hybrid"},
	}
	results := make(map[string]*rag.QueryResult)
	for _, m := range modes {
		fmt.Printf("--- %s Mode ---\n", m.name)
		start := time.Now()
		result, err := lightrag.QueryWithConfig(ctx, testQuery, &rag.RetrievalConfig{
			K:              5,
			ScoreThreshold: 0.3,
			SearchType:     m.mode,
			IncludeScores:  true,
		})
		duration := time.Since(start)
		if err != nil {
			// A failing mode is reported and skipped; the demo continues.
			log.Printf("Query failed: %v\n", err)
			continue
		}
		results[m.name] = result
		fmt.Printf("Response Time: %v\n", duration)
		fmt.Printf("Sources Retrieved: %d\n", len(result.Sources))
		fmt.Printf("Confidence: %.2f\n", result.Confidence)
		// Show mode-specific metadata
		switch m.mode {
		case "local":
			if queryEntities, ok := result.Metadata["query_entities"].(int); ok {
				fmt.Printf("Query Entities: %d\n", queryEntities)
			}
		case "global":
			if numCommunities, ok := result.Metadata["num_communities"].(int); ok {
				fmt.Printf("Communities: %d\n", numCommunities)
			}
		case "hybrid":
			if localConf, ok := result.Metadata["local_confidence"].(float64); ok {
				if globalConf, ok := result.Metadata["global_confidence"].(float64); ok {
					fmt.Printf("Local Confidence: %.2f\n", localConf)
					fmt.Printf("Global Confidence: %.2f\n", globalConf)
				}
			}
		}
		// Show top source
		if len(result.Sources) > 0 {
			fmt.Printf("\nTop Source:\n%s\n", truncate(result.Sources[0].Content, 150))
		}
		fmt.Println()
	}
	// Demonstrate fusion methods comparison
	fmt.Println("\n=== Fusion Method Comparison (Hybrid Mode) ===\n")
	fusionMethods := []string{"rrf", "weighted"}
	for _, method := range fusionMethods {
		fmt.Printf("--- %s Fusion ---\n", strings.ToUpper(method))
		// Update config for this fusion method
		testConfig := config
		testConfig.HybridConfig.FusionMethod = method
		// Create new engine with this config
		// NOTE(review): the new engine shares kg/vectorStore with the main
		// engine, and documents are re-added below — confirm duplicate
		// indexing is acceptable for this comparison.
		testEngine, err := engine.NewLightRAGEngine(testConfig, llm, embedder, kg, vectorStore)
		if err != nil {
			log.Printf("Failed to create engine: %v\n", err)
			continue
		}
		// Re-add documents (best-effort; error deliberately ignored here)
		_ = testEngine.AddDocuments(ctx, documents)
		// Query
		start := time.Now()
		result, err := testEngine.Query(ctx, testQuery)
		duration := time.Since(start)
		if err != nil {
			log.Printf("Query failed: %v\n", err)
			continue
		}
		fmt.Printf("Response Time: %v\n", duration)
		fmt.Printf("Sources: %d\n", len(result.Sources))
		fmt.Printf("Confidence: %.2f\n", result.Confidence)
		fmt.Println()
	}
	// Demonstrate knowledge graph traversal
	fmt.Println("\n=== Knowledge Graph Traversal ===\n")
	graphKg := lightrag.GetKnowledgeGraph()
	// Query the knowledge graph for a few typed entities (errors are
	// silently skipped; this section is purely illustrative).
	graphResult, err := graphKg.Query(ctx, &rag.GraphQuery{
		EntityTypes: []string{"CONCEPT", "TECHNOLOGY"},
		Limit:       5,
	})
	if err == nil {
		fmt.Printf("Found %d entities in knowledge graph\n", len(graphResult.Entities))
		for i, entity := range graphResult.Entities {
			if i >= 3 {
				break
			}
			fmt.Printf("  - %s (%s)\n", entity.Name, entity.Type)
		}
		if len(graphResult.Relationships) > 0 {
			fmt.Printf("\nFound %d relationships\n", len(graphResult.Relationships))
			for i, rel := range graphResult.Relationships {
				if i >= 3 {
					break
				}
				fmt.Printf("  - %s -> %s (%s)\n", rel.Source, rel.Target, rel.Type)
			}
		}
	}
	// Demonstrate document operations
	fmt.Println("\n=== Document Operations ===\n")
	// Add a new document
	newDoc := rag.Document{
		ID: "doc_new",
		Content: `Diffusion models are a class of generative models that work by gradually
adding noise to data until it becomes random noise, then learning to reverse this process
to generate new data. They have shown impressive results in image generation, with models
like DALL-E 2, Stable Diffusion, and Midjourney using this approach.`,
		Metadata: map[string]any{
			"source": "diffusion_models.txt",
			"topic":  "Diffusion Models",
		},
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	fmt.Println("Adding new document...")
	err = lightrag.AddDocuments(ctx, []rag.Document{newDoc})
	if err != nil {
		log.Printf("Failed to add document: %v\n", err)
	} else {
		fmt.Println("Document added successfully")
		// Query about the new topic
		result, err := lightrag.Query(ctx, "What are diffusion models?")
		if err == nil {
			fmt.Printf("Query about new topic returned %d sources\n", len(result.Sources))
		}
	}
	// Update document (same ID, revised content and timestamp)
	fmt.Println("\nUpdating document...")
	newDoc.Content = `Diffusion models are a class of generative models that work by
gradually adding noise to data until it becomes random noise, then learning to reverse this
process to generate new data. Models like DALL-E 2, Stable Diffusion, and Midjourney use
this approach. They compete with GANs (Generative Adversarial Networks) in image generation.`
	newDoc.UpdatedAt = time.Now()
	err = lightrag.UpdateDocument(ctx, newDoc)
	if err != nil {
		log.Printf("Failed to update document: %v\n", err)
	} else {
		fmt.Println("Document updated successfully")
	}
	// Performance comparison
	fmt.Println("\n=== Performance Comparison ===\n")
	benchmarkQueries := []string{
		"What is machine learning?",
		"Explain neural networks",
		"How do transformers work?",
		"What are the applications of AI?",
	}
	fmt.Printf("Running %d queries for performance comparison...\n", len(benchmarkQueries))
	for _, m := range modes {
		var totalDuration time.Duration
		successCount := 0
		for _, query := range benchmarkQueries {
			start := time.Now()
			_, err := lightrag.QueryWithConfig(ctx, query, &rag.RetrievalConfig{
				K:          5,
				SearchType: m.mode,
			})
			duration := time.Since(start)
			if err == nil {
				totalDuration += duration
				successCount++
			}
		}
		if successCount > 0 {
			avgDuration := totalDuration / time.Duration(successCount)
			fmt.Printf("%s Mode: Avg %v per query (%d/%d successful)\n",
				m.name, avgDuration, successCount, len(benchmarkQueries))
		}
	}
	// Final metrics (accumulated over everything run above)
	fmt.Println("\n=== Final Metrics ===\n")
	metrics := lightrag.GetMetrics()
	fmt.Printf("Total Queries: %d\n", metrics.TotalQueries)
	fmt.Printf("Total Documents: %d\n", metrics.TotalDocuments)
	fmt.Printf("Average Latency: %v\n", metrics.AverageLatency)
	fmt.Printf("Min Latency: %v\n", metrics.MinLatency)
	fmt.Printf("Max Latency: %v\n", metrics.MaxLatency)
	fmt.Printf("Indexing Latency: %v\n", metrics.IndexingLatency)
	fmt.Println("\n=== Example Complete ===")
}
// createSampleDocuments returns the eight fixed AI/ML documents used to seed
// the example index. Every document carries a source filename and a topic in
// Metadata, and all share the same creation/update timestamp taken once at
// call time.
func createSampleDocuments() []rag.Document {
	now := time.Now()
	return []rag.Document{
		{
			ID: "doc1",
			Content: `Transformer architecture revolutionized natural language processing by introducing
self-attention mechanisms. Unlike RNNs, transformers can process all tokens in parallel,
making them much faster to train. The transformer architecture consists of an encoder
and a decoder, each containing multiple layers of self-attention and feed-forward networks.
Key components include multi-head attention, positional encoding, and layer normalization.`,
			Metadata: map[string]any{
				"source": "transformers.txt",
				"topic":  "Transformers",
			},
			CreatedAt: now,
			UpdatedAt: now,
		},
		{
			ID: "doc2",
			Content: `Neural networks are computing systems inspired by biological neural networks.
They consist of interconnected nodes (neurons) organized in layers. Deep learning uses
neural networks with many layers (deep neural networks) to learn hierarchical representations
of data. Common architectures include Convolutional Neural Networks (CNNs) for images,
Recurrent Neural Networks (RNNs) for sequences, and Transformers for text.`,
			Metadata: map[string]any{
				"source": "neural_networks.txt",
				"topic":  "Neural Networks",
			},
			CreatedAt: now,
			UpdatedAt: now,
		},
		{
			ID: "doc3",
			Content: `Large Language Models (LLMs) like GPT-4, Claude, and LLaMA have demonstrated
remarkable capabilities in natural language understanding and generation. They are trained
on vast amounts of text data using transformer architectures. Key techniques include
pre-training on large corpora, fine-tuning for specific tasks, and reinforcement learning
from human feedback (RLHF). Applications range from chatbots to code generation.`,
			Metadata: map[string]any{
				"source": "llms.txt",
				"topic":  "LLMs",
			},
			CreatedAt: now,
			UpdatedAt: now,
		},
		{
			ID: "doc4",
			Content: `Machine learning is a subset of artificial intelligence that enables systems
to learn from data without being explicitly programmed. Main paradigms include supervised
learning (learning from labeled data), unsupervised learning (finding patterns in unlabeled
data), and reinforcement learning (learning through trial and error). Common algorithms
include linear regression, decision trees, support vector machines, and neural networks.`,
			Metadata: map[string]any{
				"source": "machine_learning.txt",
				"topic":  "Machine Learning",
			},
			CreatedAt: now,
			UpdatedAt: now,
		},
		{
			ID: "doc5",
			Content: `Attention mechanisms allow neural networks to focus on different parts of the
input when producing each part of the output. Self-attention, where each position in a
sequence attends to all other positions, is the key innovation behind transformers.
Multi-head attention allows the model to attend to different representation subspaces
simultaneously. This has become fundamental to modern NLP architectures.`,
			Metadata: map[string]any{
				"source": "attention.txt",
				"topic":  "Attention",
			},
			CreatedAt: now,
			UpdatedAt: now,
		},
		{
			ID: "doc6",
			Content: `Retrieval-Augmented Generation (RAG) combines retrieval systems with language models
to improve factual accuracy and reduce hallucinations. In a RAG system, a query is first
used to retrieve relevant documents from a knowledge base, then both the query and retrieved
documents are provided to the language model for generation. LightRAG is an implementation
that uses knowledge graphs for enhanced retrieval.`,
			Metadata: map[string]any{
				"source": "rag.txt",
				"topic":  "RAG",
			},
			CreatedAt: now,
			UpdatedAt: now,
		},
		{
			ID: "doc7",
			Content: `Fine-tuning adapts a pre-trained model to a specific task or domain. It involves
further training on a smaller, task-specific dataset. Techniques include full fine-tuning
(updating all parameters), partial fine-tuning (updating some layers), and parameter-efficient
methods like LoRA (Low-Rank Adaptation) and adapters. Fine-tuning can significantly improve
performance on specialized tasks compared to using a general pre-trained model.`,
			Metadata: map[string]any{
				"source": "fine_tuning.txt",
				"topic":  "Fine-tuning",
			},
			CreatedAt: now,
			UpdatedAt: now,
		},
		{
			ID: "doc8",
			Content: `Embeddings are dense vector representations of text that capture semantic meaning.
Similar texts have similar embeddings in the vector space. Models like BERT, GPT, and
sentence-transformers can generate embeddings. They are used for semantic search, clustering,
classification, and as input to other models. The dimension of embeddings ranges from
hundreds to thousands of floats.`,
			Metadata: map[string]any{
				"source": "embeddings.txt",
				"topic":  "Embeddings",
			},
			CreatedAt: now,
			UpdatedAt: now,
		},
	}
}
// truncate shortens s for display, appending "..." when it is longer than
// maxLen. It cuts on rune boundaries so multi-byte UTF-8 characters are never
// split in half (the previous byte-index slice could emit invalid UTF-8).
func truncate(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	runes := []rune(s)
	if len(runes) <= maxLen {
		// More bytes than maxLen but not more characters: keep it whole.
		return s
	}
	return string(runes[:maxLen]) + "..."
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/rag_falkordb_debug/main.go | examples/rag_falkordb_debug/main.go | package main
import (
	"context"
	"fmt"
	"log"
	"reflect"
	"sort"
	"strings"

	"github.com/redis/go-redis/v9"
	"github.com/smallnest/langgraphgo/rag/store"
)
// main is a step-by-step diagnostic against a FalkorDB server on
// localhost:6379: it probes the raw Redis protocol first, then the
// store.Graph wrapper, printing the Go type and value of every response so
// protocol/parsing issues can be inspected.
func main() {
	ctx := context.Background()
	// Test direct Redis connection first
	fmt.Println("Testing direct Redis connection...")
	client := redis.NewClient(&redis.Options{
		Addr: "localhost:6379",
	})
	// Ping Redis
	pong, err := client.Ping(ctx).Result()
	if err != nil {
		log.Fatalf("Failed to connect to Redis: %v", err)
	}
	fmt.Printf("Redis PING response: %s\n", pong)
	// Test FalkorDB module existence
	fmt.Println("\nTesting FalkorDB module...")
	// Try to check if GRAPH command exists
	res, err := client.Do(ctx, "COMMAND", "INFO", "GRAPH.QUERY").Result()
	if err != nil {
		log.Printf("FalkorDB module might not be loaded: %v", err)
	} else {
		fmt.Printf("GRAPH command info: %v\n", res)
	}
	// Test simple FalkorDB query
	fmt.Println("\nTesting simple FalkorDB query...")
	res, err = client.Do(ctx, "GRAPH.QUERY", "test", "RETURN 1", "--compact").Result()
	if err != nil {
		// Without a working GRAPH.QUERY nothing below can succeed, so bail out.
		log.Printf("Failed to execute FalkorDB query: %v", err)
		return
	}
	fmt.Printf("FalkorDB response type: %T\n", res)
	fmt.Printf("FalkorDB response value: %v\n", res)
	// Check response structure
	if r, ok := res.([]interface{}); ok {
		fmt.Printf("Response is []interface{} with length: %d\n", len(r))
		for i, v := range r {
			fmt.Printf(" [%d] type: %T, value: %v\n", i, v, v)
			if innerSlice, ok := v.([]interface{}); ok {
				fmt.Printf(" Inner slice length: %d\n", len(innerSlice))
				for j, innerV := range innerSlice {
					fmt.Printf(" [%d] type: %T, value: %v\n", j, innerV, innerV)
				}
			}
		}
	} else {
		fmt.Printf("Response is not []interface{}, it's %s\n", reflect.TypeOf(res).String())
	}
	// Test creating a graph node
	fmt.Println("\nTesting node creation...")
	res2, err := client.Do(ctx, "GRAPH.QUERY", "test", "CREATE (n:Person {name: 'Test'})", "--compact").Result()
	if err != nil {
		log.Printf("Failed to create node: %v", err)
	} else {
		fmt.Printf("Node creation response: %v\n", res2)
	}
	// Test querying the created node
	fmt.Println("\nTesting node query...")
	res3, err := client.Do(ctx, "GRAPH.QUERY", "test", "MATCH (n:Person) RETURN n", "--compact").Result()
	if err != nil {
		log.Printf("Failed to query nodes: %v", err)
	} else {
		fmt.Printf("Node query response: %v\n", res3)
		if r, ok := res3.([]interface{}); ok {
			fmt.Printf("Query response length: %d\n", len(r))
			for i, v := range r {
				fmt.Printf(" [%d] type: %T\n", i, v)
			}
		}
	}
	// Test using the FalkorDB Graph wrapper
	fmt.Println("\nTesting FalkorDB Graph wrapper...")
	g := store.NewGraph("test", client)
	queryResult, err := g.Query(ctx, "MATCH (n:Person) RETURN n")
	if err != nil {
		log.Printf("Failed to query with wrapper: %v", err)
	} else {
		fmt.Printf("Wrapper query result:\n")
		fmt.Printf(" Header: %v\n", queryResult.Header)
		fmt.Printf(" Results count: %d\n", len(queryResult.Results))
		fmt.Printf(" Statistics: %v\n", queryResult.Statistics)
	}
	// Test MERGE operation (what AddEntity uses)
	fmt.Println("\nTesting MERGE operation (like AddEntity)...")
	mergeResult, err := g.Query(ctx, "MERGE (n:Company {id: 'apple', name: 'Apple'}) RETURN n")
	if err != nil {
		log.Printf("Failed to execute MERGE: %v", err)
	} else {
		fmt.Printf("MERGE response:\n")
		fmt.Printf(" Header: %v\n", mergeResult.Header)
		fmt.Printf(" Results count: %d\n", len(mergeResult.Results))
		fmt.Printf(" Statistics: %v\n", mergeResult.Statistics)
		// Try to parse the result like the AddEntity does
		if len(mergeResult.Results) > 0 {
			fmt.Printf(" First result: %v\n", mergeResult.Results[0])
		}
	}
	// Test entity creation with the same format as the example
	fmt.Println("\nTesting entity creation like in the example...")
	// Test propsToString function
	testProps := map[string]interface{}{
		"name":        "Apple Inc.",
		"type":        "ORGANIZATION",
		"description": "Technology company",
	}
	propsStr := propsToString(testProps)
	fmt.Printf("Props string: %s\n", propsStr)
	// Test the actual query format
	testQuery := fmt.Sprintf("MERGE (n:%s {id: '%s'}) SET n += %s", "ORGANIZATION", "apple", propsStr)
	fmt.Printf("Test query: %s\n", testQuery)
	// Test direct Redis call to see the raw response
	fmt.Println("Testing direct Redis call for the problematic query...")
	rawResponse, err := client.Do(ctx, "GRAPH.QUERY", "test", testQuery, "--compact").Result()
	if err != nil {
		log.Printf("Failed raw query: %v", err)
	} else {
		fmt.Printf("Raw response type: %T\n", rawResponse)
		fmt.Printf("Raw response: %v\n", rawResponse)
		if r, ok := rawResponse.([]interface{}); ok {
			fmt.Printf("Raw response length: %d\n", len(r))
			for i, v := range r {
				fmt.Printf(" [%d] type: %T, value: %v\n", i, v, v)
			}
		}
	}
	// Run the same query through the wrapper to compare parsing behavior.
	entityResult, err := g.Query(ctx, testQuery)
	if err != nil {
		log.Printf("Failed to create entity like example: %v", err)
	} else {
		fmt.Printf("Entity creation response:\n")
		fmt.Printf(" Header: %v\n", entityResult.Header)
		fmt.Printf(" Results count: %d\n", len(entityResult.Results))
		fmt.Printf(" Statistics: %v\n", entityResult.Statistics)
		if len(entityResult.Results) > 0 {
			fmt.Printf(" First result type: %T\n", entityResult.Results[0])
		}
	}
	// Clean up
	client.Close()
}
// propsToString renders a property map as a Cypher map literal, e.g.
// {name: "Apple", score: 1}. Keys are emitted in sorted order so the
// generated query text is deterministic — Go map iteration order is random,
// which previously made repeated runs produce different (though semantically
// equivalent) query strings, complicating debugging and log comparison.
func propsToString(m map[string]interface{}) string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(m))
	for _, k := range keys {
		var val interface{}
		switch v := m[k].(type) {
		case []float32:
			// Convert to Cypher list: [v1, v2, ...]
			s := make([]string, len(v))
			for i, f := range v {
				s[i] = fmt.Sprintf("%f", f)
			}
			val = "[" + strings.Join(s, ",") + "]"
		default:
			val = quoteString(v)
		}
		parts = append(parts, fmt.Sprintf("%s: %v", k, val))
	}
	return "{" + strings.Join(parts, ", ") + "}"
}

// quoteString wraps string values in double quotes (unless the value already
// starts/ends with one); non-string values pass through unchanged.
//
// NOTE(review): embedded double quotes inside the value are not escaped, so a
// value containing `"` can still yield malformed Cypher — confirm inputs are
// quote-free or add escaping if untrusted data can reach this path.
func quoteString(i interface{}) interface{} {
	switch x := i.(type) {
	case string:
		if len(x) == 0 {
			return "\"\""
		}
		if x[0] != '"' {
			x = "\"" + x
		}
		if x[len(x)-1] != '"' {
			x += "\""
		}
		return x
	default:
		return i
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/smart_messages/main.go | examples/smart_messages/main.go | package main
import (
"context"
"fmt"
"github.com/smallnest/langgraphgo/graph"
)
// main builds a tiny three-node conversation graph (user_input ->
// ai_response -> ai_update) and prints the final state. It simulates a
// "smart messages" flow where a previously emitted AI message is revised.
func main() {
	g := graph.NewStateGraph[map[string]any]()
	g.AddNode("user_input", "user_input", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		// In a real app, this would get input from UI
		// Here we simulate it from initial state or hardcode
		return map[string]any{"user_query": "Hello"}, nil
	})
	g.AddNode("ai_response", "ai_response", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		query, _ := state["user_query"].(string)
		// Simulate smart message generation
		return map[string]any{"response": fmt.Sprintf("Echo: %s", query)}, nil
	})
	// Hypothetical "Smart Messages" logic where we might update previous messages in UI
	// This usually involves state management where messages have IDs
	g.AddNode("ai_update", "ai_update", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		return map[string]any{"response": "Updated: Echo Hello"}, nil
	})
	g.SetEntryPoint("user_input")
	g.AddEdge("user_input", "ai_response")
	g.AddEdge("ai_response", "ai_update")
	g.AddEdge("ai_update", graph.END)
	// Surface compile/invoke failures instead of discarding them with blank
	// identifiers as the original did — a misconfigured graph would otherwise
	// silently print "Final: map[]".
	runnable, err := g.Compile()
	if err != nil {
		panic(fmt.Sprintf("compile graph: %v", err))
	}
	res, err := runnable.Invoke(context.Background(), map[string]any{})
	if err != nil {
		panic(fmt.Sprintf("invoke graph: %v", err))
	}
	fmt.Printf("Final: %v\n", res)
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/ptc_expense_analysis/expense_tools.go | examples/ptc_expense_analysis/expense_tools.go | package main
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"time"
)
// ExpenseItem represents a single expense line item
type ExpenseItem struct {
	ID          string    `json:"id"`          // e.g. "EXP-E001-Q3-001" (see generateMockExpenses)
	EmployeeID  string    `json:"employee_id"` // owning employee, e.g. "E001"
	Category    string    `json:"category"`    // travel, meals, accommodation, transportation, office supplies
	Amount      float64   `json:"amount"`
	Date        time.Time `json:"date"`
	Description string    `json:"description"`
	Status      string    `json:"status"` // approved, pending, rejected
	ReceiptURL  string    `json:"receipt_url"`
	ApprovedBy  string    `json:"approved_by"`
}

// TeamMember represents an employee
type TeamMember struct {
	ID         string `json:"id"`
	Name       string `json:"name"`
	Department string `json:"department"` // e.g. "engineering", "sales", "marketing"
	Email      string `json:"email"`
}
// GetTeamMembersTool returns team members for a department
type GetTeamMembersTool struct{}

// Name returns the tool identifier exposed to the LLM.
func (t GetTeamMembersTool) Name() string {
	return "get_team_members"
}

// Description tells the LLM what the tool does and what input it expects.
func (t GetTeamMembersTool) Description() string {
	return "Returns a list of team members for a given department. Input should be the department name (e.g., 'engineering', 'sales', 'marketing')."
}

// Call looks up the mock roster for the requested department and returns it
// as indented JSON. Input may be either a bare department name or a JSON
// object with a "department" field.
func (t GetTeamMembersTool) Call(ctx context.Context, input string) (string, error) {
	var params map[string]any
	if json.Unmarshal([]byte(input), &params) != nil {
		// Fall back to treating the raw input as the department name.
		params = map[string]any{"department": input}
	}
	department, ok := params["department"].(string)
	if !ok {
		return "", fmt.Errorf("department is required")
	}
	// Mock roster keyed by department.
	roster := map[string][]TeamMember{
		"engineering": {
			{ID: "E001", Name: "Alice Johnson", Department: "engineering", Email: "alice@example.com"},
			{ID: "E002", Name: "Bob Smith", Department: "engineering", Email: "bob@example.com"},
			{ID: "E003", Name: "Charlie Davis", Department: "engineering", Email: "charlie@example.com"},
			{ID: "E004", Name: "Diana Lee", Department: "engineering", Email: "diana@example.com"},
		},
		"sales": {
			{ID: "S001", Name: "Eve Wilson", Department: "sales", Email: "eve@example.com"},
			{ID: "S002", Name: "Frank Brown", Department: "sales", Email: "frank@example.com"},
		},
		"marketing": {
			{ID: "M001", Name: "Grace Taylor", Department: "marketing", Email: "grace@example.com"},
		},
	}
	members, found := roster[department]
	if !found {
		return "", fmt.Errorf("unknown department: %s", department)
	}
	result, _ := json.MarshalIndent(members, "", " ")
	return string(result), nil
}
// GetExpensesTool returns expenses for an employee in a quarter
type GetExpensesTool struct{}

// Name returns the tool identifier exposed to the LLM.
func (t GetExpensesTool) Name() string {
	return "get_expenses"
}

// Description tells the LLM what the tool does and what input it expects.
func (t GetExpensesTool) Description() string {
	return "Returns all expense line items for a given employee in a specific quarter. Input should be JSON with 'employee_id' and 'quarter' (e.g., 'Q1', 'Q2', 'Q3', 'Q4')."
}

// Call returns the mock expense report for one employee and quarter, encoded
// as indented JSON. Input must be a JSON object carrying both fields.
func (t GetExpensesTool) Call(ctx context.Context, input string) (string, error) {
	var params map[string]any
	if err := json.Unmarshal([]byte(input), &params); err != nil {
		return "", fmt.Errorf("invalid input: %v", err)
	}
	employeeID, ok := params["employee_id"].(string)
	if !ok {
		return "", fmt.Errorf("employee_id is required")
	}
	quarter, ok := params["quarter"].(string)
	if !ok {
		return "", fmt.Errorf("quarter is required")
	}
	// Generate mock expenses and serialize them for the model.
	report, _ := json.MarshalIndent(generateMockExpenses(employeeID, quarter), "", " ")
	return string(report), nil
}
// GetCustomBudgetTool returns custom budget for an employee
type GetCustomBudgetTool struct{}

// Name returns the tool identifier exposed to the LLM.
func (t GetCustomBudgetTool) Name() string {
	return "get_custom_budget"
}

// Description tells the LLM what the tool does and what input it expects.
func (t GetCustomBudgetTool) Description() string {
	return "Get the custom quarterly travel budget for a specific employee. Input should be JSON with 'user_id'. Returns the custom budget amount if one exists, otherwise returns null."
}

// Call reports the custom quarterly budget override for the employee named in
// the JSON input, or a null budget when no override exists.
func (t GetCustomBudgetTool) Call(ctx context.Context, input string) (string, error) {
	var params map[string]any
	if err := json.Unmarshal([]byte(input), &params); err != nil {
		return "", fmt.Errorf("invalid input: %v", err)
	}
	userID, ok := params["user_id"].(string)
	if !ok {
		return "", fmt.Errorf("user_id is required")
	}
	// Mock custom budgets (some employees have higher limits)
	overrides := map[string]float64{
		"E001": 7500.0,  // Alice has higher budget
		"E003": 10000.0, // Charlie has higher budget
	}
	budget, exists := overrides[userID]
	if !exists {
		return `{"user_id": "` + userID + `", "budget": null}`, nil
	}
	payload := map[string]any{
		"user_id": userID,
		"budget":  budget,
	}
	encoded, _ := json.MarshalIndent(payload, "", " ")
	return string(encoded), nil
}
// generateMockExpenses fabricates 15-34 random expense line items for the
// given employee and quarter. IDs are deterministic
// ("EXP-<employee>-<quarter>-NNN"); categories, amounts, dates, and statuses
// are randomized.
func generateMockExpenses(employeeID, quarter string) []ExpenseItem {
	// Use a private RNG instead of reseeding the global source: rand.Seed is
	// deprecated since Go 1.20 and mutating global state here raced with any
	// other user of math/rand in the process.
	rng := rand.New(rand.NewSource(time.Now().UnixNano() + int64(len(employeeID))))
	categories := []string{"travel", "meals", "accommodation", "transportation", "office supplies"}
	// "approved" repeated three times so roughly 3/4 of items are approved.
	statuses := []string{"approved", "approved", "approved", "pending"}
	numExpenses := 15 + rng.Intn(20) // 15-34 expenses
	expenses := make([]ExpenseItem, numExpenses)
	// Some employees spend more
	multiplier := 1.0
	if employeeID == "E002" || employeeID == "E004" {
		multiplier = 1.5 // Bob and Diana spend more
	}
	for i := 0; i < numExpenses; i++ {
		category := categories[rng.Intn(len(categories))]
		amount := (50.0 + rng.Float64()*500.0) * multiplier
		// Travel expenses are larger
		if category == "travel" || category == "accommodation" {
			amount *= 2
		}
		expenses[i] = ExpenseItem{
			ID:          fmt.Sprintf("EXP-%s-%s-%03d", employeeID, quarter, i+1),
			EmployeeID:  employeeID,
			Category:    category,
			Amount:      amount,
			Date:        time.Now().AddDate(0, -rng.Intn(3), -rng.Intn(30)),
			Description: fmt.Sprintf("%s expense for %s", category, employeeID),
			Status:      statuses[rng.Intn(len(statuses))],
			ReceiptURL:  fmt.Sprintf("https://receipts.example.com/%s-%d", employeeID, i+1),
			ApprovedBy:  "manager@example.com",
		}
	}
	// NOTE: the previous version also accumulated a totalAmount that was never
	// read; that dead code has been removed.
	return expenses
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/ptc_expense_analysis/main.go | examples/ptc_expense_analysis/main.go | package main
import (
"context"
"fmt"
"log"
"time"
"github.com/smallnest/langgraphgo/ptc"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
"github.com/tmc/langchaingo/tools"
)
// main wires three mock expense tools into a PTC (Programmatic Tool Calling)
// agent backed by OpenAI and runs one budget-analysis query, printing the
// full conversation, timing stats, and the final answer. Requires OpenAI
// credentials in the environment (openai.New reads them itself).
func main() {
	fmt.Println("=== PTC (Programmatic Tool Calling) Example ===")
	fmt.Println("This example demonstrates how PTC reduces latency and token usage")
	fmt.Println("by allowing the LLM to write code that calls tools programmatically.")
	// Initialize OpenAI model
	model, err := openai.New()
	if err != nil {
		log.Fatalf("Failed to create OpenAI client: %v", err)
	}
	// Create expense tools
	// NOTE(review): this local named "tools" shadows the imported tools
	// package for the rest of main; legal, but worth renaming.
	tools := []tools.Tool{
		GetTeamMembersTool{},
		GetExpensesTool{},
		GetCustomBudgetTool{},
	}
	// Create PTC agent
	fmt.Println("Creating PTC Agent...")
	agent, err := ptc.CreatePTCAgent(ptc.PTCAgentConfig{
		Model:    model,
		Tools:    tools,
		Language: ptc.LanguagePython,
		SystemPrompt: `You are a helpful financial analysis assistant.
You can write Python code to analyze expense data efficiently.`,
		MaxIterations: 10,
	})
	if err != nil {
		log.Fatalf("Failed to create PTC agent: %v", err)
	}
	// Run example queries
	queries := []string{
		`Which engineering team members exceeded their Q3 travel budget?
The standard quarterly travel budget is $5,000.
However, some employees have custom budget limits.
For anyone who exceeded the $5,000 standard budget, check if they have a custom budget exception.
If they do, use that custom limit instead to determine if they truly exceeded their budget.
Only count approved expenses.`,
	}
	for i, query := range queries {
		fmt.Printf("\n=== Query %d ===\n", i+1)
		fmt.Printf("Question: %s\n\n", query)
		startTime := time.Now()
		// Create initial state
		initialState := map[string]any{
			"messages": []llms.MessageContent{
				{
					Role: llms.ChatMessageTypeHuman,
					Parts: []llms.ContentPart{
						llms.TextPart(query),
					},
				},
			},
		}
		// Invoke the agent
		result, err := agent.Invoke(context.Background(), initialState)
		if err != nil {
			log.Printf("Error running agent: %v", err)
			continue
		}
		elapsed := time.Since(startTime)
		// Extract final answer
		// NOTE(review): unchecked type assertion — panics if "messages" is
		// missing or holds a different type.
		messages := result["messages"].([]llms.MessageContent)
		fmt.Println("\n--- Conversation Flow ---")
		for idx, msg := range messages {
			role := "Unknown"
			switch msg.Role {
			case llms.ChatMessageTypeHuman:
				role = "Human"
			case llms.ChatMessageTypeAI:
				role = "AI"
			case llms.ChatMessageTypeTool:
				role = "Tool Result"
			case llms.ChatMessageTypeSystem:
				role = "System"
			}
			fmt.Printf("\n[%d] %s:\n", idx+1, role)
			for _, part := range msg.Parts {
				if textPart, ok := part.(llms.TextContent); ok {
					text := textPart.Text
					// Truncate very long tool output so the console stays readable.
					if len(text) > 500 {
						fmt.Printf("%s... (truncated)\n", text[:500])
					} else {
						fmt.Println(text)
					}
				}
			}
		}
		// NOTE(review): missing trailing \n — "Total time" prints on the same line.
		fmt.Printf("\n--- Execution Stats ---")
		fmt.Printf("Total time: %v\n", elapsed)
		fmt.Printf("Messages exchanged: %d\n", len(messages))
		// Get last AI message as final answer
		// (this inner i intentionally shadows the query index; it only scans messages)
		for i := len(messages) - 1; i >= 0; i-- {
			if messages[i].Role == llms.ChatMessageTypeAI {
				// NOTE(review): missing trailing \n here as well.
				fmt.Printf("\n--- Final Answer ---")
				for _, part := range messages[i].Parts {
					if textPart, ok := part.(llms.TextContent); ok {
						fmt.Println(textPart.Text)
					}
				}
				break
			}
		}
	}
	fmt.Println("\n=== Comparison: PTC vs Traditional Tool Calling ===")
	fmt.Println("PTC Advantages:")
	fmt.Println("1. Reduced Latency: Eliminates multiple API round-trips for sequential tool calls")
	fmt.Println("2. Token Efficiency: Code can filter large datasets before sending results back")
	fmt.Println("3. Programmatic Control: Write code to process data with loops, conditionals, etc.")
	fmt.Println("\nTraditional Tool Calling Issues:")
	fmt.Println("1. Each tool call requires a complete API round-trip")
	fmt.Println("2. Large tool results consume significant tokens")
	fmt.Println("3. Sequential dependencies require multiple API calls")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/tool_brave/main.go | examples/tool_brave/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/smallnest/langgraphgo/tool"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
"github.com/tmc/langchaingo/tools"
)
// main runs a ReAct agent equipped with the Brave web-search tool and prints
// the agent's final answer. Requires BRAVE_API_KEY plus an LLM key
// (OPENAI_API_KEY or DEEPSEEK_API_KEY) in the environment.
func main() {
	// Check for API keys
	if os.Getenv("BRAVE_API_KEY") == "" {
		log.Fatal("Please set BRAVE_API_KEY environment variable")
	}
	// We also need an LLM API key (e.g., OPENAI_API_KEY or DEEPSEEK_API_KEY)
	if os.Getenv("OPENAI_API_KEY") == "" && os.Getenv("DEEPSEEK_API_KEY") == "" {
		log.Fatal("Please set OPENAI_API_KEY or DEEPSEEK_API_KEY environment variable")
	}
	ctx := context.Background()
	// 1. Initialize the LLM
	llm, err := openai.New()
	if err != nil {
		log.Fatalf("Failed to create LLM: %v", err)
	}
	// 2. Initialize the Tool
	braveTool, err := tool.NewBraveSearch("",
		tool.WithBraveCount(5),
		tool.WithBraveCountry("US"),
		tool.WithBraveLang("en"),
	)
	if err != nil {
		log.Fatal(err)
	}
	// 3. Create the ReAct Agent using map state convenience function
	agent, err := prebuilt.CreateAgentMap(llm, []tools.Tool{braveTool}, 20)
	if err != nil {
		log.Fatalf("Failed to create agent: %v", err)
	}
	// 4. Run the Agent
	query := "What are the latest developments in AI technology in 2025?"
	fmt.Printf("User: %s\n\n", query)
	fmt.Println("Agent is thinking and searching...")
	inputs := map[string]any{
		"messages": []llms.MessageContent{
			llms.TextParts(llms.ChatMessageTypeHuman, query),
		},
	}
	response, err := agent.Invoke(ctx, inputs)
	if err != nil {
		log.Fatalf("Agent failed: %v", err)
	}
	// 5. Print the Result
	// Guard both the type assertion and the empty-slice case: the original
	// indexed messages[len(messages)-1] unconditionally, which panics when the
	// agent returns an empty message list.
	messages, ok := response["messages"].([]llms.MessageContent)
	if ok && len(messages) > 0 {
		// The last message should be the AI's final answer
		lastMsg := messages[len(messages)-1]
		for _, part := range lastMsg.Parts {
			if text, ok := part.(llms.TextContent); ok {
				fmt.Printf("\nAgent: %s\n", text.Text)
			}
		}
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/parallel_execution/main.go | examples/parallel_execution/main.go | package main
import (
"context"
"fmt"
"time"
"github.com/smallnest/langgraphgo/graph"
)
// main demonstrates fan-out/fan-in execution: three branches run after
// "start", their outputs accumulate under the "results" key via an append
// reducer, and "aggregator" consumes the merged state.
func main() {
	workflow := graph.NewStateGraph[map[string]any]()

	// "results" uses an append reducer so parallel branch outputs accumulate.
	stateSchema := graph.NewMapSchema()
	stateSchema.RegisterReducer("results", graph.AppendReducer)
	workflow.SetSchema(stateSchema)

	// branch builds a handler that sleeps for delay, logs, and emits label.
	branch := func(label string, delay time.Duration) func(context.Context, map[string]any) (map[string]any, error) {
		return func(ctx context.Context, state map[string]any) (map[string]any, error) {
			time.Sleep(delay)
			fmt.Println("Branch " + label + " executed")
			return map[string]any{"results": label}, nil
		}
	}

	workflow.AddNode("start", "start", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Println("Starting execution...")
		return map[string]any{}, nil
	})
	workflow.AddNode("branch_a", "branch_a", branch("A", 100*time.Millisecond))
	workflow.AddNode("branch_b", "branch_b", branch("B", 200*time.Millisecond))
	workflow.AddNode("branch_c", "branch_c", branch("C", 150*time.Millisecond))
	workflow.AddNode("aggregator", "aggregator", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Printf("Aggregated results: %v\n", state["results"])
		return map[string]any{"final": "done"}, nil
	})

	// Wiring: start fans out to the three branches, which all converge on
	// the aggregator before reaching END.
	workflow.SetEntryPoint("start")
	workflow.AddEdge("start", "branch_a")
	workflow.AddEdge("start", "branch_b")
	workflow.AddEdge("start", "branch_c")
	workflow.AddEdge("branch_a", "aggregator")
	workflow.AddEdge("branch_b", "aggregator")
	workflow.AddEdge("branch_c", "aggregator")
	workflow.AddEdge("aggregator", graph.END)

	compiled, err := workflow.Compile()
	if err != nil {
		panic(err)
	}
	finalState, err := compiled.Invoke(context.Background(), map[string]any{
		"results": []string{},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Final state: %v\n", finalState)
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/durable_execution/main.go | examples/durable_execution/main.go | package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"sort"
"time"
"github.com/smallnest/langgraphgo/graph"
"github.com/smallnest/langgraphgo/store"
)
// --- Simple File-based Checkpoint Store for Demo ---

// DiskStore persists checkpoints as a single JSON file on disk, keyed by
// checkpoint ID. Every operation is a full read-modify-write of the file and
// no locking is performed, so it is demo-only and not safe for concurrent use.
type DiskStore struct {
	FilePath string // path of the JSON file holding all checkpoints
}

// NewDiskStore returns a DiskStore backed by the JSON file at path.
// The file is created lazily on the first Save.
func NewDiskStore(path string) *DiskStore {
	return &DiskStore{FilePath: path}
}
// loadAll reads every checkpoint from the backing file. Any read or decode
// failure is treated as "no checkpoints yet" and yields an empty, non-nil
// map (best-effort semantics, matching a missing file on first run).
func (s *DiskStore) loadAll() map[string]*graph.Checkpoint {
	data, err := os.ReadFile(s.FilePath)
	if err != nil {
		return make(map[string]*graph.Checkpoint)
	}
	var checkpoints map[string]*graph.Checkpoint
	// Guard against a literal JSON "null" payload: it unmarshals without
	// error into a nil map, which callers (e.g. Save) would then write to
	// and panic. Always return a usable map.
	if err := json.Unmarshal(data, &checkpoints); err != nil || checkpoints == nil {
		return make(map[string]*graph.Checkpoint)
	}
	return checkpoints
}
// saveAll atomically (from this process's point of view) replaces the backing
// file with the JSON encoding of cps.
func (s *DiskStore) saveAll(cps map[string]*graph.Checkpoint) error {
	encoded, err := json.MarshalIndent(cps, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(s.FilePath, encoded, 0644)
}
// Save persists cp, overwriting any existing checkpoint with the same ID.
func (s *DiskStore) Save(ctx context.Context, cp *graph.Checkpoint) error {
	all := s.loadAll()
	all[cp.ID] = cp
	return s.saveAll(all)
}
// Load returns the checkpoint with the given ID, or an error naming the
// missing ID (the previous message carried no context about which checkpoint
// was requested, making failures hard to diagnose).
func (s *DiskStore) Load(ctx context.Context, id string) (*graph.Checkpoint, error) {
	if cp, ok := s.loadAll()[id]; ok {
		return cp, nil
	}
	return nil, fmt.Errorf("checkpoint %s not found", id)
}
// List returns every checkpoint whose metadata "thread_id" matches threadID,
// sorted by ascending timestamp (oldest first).
func (s *DiskStore) List(ctx context.Context, threadID string) ([]*graph.Checkpoint, error) {
	var matched []*graph.Checkpoint
	for _, cp := range s.loadAll() {
		// Check metadata for thread_id
		tid, ok := cp.Metadata["thread_id"].(string)
		if ok && tid == threadID {
			matched = append(matched, cp)
		}
	}
	// Sort by timestamp
	sort.Slice(matched, func(a, b int) bool {
		return matched[a].Timestamp.Before(matched[b].Timestamp)
	})
	return matched, nil
}
// Delete removes the checkpoint with the given ID; deleting an unknown ID is
// a no-op that still rewrites the file.
func (s *DiskStore) Delete(ctx context.Context, id string) error {
	all := s.loadAll()
	delete(all, id)
	return s.saveAll(all)
}
// Clear deletes every checkpoint belonging to threadID and persists the rest.
func (s *DiskStore) Clear(ctx context.Context, threadID string) error {
	all := s.loadAll()
	// Deleting entries while ranging over a Go map is well-defined.
	for id, cp := range all {
		tid, ok := cp.Metadata["thread_id"].(string)
		if ok && tid == threadID {
			delete(all, id)
		}
	}
	return s.saveAll(all)
}
// ListByThread returns all checkpoints for a specific thread_id, converted
// from graph.Checkpoint to store.Checkpoint and sorted by ascending version.
func (s *DiskStore) ListByThread(ctx context.Context, threadID string) ([]*store.Checkpoint, error) {
	var converted []*store.Checkpoint
	for _, cp := range s.loadAll() {
		// Check metadata for thread_id
		tid, ok := cp.Metadata["thread_id"].(string)
		if !ok || tid != threadID {
			continue
		}
		// Convert graph.Checkpoint to store.Checkpoint
		converted = append(converted, &store.Checkpoint{
			ID:        cp.ID,
			NodeName:  cp.NodeName,
			State:     cp.State,
			Metadata:  cp.Metadata,
			Timestamp: cp.Timestamp,
			Version:   cp.Version,
		})
	}
	// Sort by version ascending
	sort.Slice(converted, func(a, b int) bool {
		return converted[a].Version < converted[b].Version
	})
	return converted, nil
}
// GetLatestByThread returns the newest checkpoint for a thread_id — the one
// with the highest version — or an error when the thread has none.
func (s *DiskStore) GetLatestByThread(ctx context.Context, threadID string) (*store.Checkpoint, error) {
	all, err := s.ListByThread(ctx, threadID)
	if err != nil {
		return nil, err
	}
	if len(all) == 0 {
		return nil, fmt.Errorf("no checkpoints found for thread: %s", threadID)
	}
	// ListByThread sorts ascending by version, so the last entry is newest.
	return all[len(all)-1], nil
}
// --- Main Logic ---
// main demonstrates durable execution: each completed step is checkpointed to
// disk, and a crashed run (CRASH=true) can be resumed from its last checkpoint
// by simply running the program again.
func main() {
	storeFile := "checkpoints.json"
	store := NewDiskStore(storeFile)
	threadID := "durable-job-1"

	// 1. Define Graph
	g := graph.NewCheckpointableStateGraph[map[string]any]()

	// Use MapSchema for state; "steps" accumulates entries via the append reducer.
	schema := graph.NewMapSchema()
	schema.RegisterReducer("steps", graph.AppendReducer)
	g.SetSchema(schema)

	// Configure Checkpointing: every step is persisted automatically.
	g.SetCheckpointConfig(graph.CheckpointConfig{
		Store:    store,
		AutoSave: true,
	})

	// Step 1
	g.AddNode("step_1", "step_1", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Println("Executing Step 1...")
		time.Sleep(500 * time.Millisecond)
		return map[string]any{"steps": []string{"Step 1 Completed"}}, nil
	})

	// Step 2 (Simulate Crash)
	g.AddNode("step_2", "step_2", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Println("Executing Step 2...")
		time.Sleep(500 * time.Millisecond)
		// Check if we should crash (simulated via environment variable).
		if os.Getenv("CRASH") == "true" {
			fmt.Println("!!! CRASHING AT STEP 2 !!!")
			fmt.Println("(Run again without CRASH=true to recover)")
			os.Exit(1)
		}
		return map[string]any{"steps": []string{"Step 2 Completed"}}, nil
	})

	// Step 3
	g.AddNode("step_3", "step_3", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Println("Executing Step 3...")
		time.Sleep(500 * time.Millisecond)
		return map[string]any{"steps": []string{"Step 3 Completed"}}, nil
	})

	g.SetEntryPoint("step_1")
	g.AddEdge("step_1", "step_2")
	g.AddEdge("step_2", "step_3")
	g.AddEdge("step_3", graph.END)

	runnable, err := g.CompileCheckpointable()
	if err != nil {
		log.Fatal(err)
	}

	// 2. Check for existing checkpoints to resume
	ctx := context.Background()
	checkpoints, _ := store.List(ctx, threadID)

	var config *graph.Config
	if len(checkpoints) > 0 {
		latest := checkpoints[len(checkpoints)-1]
		fmt.Printf("Found existing checkpoint: %s (Node: %s)\n", latest.ID, latest.NodeName)
		fmt.Println("Resuming execution...")

		// Map the last completed node to the node we should resume from.
		var nextNode string
		if latest.NodeName == "step_1" {
			nextNode = "step_2"
		} else if latest.NodeName == "step_2" {
			nextNode = "step_3"
		} else {
			// Finished or unknown
			fmt.Println("Job already finished or unknown state.")
			return
		}

		config = &graph.Config{
			Configurable: map[string]any{
				"thread_id":     threadID,
				"checkpoint_id": latest.ID,
			},
			ResumeFrom: []string{nextNode},
		}
		fmt.Printf("Continuing from %s...\n", nextNode)

		// Checkpoint state round-trips through JSON, so it is expected to be a
		// map[string]any after unmarshaling. BUG FIX: the original code
		// re-asserted latest.State.(map[string]any) inside the !ok branch,
		// which would panic exactly when the guarded cast had already failed.
		// Fail with a descriptive error instead.
		stateMap, ok := latest.State.(map[string]any)
		if !ok {
			log.Fatalf("unexpected checkpoint state type %T (want map[string]any)", latest.State)
		}
		res, err := runnable.InvokeWithConfig(ctx, stateMap, config)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("Final Result: %v\n", res)
	} else {
		fmt.Println("Starting new execution...")
		config = &graph.Config{
			Configurable: map[string]any{
				"thread_id": threadID,
			},
		}
		initialState := map[string]any{"steps": []string{"Start"}}
		res, err := runnable.InvokeWithConfig(ctx, initialState, config)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("Final Result: %v\n", res)
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/rag_basic/main.go | examples/rag_basic/main.go | package main
import (
"context"
"fmt"
"log"
"strings"
"github.com/smallnest/langgraphgo/graph"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/retriever"
"github.com/smallnest/langgraphgo/rag/store"
"github.com/tmc/langchaingo/llms/openai"
)
// main runs an end-to-end basic RAG demo: it indexes four sample documents
// into an in-memory vector store (with a mock embedder), builds and compiles
// a RAG pipeline over them, prints the pipeline graph, and answers three
// canned questions.
func main() {
	ctx := context.Background()
	// Initialize LLM (reads OpenAI credentials from the environment).
	llm, err := openai.New()
	if err != nil {
		log.Fatalf("Failed to create LLM: %v", err)
	}
	// Create sample documents — the tiny knowledge base for this demo.
	documents := []rag.Document{
		{
			Content: "LangGraph is a library for building stateful, multi-actor applications with LLMs. " +
				"It extends LangChain Expression Language with the ability to coordinate multiple chains " +
				"(or actors) across multiple steps of computation in a cyclic manner.",
			Metadata: map[string]any{
				"source": "langgraph_intro.txt",
				"topic":  "LangGraph",
			},
		},
		{
			Content: "RAG (Retrieval-Augmented Generation) is a technique that combines information retrieval " +
				"with text generation. It retrieves relevant documents from a knowledge base and uses them " +
				"to augment the context provided to a language model for generation.",
			Metadata: map[string]any{
				"source": "rag_overview.txt",
				"topic":  "RAG",
			},
		},
		{
			Content: "Vector databases store embeddings, which are numerical representations of text. " +
				"They enable efficient similarity search by comparing vector distances. " +
				"Popular vector databases include Pinecone, Weaviate, and Chroma.",
			Metadata: map[string]any{
				"source": "vector_db.txt",
				"topic":  "Vector Databases",
			},
		},
		{
			Content: "Text embeddings are dense vector representations of text that capture semantic meaning. " +
				"Models like OpenAI's text-embedding-ada-002 or sentence transformers can generate these embeddings. " +
				"Similar texts have similar embeddings in the vector space.",
			Metadata: map[string]any{
				"source": "embeddings.txt",
				"topic":  "Embeddings",
			},
		},
	}
	// Create embedder and vector store (mock embedder: 128-dim vectors, no API calls).
	embedder := store.NewMockEmbedder(128)
	vectorStore := store.NewInMemoryVectorStore(embedder)
	// Generate embeddings and add documents to vector store.
	texts := make([]string, len(documents))
	for i, doc := range documents {
		texts[i] = doc.Content
	}
	embeddings, err := embedder.EmbedDocuments(ctx, texts)
	if err != nil {
		log.Fatalf("Failed to generate embeddings: %v", err)
	}
	err = vectorStore.AddBatch(ctx, documents, embeddings)
	if err != nil {
		log.Fatalf("Failed to add documents to vector store: %v", err)
	}
	// Create retriever returning the top-3 most similar documents per query.
	retriever := retriever.NewVectorStoreRetriever(vectorStore, embedder, 3)
	// Configure RAG pipeline.
	config := rag.DefaultPipelineConfig()
	config.Retriever = retriever
	config.LLM = llm
	config.TopK = 3
	config.SystemPrompt = "You are a helpful AI assistant. Answer the question based on the provided context. " +
		"If the context doesn't contain enough information to answer the question, say so."
	// Build basic RAG pipeline.
	pipeline := rag.NewRAGPipeline(config)
	err = pipeline.BuildBasicRAG()
	if err != nil {
		log.Fatalf("Failed to build RAG pipeline: %v", err)
	}
	// Compile the pipeline into a runnable graph.
	runnable, err := pipeline.Compile()
	if err != nil {
		log.Fatalf("Failed to compile pipeline: %v", err)
	}
	// Visualize the pipeline as a Mermaid diagram.
	exporter := graph.GetGraphForRunnable(runnable)
	fmt.Println("=== RAG Pipeline Visualization (Mermaid) ===")
	fmt.Println(exporter.DrawMermaid())
	fmt.Println()
	// Test queries exercised against the pipeline.
	queries := []string{
		"What is LangGraph?",
		"How does RAG work?",
		"What are vector databases used for?",
	}
	for i, query := range queries {
		fmt.Printf("=== Query %d ===\n", i+1)
		fmt.Printf("Question: %s\n\n", query)
		result, err := runnable.Invoke(ctx, map[string]any{
			"query": query,
		})
		if err != nil {
			// Best-effort demo: log and move on to the next query.
			log.Printf("Failed to process query: %v", err)
			continue
		}
		finalState := result
		// In map[string]any state, we need to extract documents.
		if docs, ok := finalState["documents"].([]rag.RAGDocument); ok {
			fmt.Println("Retrieved Documents:")
			for j, doc := range docs {
				source := "Unknown"
				if s, ok := doc.Metadata["source"]; ok {
					source = fmt.Sprintf("%v", s)
				}
				fmt.Printf(" [%d] %s\n", j+1, source)
				fmt.Printf(" %s...\n", truncate(doc.Content, 100))
			}
		}
		if answer, ok := finalState["answer"].(string); ok {
			fmt.Printf("\nAnswer: %s\n", answer)
		}
		fmt.Println("\n" + strings.Repeat("-", 80) + "\n")
	}
}
// truncate shortens s for display, appending "..." when content was cut.
// It counts and slices runes rather than bytes: the original byte-index
// slice (s[:maxLen]) could split a multi-byte UTF-8 character in half and
// emit invalid text. For pure-ASCII input behavior is unchanged.
func truncate(s string, maxLen int) string {
	if maxLen < 0 {
		maxLen = 0
	}
	runes := []rune(s)
	if len(runes) <= maxLen {
		return s
	}
	return string(runes[:maxLen]) + "..."
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/basic_example/main.go | examples/basic_example/main.go | package main
import (
"context"
"fmt"
"time"
"github.com/smallnest/langgraphgo/graph"
)
// Simple example demonstrating all major features: basic execution,
// streaming, checkpointing, and visualization — run in order.
func main() {
	fmt.Println("๐ LangGraphGo Basic Example")
	fmt.Println("============================")
	for _, demo := range []func(){
		runBasicExample,
		runStreamingExample,
		runCheckpointingExample,
		runVisualizationExample,
	} {
		demo()
	}
}
// runBasicExample builds a single-node graph, compiles it, and invokes it once.
func runBasicExample() {
	fmt.Println("\n1๏ธโฃ Basic Graph Execution")

	sg := graph.NewStateGraph[string]()
	sg.AddNode("process", "process", func(ctx context.Context, in string) (string, error) {
		// The node simply tags its input.
		return fmt.Sprintf("processed_%s", in), nil
	})
	sg.AddEdge("process", graph.END)
	sg.SetEntryPoint("process")

	compiled, _ := sg.Compile()
	out, _ := compiled.Invoke(context.Background(), "input")
	fmt.Printf(" Result: %s\n", out)
}
// runStreamingExample wires a progress listener onto a single node and
// consumes the runnable's event stream, reporting node completions.
func runStreamingExample() {
	fmt.Println("\n2๏ธโฃ Streaming with Listeners")

	lg := graph.NewListenableStateGraph[map[string]any]()
	streamNode := lg.AddNode("stream_process", "stream_process", func(ctx context.Context, st map[string]any) (map[string]any, error) {
		time.Sleep(100 * time.Millisecond) // Simulate work
		st["result"] = fmt.Sprintf("streamed_%v", st["input"])
		return st, nil
	})
	lg.AddEdge("stream_process", graph.END)
	lg.SetEntryPoint("stream_process")

	// Attach a progress listener (timing output disabled).
	listener := graph.NewProgressListener().WithTiming(false)
	listener.SetNodeStep("stream_process", "๐ Processing with streaming")
	streamNode.AddListener(listener)

	compiled, _ := lg.CompileListenable()
	eventCh := compiled.Stream(context.Background(), map[string]any{"input": "stream_input"})

	// Drain the channel, reporting only completion events.
	for ev := range eventCh {
		if ev.Event == graph.NodeEventComplete {
			fmt.Printf(" Event Complete: %s\n", ev.NodeName)
		}
	}
}
// runCheckpointingExample runs a two-step graph with auto-save checkpointing
// backed by an in-memory store, then lists the checkpoints that were created.
func runCheckpointingExample() {
	fmt.Println("\n3๏ธโฃ Checkpointing Example")
	g := graph.NewCheckpointableStateGraph[map[string]any]()
	g.AddNode("checkpoint_step1", "checkpoint_step1", func(ctx context.Context, data map[string]any) (map[string]any, error) {
		data["step1"] = "completed"
		return data, nil
	})
	g.AddNode("checkpoint_step2", "checkpoint_step2", func(ctx context.Context, data map[string]any) (map[string]any, error) {
		data["step2"] = "completed"
		return data, nil
	})
	g.AddEdge("checkpoint_step1", "checkpoint_step2")
	g.AddEdge("checkpoint_step2", graph.END)
	g.SetEntryPoint("checkpoint_step1")
	// Configure checkpointing: auto-save after each step, keep at most 5.
	config := graph.CheckpointConfig{
		Store:          graph.NewMemoryCheckpointStore(),
		AutoSave:       true,
		MaxCheckpoints: 5,
	}
	g.SetCheckpointConfig(config)
	runnable, _ := g.CompileCheckpointable()
	initialState := map[string]any{
		"input": "checkpoint_test",
	}
	result, _ := runnable.Invoke(context.Background(), initialState)
	// Wait for async checkpoints to be flushed before listing them.
	time.Sleep(100 * time.Millisecond)
	checkpoints, _ := runnable.ListCheckpoints(context.Background())
	fmt.Printf(" Final State: %v\n", result)
	fmt.Printf(" Created %d checkpoints\n", len(checkpoints))
}
// runVisualizationExample builds a trivial two-node graph and prints
// truncated previews of its Mermaid diagram and ASCII tree renderings.
func runVisualizationExample() {
	fmt.Println("\n4๏ธโฃ Graph Visualization")
	g := graph.NewStateGraph[map[string]any]()
	g.AddNode("visualize_step1", "visualize_step1", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		return state, nil
	})
	g.AddNode("visualize_step2", "visualize_step2", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		return state, nil
	})
	g.AddEdge("visualize_step1", "visualize_step2")
	g.AddEdge("visualize_step2", graph.END)
	g.SetEntryPoint("visualize_step1")
	runnable, _ := g.Compile()
	exporter := graph.GetGraphForRunnable(runnable)
	fmt.Println(" ๐ Mermaid Diagram:")
	// NOTE(review): these previews index the rendered string directly
	// (mermaid[:100], ascii[:50]) and would panic on shorter output.
	mermaid := exporter.DrawMermaid()
	fmt.Printf(" %s\n", mermaid[:100]+"...")
	fmt.Println(" ๐ณ ASCII Tree:")
	ascii := exporter.DrawASCII()
	fmt.Printf(" %s\n", ascii[:50]+"...")
	fmt.Println("\nโ
All examples completed successfully!")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/api_interrupt_demo/main.go | examples/api_interrupt_demo/main.go | // Package main demonstrates a request-response API pattern with checkpoint-based
// interrupt handling for conversational agents.
//
// This example shows how to:
// 1. Build an HTTP API that handles conversational flows with interrupts
// 2. Automatically save checkpoints when interrupts occur (Issue #70 fix)
// 3. Detect and resume from interrupted states using checkpoint metadata
// 4. Use thread_id to maintain conversation state across requests
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/smallnest/langgraphgo/graph"
"github.com/smallnest/langgraphgo/store/file"
)
// OrderState represents the state of an order processing conversation.
// It flows through the checkpointable graph and is JSON-serialized when
// checkpoints are persisted, so every field carries a json tag.
type OrderState struct {
	SessionId   string    `json:"session_id"`   // client-supplied conversation key; also used as thread_id
	UserInput   string    `json:"user_input"`   // latest raw user message
	ProductInfo string    `json:"product_info"` // matched catalog product name
	OrderId     string    `json:"order_id"`     // generated order identifier; empty means no active order
	Price       float64   `json:"price"`        // unit price looked up from the catalog
	OrderStatus string    `json:"order_status"` // human-readable order phase (pending/paid/shipped/cancelled)
	Message     string    `json:"message"`      // reply text surfaced to the user
	UpdateAt    time.Time `json:"update_at"`    // last time a node mutated this state
	NextNode    string    `json:"next_node,omitempty"` // Tracks where to resume
	IsInterrupt bool      `json:"is_interrupt,omitempty"` // set while awaiting human input at an interrupt
}
// ProductCatalog is the in-memory product catalog used by the demo:
// product name -> unit price (CNY) and available stock.
var ProductCatalog = map[string]struct {
	Price float64
	Stock int
}{
	"iPhone 15":   {Price: 7999.00, Stock: 50},
	"MacBook Pro": {Price: 15999.00, Stock: 20},
	"AirPods":     {Price: 1299.00, Stock: 100},
	"iPad Air":    {Price: 4799.00, Stock: 30},
}
// BuildOrderGraph creates the order processing graph with interrupts.
// Flow: order_receive -> inventory_check -> price_calculation ->
// payment_processing (human-in-the-loop interrupt) -> warehouse_notify.
// Conditional edges short-circuit to END whenever the order is invalid
// (no product match, out of stock, or payment cancelled).
func BuildOrderGraph(store graph.CheckpointStore) *graph.CheckpointableRunnable[OrderState] {
	g := graph.NewCheckpointableStateGraphWithConfig[OrderState](graph.CheckpointConfig{
		Store:          store,
		AutoSave:       true,
		MaxCheckpoints: 10,
	})
	// Node 1: Order receive - extract product information
	g.AddNode("order_receive", "Order receive", func(ctx context.Context, state OrderState) (OrderState, error) {
		// If ProductInfo is already set and we're resuming, skip processing
		if state.ProductInfo != "" && state.IsInterrupt {
			// Already have product info, probably resuming - just pass through
			return state, nil
		}
		if state.UserInput == "" {
			state.Message = "่ฏท่พๅ
ฅๆจ่ฆ่ดญไนฐ็ไบงๅ"
			return state, nil
		}
		// String matching to find product
		var foundProduct string
		for productName := range ProductCatalog {
			if strings.Contains(state.UserInput, productName) {
				foundProduct = productName
				state.ProductInfo = productName
				break
			}
		}
		// Product not in catalog: reply with the list of available products.
		if foundProduct == "" {
			availableProducts := make([]string, 0, len(ProductCatalog))
			for name := range ProductCatalog {
				availableProducts = append(availableProducts, name)
			}
			state.Message = fmt.Sprintf("ๆฑๆญ๏ผๆไปฌ็ไบงๅๆธ
ๅไธญๆฒกๆๆจ่ฆ่ดญไนฐ็ๅๅใ\nๅฏ้ไบงๅๆ๏ผ%s",
				strings.Join(availableProducts, "ใ"))
			return state, nil
		}
		// Generate order ID (session + timestamp keeps it unique per run).
		state.OrderId = fmt.Sprintf("ORD%s%d", state.SessionId, time.Now().Unix())
		state.UpdateAt = time.Now()
		return state, nil
	})
	// Node 2: Inventory check — clears OrderId on failure so the
	// conditional edge below routes to END.
	g.AddNode("inventory_check", "Inventory check", func(ctx context.Context, state OrderState) (OrderState, error) {
		product, exists := ProductCatalog[state.ProductInfo]
		if !exists {
			state.Message = "ไบงๅไฟกๆฏๅผๅธธ๏ผ่ฏท้ๆฐไธๅ"
			state.OrderId = ""
			return state, nil
		}
		// Check inventory
		if product.Stock <= 0 {
			state.Message = fmt.Sprintf("ๆฑๆญ๏ผ%s ๆๆถๆ ่ดง๏ผ่ฏท้ๆฉๅ
ถไปไบงๅ", state.ProductInfo)
			state.OrderId = ""
			return state, nil
		}
		state.UpdateAt = time.Now()
		return state, nil
	})
	// Node 3: Price calculation — looks the unit price up from the catalog.
	g.AddNode("price_calculation", "Price calculation", func(ctx context.Context, state OrderState) (OrderState, error) {
		product, exists := ProductCatalog[state.ProductInfo]
		if !exists {
			return state, fmt.Errorf("ไบงๅไฟกๆฏๅผๅธธ")
		}
		state.Price = product.Price
		state.UpdateAt = time.Now()
		return state, nil
	})
	// Node 4: Payment processing with human-in-the-loop.
	// graph.Interrupt pauses the graph; on the first pass it returns an
	// error (caught by the caller as GraphInterrupt), and on resume it
	// returns the ResumeValue supplied in the config.
	g.AddNode("payment_processing", "Payment processing", func(ctx context.Context, state OrderState) (OrderState, error) {
		state.OrderStatus = "ๅพ
ๆฏไป"
		// Human-in-the-loop: wait for user to confirm payment
		confirmMsg := fmt.Sprintf("ๆจ่ดญไนฐ็ %s๏ผไปทๆ ผ๏ผ%.2f ๅ
\n่ฏท็กฎ่ฎคๆฏๅฆๆฏไป๏ผ๏ผๅๅค`็กฎ่ฎค`ไปฅๅฎๆๆฏไป๏ผ",
			state.ProductInfo, state.Price)
		payInfo, err := graph.Interrupt(ctx, confirmMsg)
		if err != nil {
			// Set NextNode to indicate where to resume
			state.NextNode = "payment_processing"
			state.IsInterrupt = true
			return state, err
		}
		// Clear interrupt flag on resume
		state.IsInterrupt = false
		state.NextNode = ""
		// Check user confirmation
		payInfoStr, ok := payInfo.(string)
		if !ok || !strings.Contains(strings.ToLower(payInfoStr), "็กฎ่ฎค") {
			state.Message = "ๆจๅทฒๅๆถๆฏไป๏ผ่ฎขๅๅทฒๅ
ณ้ญ"
			state.OrderStatus = "ๅทฒๅๆถ"
			state.OrderId = ""
			return state, nil
		}
		state.OrderStatus = "ๅทฒๆฏไป"
		state.UpdateAt = time.Now()
		return state, nil
	})
	// Node 5: Warehouse notification — final confirmation message.
	g.AddNode("warehouse_notify", "Warehouse notify", func(ctx context.Context, state OrderState) (OrderState, error) {
		// In a real application, this would call a warehouse notification API
		state.OrderStatus = "ๅทฒๅ่ดง"
		state.Message = fmt.Sprintf("ๆจ่ดญไนฐ็ %s๏ผไปทๆ ผ๏ผ%.2f ๅ
๏ผๅทฒๅ่ดง๏ผ\n่ฎขๅๅท๏ผ%s",
			state.ProductInfo, state.Price, state.OrderId)
		state.UpdateAt = time.Now()
		return state, nil
	})
	// Set entry point
	g.SetEntryPoint("order_receive")
	// Define conditional edges: an empty OrderId means the flow failed
	// somewhere upstream and should terminate.
	g.AddConditionalEdge("order_receive", func(ctx context.Context, state OrderState) string {
		if state.ProductInfo == "" || state.OrderId == "" {
			return graph.END
		}
		return "inventory_check"
	})
	g.AddConditionalEdge("inventory_check", func(ctx context.Context, state OrderState) string {
		if state.OrderId == "" {
			return graph.END
		}
		return "price_calculation"
	})
	g.AddEdge("price_calculation", "payment_processing")
	g.AddConditionalEdge("payment_processing", func(ctx context.Context, state OrderState) string {
		if state.OrderId == "" || state.OrderStatus == "ๅทฒๅๆถ" {
			return graph.END
		}
		return "warehouse_notify"
	})
	g.AddEdge("warehouse_notify", graph.END)
	runnable, err := g.CompileCheckpointable()
	if err != nil {
		log.Fatalf("Failed to compile graph: %v", err)
	}
	return runnable
}
// API handlers

// ChatRequest is the JSON body accepted by the /chat endpoint.
type ChatRequest struct {
	SessionID string `json:"session_id"` // conversation key; doubles as the checkpoint thread_id
	Content   string `json:"content"`    // user's message (or resume value when replying to an interrupt)
}

// ChatResponse is the JSON body returned by the /chat endpoint.
type ChatResponse struct {
	Message     string `json:"message"`                // agent reply or interrupt prompt
	OrderStatus string `json:"order_status,omitempty"` // current order phase, when known
	IsInterrupt bool   `json:"is_interrupt,omitempty"` // true when this reply is an interrupt prompt
	NeedsResume bool   `json:"needs_resume,omitempty"` // true when the client must answer to continue
}
// Server holds the compiled order graph and the checkpoint store that
// backs interrupt/resume across HTTP requests.
type Server struct {
	Runnable *graph.CheckpointableRunnable[OrderState] // compiled, checkpointable order graph
	Store    graph.CheckpointStore                     // persistent checkpoint storage (file-backed)
}
// NewServer creates a new API server: it prepares the on-disk checkpoint
// directory, opens a file-backed checkpoint store, and compiles the order
// graph on top of it.
func NewServer() (*Server, error) {
	const checkpointDir = "./checkpoints_api_demo"
	if err := os.MkdirAll(checkpointDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create checkpoint directory: %w", err)
	}

	cpStore, err := file.NewFileCheckpointStore(checkpointDir)
	if err != nil {
		return nil, fmt.Errorf("failed to create checkpoint store: %w", err)
	}

	srv := &Server{
		Runnable: BuildOrderGraph(cpStore),
		Store:    cpStore,
	}
	// The execution ID is set per-request from the thread_id; start empty.
	srv.Runnable.SetExecutionID("")
	return srv, nil
}
// HandleChat handles chat requests. It detects — via the latest checkpoint's
// interrupt flag — whether this request resumes a paused conversation or
// starts a new one, executes the order graph accordingly, and translates a
// GraphInterrupt into an is_interrupt/needs_resume JSON response.
func (s *Server) HandleChat(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Parse request
	var req ChatRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Use session_id as thread_id (generate one if the client omitted it).
	threadID := req.SessionID
	if threadID == "" {
		threadID = fmt.Sprintf("session_%d", time.Now().UnixNano())
	}
	// Check if this thread has any checkpoints (to detect if we're resuming)
	checkpoints, err := s.Store.List(ctx, threadID)
	isResuming := false
	var latestCP *graph.Checkpoint
	if err == nil && len(checkpoints) > 0 {
		// Find the latest checkpoint
		latestCP = checkpoints[len(checkpoints)-1]
		// Check if the latest checkpoint has interrupt metadata
		if event, ok := latestCP.Metadata["event"].(string); ok && event == "step" {
			// The checkpoint was saved after a step completed
			// We need to check if there was an interrupt by looking at the state
			// State might be OrderState or map[string]interface{} (from JSON deserialization)
			if state, ok := latestCP.State.(OrderState); ok && state.IsInterrupt {
				isResuming = true
			} else if m, ok := latestCP.State.(map[string]interface{}); ok {
				// Check for is_interrupt in map
				if isInterrupt, ok := m["is_interrupt"].(bool); ok && isInterrupt {
					isResuming = true
				}
			}
		}
	}
	var config *graph.Config
	var initialState OrderState
	if isResuming && latestCP != nil {
		// RESUMING FROM INTERRUPT
		// Convert checkpoint state to OrderState
		if cpState, ok := latestCP.State.(OrderState); ok {
			initialState = cpState
		} else {
			// Handle case where state is map[string]interface{}
			if m, ok := latestCP.State.(map[string]interface{}); ok {
				// Convert map to OrderState (simplified - in production use proper JSON unmarshaling)
				initialState.SessionId = toString(m["session_id"])
				initialState.UserInput = req.Content
				initialState.ProductInfo = toString(m["product_info"])
				initialState.OrderId = toString(m["order_id"])
				initialState.Price = toFloat64(m["price"])
				initialState.OrderStatus = toString(m["order_status"])
				initialState.Message = toString(m["message"])
				initialState.UpdateAt = toTime(m["update_at"])
				initialState.NextNode = toString(m["next_node"])
				initialState.IsInterrupt = toBool(m["is_interrupt"])
			}
		}
		// Update with new user input
		initialState.UserInput = req.Content
		// ResumeValue is what graph.Interrupt returns inside the node;
		// ResumeFrom re-enters the graph at the interrupted node.
		config = &graph.Config{
			Configurable: map[string]any{
				"thread_id": threadID,
			},
			ResumeValue: req.Content,
			ResumeFrom:  []string{latestCP.NodeName},
		}
	} else {
		// NEW REQUEST
		initialState = OrderState{
			SessionId: req.SessionID,
			UserInput: req.Content,
		}
		config = &graph.Config{
			Configurable: map[string]any{
				"thread_id": threadID,
			},
		}
	}
	// Set the execution ID to match the thread ID for checkpoint storage
	s.Runnable.SetExecutionID(threadID)
	// Execute the graph
	result, err := s.Runnable.InvokeWithConfig(ctx, initialState, config)
	var graphInterrupt *graph.GraphInterrupt
	if errors.As(err, &graphInterrupt) {
		// Graph was interrupted - state has been automatically saved by the fix (Issue #70)
		interruptState, ok := graphInterrupt.State.(OrderState)
		if !ok {
			log.Printf("Warning: Could not convert interrupt state to OrderState, got %T", graphInterrupt.State)
		}
		// Send interrupt response to client
		response := ChatResponse{
			Message:     fmt.Sprintf("%v", graphInterrupt.InterruptValue),
			IsInterrupt: true,
			NeedsResume: true,
		}
		if interruptState.ProductInfo != "" {
			response.OrderStatus = interruptState.OrderStatus
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(response)
		return
	}
	if err != nil {
		http.Error(w, fmt.Sprintf("Execution failed: %v", err), http.StatusInternalServerError)
		return
	}
	// Send normal response
	response := ChatResponse{
		Message:     result.Message,
		IsInterrupt: false,
		NeedsResume: false,
	}
	if result.OrderStatus != "" {
		response.OrderStatus = result.OrderStatus
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// Helper functions for type conversion

// toString renders an arbitrary value as a string; nil becomes "".
func toString(v any) string {
	switch s := v.(type) {
	case nil:
		return ""
	case string:
		return s
	default:
		return fmt.Sprintf("%v", s)
	}
}
// toBool reports the boolean value of v; anything that is not a bool
// (including nil) yields false.
func toBool(v any) bool {
	b, ok := v.(bool)
	return ok && b
}
// toFloat64 converts a loosely-typed checkpoint/map value to float64.
// JSON round-trips always produce float64, but state that never left Go
// may carry float32 or native integer values, so those are accepted too.
// nil and any other type yield 0 (unchanged from the original behavior).
func toFloat64(v any) float64 {
	switch n := v.(type) {
	case float64:
		return n
	case float32:
		return float64(n)
	case int:
		return float64(n)
	case int64:
		return float64(n)
	default:
		return 0
	}
}
func toTime(v any) time.Time {
if v == nil {
return time.Time{}
}
if s, ok := v.(string); ok {
t, err := time.Parse(time.RFC3339Nano, s)
if err == nil {
return t
}
}
return time.Time{}
}
// main starts the demo HTTP server on :8080 with /chat (conversational
// order flow with interrupt/resume) and /health endpoints, and prints
// ready-to-paste curl examples for a full order + payment confirmation.
func main() {
	server, err := NewServer()
	if err != nil {
		log.Fatalf("Failed to create server: %v", err)
	}
	// Setup HTTP handlers
	http.HandleFunc("/chat", server.HandleChat)
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("OK"))
	})
	port := "8080"
	fmt.Printf("Server starting on http://localhost:%s\n", port)
	fmt.Printf("Try these examples:\n\n")
	fmt.Printf("1. Start new order:\n")
	fmt.Printf(" curl -X POST http://localhost:%s/chat \\\n", port)
	fmt.Printf(" -H 'Content-Type: application/json' \\\n")
	fmt.Printf(" -d '{\"session_id\":\"user123\",\"content\":\"ๆๆณไนฐAirPods\"}'\n\n")
	fmt.Printf("2. Confirm payment (resume from interrupt):\n")
	fmt.Printf(" curl -X POST http://localhost:%s/chat \\\n", port)
	fmt.Printf(" -H 'Content-Type: application/json' \\\n")
	fmt.Printf(" -d '{\"session_id\":\"user123\",\"content\":\"็กฎ่ฎค\"}'\n\n")
	// Blocks until the listener fails; no explicit shutdown in this demo.
	if err := http.ListenAndServe(":"+port, nil); err != nil {
		log.Fatalf("Server failed: %v", err)
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/dynamic_interrupt/main.go | examples/dynamic_interrupt/main.go | package main
import (
"context"
"errors"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
)
// main demonstrates the dynamic-interrupt pattern: a node calls
// graph.Interrupt to pause and request input, the caller catches the
// resulting GraphInterrupt, and a second invocation with ResumeValue
// re-runs the node with the supplied answer.
func main() {
	// Create a state graph with map state
	g := graph.NewStateGraph[map[string]any]()
	g.AddNode("ask_name", "ask_name", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		// This simulates an interrupt.
		// graph.Interrupt pauses execution and waits for input.
		// When execution resumes, it returns the provided value.
		answer, err := graph.Interrupt(ctx, "What is your name?")
		if err != nil {
			return nil, err
		}
		// Use the answer
		return map[string]any{
				"name":    answer,
				"message": fmt.Sprintf("Hello, %s!", answer),
			},
			nil
	})
	g.SetEntryPoint("ask_name")
	g.AddEdge("ask_name", graph.END)
	runnable, err := g.Compile()
	if err != nil {
		log.Fatal(err)
	}
	// 1. Initial Run
	fmt.Println("--- 1. Initial Execution ---")
	// We pass empty map as initial state
	_, err = runnable.Invoke(context.Background(), map[string]any{})
	// Check if the execution was interrupted
	var graphInterrupt *graph.GraphInterrupt
	if errors.As(err, &graphInterrupt) {
		fmt.Printf("Graph interrupted at node: %s\n", graphInterrupt.Node)
		fmt.Printf("Interrupt Value (Query): %v\n", graphInterrupt.InterruptValue)
		// Simulate getting input from a user
		userInput := "Alice"
		fmt.Printf("\n[User Input]: %s\n", userInput)
		// 2. Resume Execution
		fmt.Println("\n--- 2. Resuming Execution ---")
		// We provide the user input as ResumeValue in the config
		config := &graph.Config{
			ResumeValue: userInput,
		}
		// Re-run the graph. The 'ask_name' node will run again,
		// but this time graph.Interrupt() will return 'userInput' immediately.
		// Note: We need to pass the same initial state (or the state at interruption if we had it, but here it's stateless start)
		res, err := runnable.InvokeWithConfig(context.Background(), map[string]any{}, config)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("Final Result: %v\n", res)
	} else if err != nil {
		log.Fatalf("Execution failed: %v", err)
	} else {
		fmt.Println("Execution finished without interrupt.")
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/chat_agent_async/main.go | examples/chat_agent_async/main.go | package main
import (
"context"
"fmt"
"log"
"time"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms/openai"
)
// main runs five ChatAgent streaming demos against a live OpenAI-backed
// agent: per-character streaming, per-chunk streaming, full-response
// collection, context-timeout cancellation, and a non-streaming baseline.
func main() {
	fmt.Println("=== ChatAgent AsyncChat Demo ===")
	fmt.Println("This example demonstrates streaming responses from the agent.")
	fmt.Println()
	// LLM client reads OpenAI credentials from the environment.
	llm, err := openai.New()
	if err != nil {
		log.Fatal(err)
	}
	// Create ChatAgent
	agent, err := prebuilt.NewChatAgent(llm, nil)
	if err != nil {
		log.Fatalf("Failed to create ChatAgent: %v", err)
	}
	ctx := context.Background()
	// Demo 1: Character-by-character streaming with AsyncChat
	fmt.Println("--- Demo 1: Character-by-Character Streaming ---")
	fmt.Print("User: Hello!\n")
	fmt.Print("Agent: ")
	respChan1, err := agent.AsyncChat(ctx, "Hello!")
	if err != nil {
		log.Fatalf("AsyncChat failed: %v", err)
	}
	for char := range respChan1 {
		fmt.Print(char)
		time.Sleep(20 * time.Millisecond) // Simulate typing effect
	}
	fmt.Println("")
	// Demo 2: Word-by-word streaming with AsyncChatWithChunks
	fmt.Println("--- Demo 2: Word-by-Word Streaming ---")
	fmt.Print("User: Can you explain async chat?\n")
	fmt.Print("Agent: ")
	respChan2, err := agent.AsyncChatWithChunks(ctx, "Can you explain async chat?")
	if err != nil {
		log.Fatalf("AsyncChatWithChunks failed: %v", err)
	}
	start := time.Now()
	for word := range respChan2 {
		fmt.Print(word)
		time.Sleep(100 * time.Millisecond) // Simulate thinking/typing
		// Safety valve: cap the demo at ~5s of streaming.
		if time.Since(start) > 5*time.Second {
			break
		}
	}
	fmt.Println("")
	// Demo 3: Collecting full response while streaming it.
	fmt.Println("--- Demo 3: Collecting Full Response ---")
	fmt.Println("User: What's the benefit of streaming?")
	fmt.Print("Agent: ")
	respChan3, err := agent.AsyncChatWithChunks(ctx, "What's the benefit of streaming?")
	if err != nil {
		log.Fatalf("AsyncChatWithChunks failed: %v", err)
	}
	start = time.Now()
	var fullResponse string
	chunkCount := 0
	for chunk := range respChan3 {
		fullResponse += chunk
		chunkCount++
		fmt.Print(chunk)
		if time.Since(start) > 5*time.Second {
			break
		}
		time.Sleep(80 * time.Millisecond)
	}
	fmt.Printf("\n\n[Received %d chunks, total length: %d characters]\n\n", chunkCount, len(fullResponse))
	// Demo 4: Using context cancellation to cut a stream short.
	fmt.Println("--- Demo 4: Context Cancellation ---")
	fmt.Println("User: Tell me a very long story...")
	fmt.Print("Agent: ")
	// Create a context with timeout
	ctx4, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	respChan4, err := agent.AsyncChat(ctx4, "Tell me a very long story")
	if err != nil {
		log.Fatalf("AsyncChat failed: %v", err)
	}
	start = time.Now()
	receivedChunks := 0
	for char := range respChan4 {
		fmt.Print(char)
		receivedChunks++
		time.Sleep(30 * time.Millisecond)
		if time.Since(start) > 5*time.Second {
			break
		}
	}
	fmt.Printf("\n\n[Stream was interrupted after receiving %d characters due to context timeout]\n\n", receivedChunks)
	// Demo 5: Comparison with regular (blocking) Chat
	fmt.Println("--- Demo 5: Comparison with Regular Chat ---")
	fmt.Println("User: One more question please")
	fmt.Print("Agent (regular Chat): ")
	start = time.Now()
	regularResp, err := agent.Chat(context.Background(), "One more question please")
	if err != nil {
		log.Fatalf("Chat failed: %v", err)
	}
	elapsed := time.Since(start)
	fmt.Println(regularResp)
	fmt.Printf("[Regular chat returned in %v]\n\n", elapsed)
	fmt.Println("=== Demo Complete ===")
	fmt.Println("\nKey Takeaways:")
	fmt.Println("1. AsyncChat streams character-by-character for real-time typing effect")
	fmt.Println("2. AsyncChatWithChunks streams word-by-word for better readability")
	fmt.Println("3. Use context for timeouts and cancellation")
	fmt.Println("4. Channel is automatically closed when response is complete")
	fmt.Println("5. Regular Chat still available for non-streaming use cases")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/memory/main.go | examples/memory/main.go | // LangGraphGo ๅ
ๅญ็ญ็ฅๆผ็คบ็จๅบ
//
// ่ฟไธช็จๅบๆผ็คบไบ LangGraphGo ไธญๆๆๅฏ็จ็ๅ
ๅญ็ฎก็็ญ็ฅใ
// ่ฟ่กๆนๅผ: go run memory_examples.go
//
// ๆผ็คบ็ๅ
ๅญ็ญ็ฅ:
// 1. Sequential Memory - ๅญๅจๆๆๆถๆฏ
// 2. Sliding Window Memory - ๅชไฟ็ๆ่ฟ็ๆถๆฏ
// 3. Buffer Memory - ็ตๆดป็็ผๅฒ้ๅถ
// 4. Summarization Memory - ๅ็ผฉๆงๆถๆฏ
// 5. Retrieval Memory - ๅบไบ็ธไผผๅบฆๆฃ็ดข
// 6. Hierarchical Memory - ๅๅฑๅญๅจ
// 7. Graph-Based Memory - ๅ
ณ็ณปๅพ่ฐฑ
// 8. Compression Memory - ๆบ่ฝๅ็ผฉ
// 9. OS-Like Memory - ๆไฝ็ณป็ปๅผ็ฎก็
package main
import (
"context"
"fmt"
"math"
"strings"
"time"
"github.com/smallnest/langgraphgo/memory"
)
// main walks through all nine memory-strategy demos in order, then runs a
// performance comparison. (Original Chinese comments translated to English;
// printed strings left untouched.)
func main() {
	ctx := context.Background()
	fmt.Println("=== LangGraphGo ๅ
ๅญ็ญ็ฅๆผ็คบ ===\n")
	// 1. Sequential Memory example
	fmt.Println("1. Sequential Memory (้กบๅบๅ
ๅญ)")
	demonstrateSequentialMemory(ctx)
	// 2. Sliding Window Memory example
	fmt.Println("\n2. Sliding Window Memory (ๆปๅจ็ชๅฃ)")
	demonstrateSlidingWindowMemory(ctx)
	// 3. Buffer Memory example
	fmt.Println("\n3. Buffer Memory (็ผๅฒๅ
ๅญ)")
	demonstrateBufferMemory(ctx)
	// 4. Summarization Memory example
	fmt.Println("\n4. Summarization Memory (ๆ่ฆๅ
ๅญ)")
	demonstrateSummarizationMemory(ctx)
	// 5. Retrieval Memory example
	fmt.Println("\n5. Retrieval Memory (ๆฃ็ดขๅ
ๅญ)")
	demonstrateRetrievalMemory(ctx)
	// 6. Hierarchical Memory example
	fmt.Println("\n6. Hierarchical Memory (ๅๅฑๅ
ๅญ)")
	demonstrateHierarchicalMemory(ctx)
	// 7. Graph-Based Memory example
	fmt.Println("\n7. Graph-Based Memory (ๅพๅ
ๅญ)")
	demonstrateGraphBasedMemory(ctx)
	// 8. Compression Memory example
	fmt.Println("\n8. Compression Memory (ๅ็ผฉๅ
ๅญ)")
	demonstrateCompressionMemory(ctx)
	// 9. OS-Like Memory example
	fmt.Println("\n9. OS-Like Memory (ๆไฝ็ณป็ปๅผๅ
ๅญ)")
	demonstrateOSLikeMemory(ctx)
	// Performance comparison across strategies
	fmt.Println("\n=== ๆง่ฝๅฏนๆฏ ===")
	comparePerformance(ctx)
}
// 1. Sequential Memory ๆผ็คบ
func demonstrateSequentialMemory(ctx context.Context) {
mem := memory.NewSequentialMemory()
// ๆทปๅ ๆถๆฏ
messages := []string{
"ไฝ ๅฅฝ๏ผๆๆณไบ่งฃ LangGraphGo",
"LangGraphGo ๆฏไธไธชๅผบๅคง็ AI ๆกๆถ",
"ๅฎๆไปไน็นๆง๏ผ",
"ๆฏๆๅนถ่กๆง่กใ็ถๆ็ฎก็ใๆไน
ๅ็ญ",
}
for _, content := range messages {
msg := memory.NewMessage("user", content)
if strings.Contains(content, "ๆกๆถ") || strings.Contains(content, "ๆฏๆ") {
msg.Role = "assistant"
}
mem.AddMessage(ctx, msg)
}
// ่ทๅๆๆๆถๆฏ
context, _ := mem.GetContext(ctx, "")
stats, _ := mem.GetStats(ctx)
fmt.Printf(" ๅญๅจๆถๆฏๆฐ: %d\n", len(context))
fmt.Printf(" ๆป Token ๆฐ: %d\n", stats.TotalTokens)
fmt.Printf(" ๆๆฐๆถๆฏ: %s\n", context[len(context)-1].Content)
}
// 2. Sliding Window Memory ๆผ็คบ
func demonstrateSlidingWindowMemory(ctx context.Context) {
mem := memory.NewSlidingWindowMemory(3) // ๅชไฟ็ๆ่ฟ 3 ๆก
// ๆทปๅ 5 ๆกๆถๆฏ
for i := 1; i <= 5; i++ {
msg := memory.NewMessage("user", fmt.Sprintf("ๆถๆฏ %d", i))
mem.AddMessage(ctx, msg)
fmt.Printf(" ๆทปๅ ๆถๆฏ %d\n", i)
}
// ๅชไผไฟ็ๆ่ฟ 3 ๆก
context, _ := mem.GetContext(ctx, "")
fmt.Printf(" ๅฎ้
ไฟ็: %d ๆกๆถๆฏ\n", len(context))
fmt.Printf(" ๆถๆฏๅ่กจ: %s\n", formatMessageList(context))
}
// 3. Buffer Memory ๆผ็คบ
func demonstrateBufferMemory(ctx context.Context) {
mem := memory.NewBufferMemory(&memory.BufferConfig{
MaxMessages: 5,
MaxTokens: 200,
AutoSummarize: true,
Summarizer: simpleSummarizer,
})
// ๆทปๅ ๆถๆฏ
longMessage := strings.Repeat("่ฟๆฏไธไธชๅพ้ฟ็ๆถๆฏใ", 20)
for i := 1; i <= 6; i++ {
msg := memory.NewMessage("user", fmt.Sprintf("ๆถๆฏ %d: %s", i, longMessage[:50]))
mem.AddMessage(ctx, msg)
}
stats, _ := mem.GetStats(ctx)
fmt.Printf(" ๆดป่ทๆถๆฏ: %d\n", stats.ActiveMessages)
fmt.Printf(" ๆดป่ท Tokens: %d\n", stats.ActiveTokens)
}
// 4. Summarization Memory ๆผ็คบ
func demonstrateSummarizationMemory(ctx context.Context) {
mem := memory.NewSummarizationMemory(&memory.SummarizationConfig{
RecentWindowSize: 3,
SummarizeAfter: 5,
Summarizer: simpleSummarizer,
})
// ๆทปๅ ๆถๆฏ
topics := []string{"ไบงๅไป็ป", "ไปทๆ ผ่ฎจ่ฎบ", "ๆๆฏ็ป่", "ไบคไปๆถ้ด", "ๅฎๅๆๅก", "ๅๅๆกๆฌพ"}
for i, topic := range topics {
msg := memory.NewMessage("user", fmt.Sprintf("่ฎจ่ฎบ %s: ่ฏฆ็ปไฟกๆฏ...", topic))
mem.AddMessage(ctx, msg)
if i == 2 { // ๆทปๅ ไธไธช็ณป็ปๆถๆฏ
sysMsg := memory.NewMessage("system", "้่ฆ๏ผ่ฎฐๅฝๆๆๅณ็ญ")
mem.AddMessage(ctx, sysMsg)
}
}
context, _ := mem.GetContext(ctx, "")
stats, _ := mem.GetStats(ctx)
fmt.Printf(" ๆปๆถๆฏๆฐ: %d\n", stats.TotalMessages)
fmt.Printf(" ๅ็ผฉ็: %.2f%%\n", stats.CompressionRate*100)
fmt.Printf(" ไธไธๆๆๆ: %d ๆกๆ่ฆ + %d ๆกๆ่ฟๆถๆฏ\n",
countSummaries(context), countRecent(context))
}
// 5. Retrieval Memory ๆผ็คบ
func demonstrateRetrievalMemory(ctx context.Context) {
mem := memory.NewRetrievalMemory(&memory.RetrievalConfig{
TopK: 3,
EmbeddingFunc: func(ctx context.Context, text string) ([]float64, error) {
// ็ฎๅ็ๅตๅ
ฅๅฝๆฐ๏ผๅฎ้
ๅบไฝฟ็จ็ๅฎ็ๅตๅ
ฅๆจกๅ๏ผ
return simpleEmbedding(text), nil
},
})
// ๆทปๅ ไธๅไธป้ข็ๆถๆฏ
topics := []string{
"ไบงๅไปทๆ ผๆฏ 1000 ๅ
",
"ๆๆฏๆ ไฝฟ็จ React ๅ Go",
"ๅข้ๆ 10 ๅๅผๅ่
",
"ไบคไปๅจๆๆฏ 3 ไธชๆ",
"ๆฏๆ 24/7 ๅฎขๆ",
}
for _, content := range topics {
msg := memory.NewMessage("user", content)
mem.AddMessage(ctx, msg)
}
// ๆฅ่ฏข็ธๅ
ณ้ฎ้ข
queries := []string{"ไปทๆ ผไฟกๆฏ", "ๆๆฏๆถๆ", "ๅข้่งๆจก"}
for _, query := range queries {
context, _ := mem.GetContext(ctx, query)
fmt.Printf(" ๆฅ่ฏข '%s': ๆพๅฐ %d ๆก็ธๅ
ณๆถๆฏ\n", query, len(context))
}
}
// 6. Hierarchical Memory ๆผ็คบ
func demonstrateHierarchicalMemory(ctx context.Context) {
mem := memory.NewHierarchicalMemory(&memory.HierarchicalConfig{
RecentLimit: 3,
ImportantLimit: 5,
ImportanceScorer: func(msg *memory.Message) float64 {
score := 0.5
if msg.Role == "system" {
score += 0.3
}
if strings.Contains(msg.Content, "้่ฆ") ||
strings.Contains(msg.Content, "ๅณ็ญ") {
score += 0.3
}
return math.Min(score, 1.0)
},
})
// ๆทปๅ ๆททๅ้่ฆๆง็ๆถๆฏ
messages := []struct {
role string
content string
}{
{"user", "ๆฎ้ๆถๆฏ 1"},
{"system", "้่ฆ็ณป็ป้็ฅ"},
{"user", "ๆฎ้ๆถๆฏ 2"},
{"user", "้่ฆๅณ็ญ๏ผ้ๆฉๆนๆก A"},
{"assistant", "ๆฎ้ๆถๆฏ 3"},
{"user", "ๆฎ้ๆถๆฏ 4"},
}
for _, m := range messages {
msg := memory.NewMessage(m.role, m.content)
mem.AddMessage(ctx, msg)
}
stats, _ := mem.GetStats(ctx)
fmt.Printf(" ๆดป่ทๆถๆฏ: %d ๆก\n", stats.ActiveMessages)
fmt.Printf(" ๆปๆถๆฏ: %d ๆก\n", stats.TotalMessages)
fmt.Printf(" ๆป Tokens: %d\n", stats.TotalTokens)
}
// 7. Graph-Based Memory ๆผ็คบ
func demonstrateGraphBasedMemory(ctx context.Context) {
mem := memory.NewGraphBasedMemory(&memory.GraphConfig{
TopK: 3,
RelationExtractor: func(msg *memory.Message) []string {
// ็ฎๅ็ไธป้ขๆๅ
topics := []string{}
if strings.Contains(msg.Content, "ไบงๅ") {
topics = append(topics, "ไบงๅ")
}
if strings.Contains(msg.Content, "ไปทๆ ผ") {
topics = append(topics, "ไปทๆ ผ")
}
if strings.Contains(msg.Content, "ๆๆฏ") {
topics = append(topics, "ๆๆฏ")
}
return topics
},
})
// ๆทปๅ ็ธๅ
ณๆถๆฏ
messages := []string{
"ไบงๅๅ่ฝไป็ป",
"ไปทๆ ผ็ญ็ฅๅถๅฎ",
"ๆๆฏๆถๆ่ฎพ่ฎก",
"ไบงๅ่ทฏ็บฟๅพ",
"ๆๆฏ้ๅ่ฎจ่ฎบ",
}
for _, content := range messages {
msg := memory.NewMessage("user", content)
mem.AddMessage(ctx, msg)
}
// ๆฅ่ฏข็ธๅ
ณไธป้ข
context, _ := mem.GetContext(ctx, "ไบงๅ")
fmt.Printf(" ๆฅ่ฏข 'ไบงๅ': ๆพๅฐ %d ๆก็ธๅ
ณๆถๆฏ\n", len(context))
}
// 8. Compression Memory ๆผ็คบ
func demonstrateCompressionMemory(ctx context.Context) {
mem := memory.NewCompressionMemory(&memory.CompressionConfig{
CompressionTrigger: 5,
Compressor: func(ctx context.Context, msgs []*memory.Message) (*memory.CompressedBlock, error) {
content := fmt.Sprintf("ๅ็ผฉๅ (%d-%d): %d ๆกๆถๆฏ",
msgs[0].Timestamp.Format("01-02"),
msgs[len(msgs)-1].Timestamp.Format("01-02"),
len(msgs))
return &memory.CompressedBlock{
ID: fmt.Sprintf("block-%d", time.Now().Unix()),
Summary: content,
OriginalCount: len(msgs),
OriginalTokens: calculateTokens(msgs),
CompressedTokens: len(content) / 4, // ็ฎๅไผฐ็ฎ
TimeRange: memory.TimeRange{
Start: msgs[0].Timestamp,
End: msgs[len(msgs)-1].Timestamp,
},
}, nil
},
})
// ๆทปๅ ๆถๆฏ่งฆๅๅ็ผฉ
for i := 1; i <= 10; i++ {
msg := memory.NewMessage("user", fmt.Sprintf("ๆถๆฏ %d: ่ฏฆ็ปๅ
ๅฎน...", i))
mem.AddMessage(ctx, msg)
}
stats, _ := mem.GetStats(ctx)
fmt.Printf(" ๆดป่ทๆถๆฏ: %d\n", stats.ActiveMessages)
fmt.Printf(" ๅ็ผฉ็: %.2f%%\n", stats.CompressionRate*100)
}
// 9. OS-Like Memory ๆผ็คบ
func demonstrateOSLikeMemory(ctx context.Context) {
mem := memory.NewOSLikeMemory(&memory.OSLikeConfig{
ActiveLimit: 3,
CacheLimit: 5,
AccessWindow: time.Minute * 5,
})
// ๆทปๅ ๆถๆฏๅนถๆจกๆ่ฎฟ้ฎ
for i := 1; i <= 10; i++ {
msg := memory.NewMessage("user", fmt.Sprintf("ๆถๆฏ %d", i))
mem.AddMessage(ctx, msg)
// ๆจกๆ้ๆบ่ฎฟ้ฎ
if i%3 == 0 {
mem.GetContext(ctx, fmt.Sprintf("ๆถๆฏ %d", i-1))
}
}
stats, _ := mem.GetStats(ctx)
fmt.Printf(" ๆดป่ทๆถๆฏ: %d\n", stats.ActiveMessages)
fmt.Printf(" ๆปๆถๆฏ: %d\n", stats.TotalMessages)
fmt.Printf(" ๅ็ผฉ็: %.2f%%\n", stats.CompressionRate*100)
}
// ๆง่ฝๅฏนๆฏ
func comparePerformance(ctx context.Context) {
strategies := map[string]memory.Memory{
"Sequential": memory.NewSequentialMemory(),
"SlidingWindow": memory.NewSlidingWindowMemory(10),
"Buffer": memory.NewBufferMemory(&memory.BufferConfig{MaxMessages: 20}),
"Hierarchical": memory.NewHierarchicalMemory(&memory.HierarchicalConfig{
RecentLimit: 5, ImportantLimit: 10,
ImportanceScorer: func(msg *memory.Message) float64 { return 0.5 },
}),
}
messageCount := 50
fmt.Printf("ๆต่ฏๅบๆฏ: %d ๆกๆถๆฏ็ๆง่ฝๅฏนๆฏ\n\n", messageCount)
fmt.Printf("%-15s %-12s %-12s %-15s %-12s\n", "็ญ็ฅ", "ๅญๅจๆถๆฏ", "ๆดป่ทๆถๆฏ", "Token ๆ็", "ๅๅบๆถ้ด")
fmt.Println(strings.Repeat("-", 70))
for name, mem := range strategies {
// ๆธ
็ฉบๅนถๆทปๅ ๆต่ฏๆถๆฏ
mem.Clear(ctx)
start := time.Now()
for i := 0; i < messageCount; i++ {
msg := memory.NewMessage("user", fmt.Sprintf("ๆต่ฏๆถๆฏ %d: %s", i, strings.Repeat("ๅ
ๅฎน", 10)))
mem.AddMessage(ctx, msg)
}
// ่ทๅไธไธๆ
mem.GetContext(ctx, "ๆต่ฏๆฅ่ฏข")
duration := time.Since(start)
stats, _ := mem.GetStats(ctx)
efficiency := float64(stats.ActiveTokens) / float64(stats.TotalTokens)
fmt.Printf("%-15s %-12d %-12d %-15.2f %-12s\n",
name,
stats.TotalMessages,
stats.ActiveMessages,
efficiency,
duration.String())
}
}
// ่พ
ๅฉๅฝๆฐ
// formatMessageList renders the messages' contents as a single
// "[c1, c2, ...]" string for display, truncating each content to at most
// 20 runes. Truncating by runes (the original sliced raw bytes with
// msg.Content[:min(20, len(msg.Content))]) avoids cutting a multi-byte
// UTF-8 character in half — message content in this demo is Chinese —
// which would print as garbage. This also removes the redundant
// fmt.Sprintf("%s", ...) wrapper (staticcheck S1025).
func formatMessageList(messages []*memory.Message) string {
	contents := make([]string, len(messages))
	for i, msg := range messages {
		contents[i] = runePrefix(msg.Content, 20)
	}
	return fmt.Sprintf("[%s]", strings.Join(contents, ", "))
}

// runePrefix returns at most n leading runes of s.
func runePrefix(s string, n int) string {
	runes := []rune(s)
	if len(runes) <= n {
		return s
	}
	return string(runes[:n])
}
// simpleSummarizer is a stand-in Summarizer implementation for the demo:
// instead of calling an LLM it returns a fixed-format string reporting how
// many messages were compressed. The returned string deliberately contains
// the summary marker that countSummaries later searches for.
func simpleSummarizer(ctx context.Context, messages []*memory.Message) (string, error) {
	return fmt.Sprintf("ๆ่ฆ: %d ๆกๆถๆฏๅทฒๅ็ผฉ", len(messages)), nil
}
func simpleEmbedding(text string) []float64 {
// ็ฎๅ็ๅตๅ
ฅ๏ผๅบไบๅญ็ฌฆไธฒๅๅธ
hash := 0.0
for i, c := range text {
hash += float64(int(c) * (i + 1))
}
embedding := make([]float64, 10)
for i := range embedding {
embedding[i] = math.Sin(hash + float64(i))
}
return embedding
}
// countSummaries counts how many context entries are summary entries,
// identified by the marker substring that simpleSummarizer embeds in its
// output.
func countSummaries(messages []*memory.Message) int {
	n := 0
	for _, m := range messages {
		if strings.Contains(m.Content, "ๆ่ฆ") {
			n++
		}
	}
	return n
}
// countRecent returns how many context entries are plain recent messages,
// i.e. everything that countSummaries does not classify as a summary.
func countRecent(messages []*memory.Message) int {
	return len(messages) - countSummaries(messages)
}
// calculateTokens sums the TokenCount field across all given messages.
func calculateTokens(messages []*memory.Message) int {
	sum := 0
	for i := range messages {
		sum += messages[i].TokenCount
	}
	return sum
}
// min returns the smaller of two ints (when equal, either value).
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/lightrag_simple/main.go | examples/lightrag_simple/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"strings"
"time"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/engine"
"github.com/smallnest/langgraphgo/rag/store"
"github.com/tmc/langchaingo/llms/openai"
)
// OpenAILLMAdapter wraps langchaingo's openai.LLM to implement rag.LLMInterface.
type OpenAILLMAdapter struct {
	llm *openai.LLM
}

// NewOpenAILLMAdapter returns an adapter backed by the given OpenAI client.
func NewOpenAILLMAdapter(baseLLM *openai.LLM) *OpenAILLMAdapter {
	return &OpenAILLMAdapter{llm: baseLLM}
}

// Generate sends prompt to the underlying model and returns its completion.
func (a *OpenAILLMAdapter) Generate(ctx context.Context, prompt string) (string, error) {
	return a.llm.Call(ctx, prompt)
}

// GenerateWithConfig satisfies rag.LLMInterface; the config map is currently
// ignored and the call is delegated to Generate unchanged.
func (a *OpenAILLMAdapter) GenerateWithConfig(ctx context.Context, prompt string, config map[string]any) (string, error) {
	return a.Generate(ctx, prompt)
}

// GenerateWithSystem emulates a system prompt by prepending it to the user
// prompt separated by a blank line, since only a single-prompt Call is used.
func (a *OpenAILLMAdapter) GenerateWithSystem(ctx context.Context, system, prompt string) (string, error) {
	fullPrompt := fmt.Sprintf("%s\n\n%s", system, prompt)
	return a.Generate(ctx, fullPrompt)
}
// MockLLM implements rag.LLMInterface for demonstration without API keys
type MockLLM struct{}
func (m *MockLLM) Generate(ctx context.Context, prompt string) (string, error) {
// Return a mock response with entity extraction
return `{
"entities": [
{
"id": "entity_1",
"name": "LangGraph",
"type": "TECHNOLOGY",
"description": "A library for building stateful, multi-actor applications with LLMs",
"properties": {"category": "framework"}
},
{
"id": "entity_2",
"name": "LightRAG",
"type": "TECHNOLOGY",
"description": "A lightweight Retrieval-Augmented Generation framework",
"properties": {"category": "rag"}
}
]
}`, nil
}
func (m *MockLLM) GenerateWithConfig(ctx context.Context, prompt string, config map[string]any) (string, error) {
return m.Generate(ctx, prompt)
}
func (m *MockLLM) GenerateWithSystem(ctx context.Context, system, prompt string) (string, error) {
return m.Generate(ctx, prompt)
}
func main() {
ctx := context.Background()
// Check if OpenAI API key is set, not empty, and looks valid
apiKey := os.Getenv("OPENAI_API_KEY")
useOpenAI := apiKey != "" && len(apiKey) > 10 // Basic validation
var llm rag.LLMInterface
if useOpenAI {
// Use real OpenAI LLM with explicit token
baseLLM, err := openai.New()
if err != nil {
log.Printf("Failed to create OpenAI LLM: %v", err)
log.Println("Falling back to Mock LLM")
llm = &MockLLM{}
} else {
llm = NewOpenAILLMAdapter(baseLLM)
fmt.Println("Using OpenAI LLM for entity extraction")
}
} else {
// API key not set or invalid
if apiKey != "" && len(apiKey) <= 10 {
fmt.Println("Warning: OPENAI_API_KEY appears to be invalid (too short)")
}
fmt.Println("Using Mock LLM for demonstration")
fmt.Println("Note: Set a valid OPENAI_API_KEY environment variable to use real OpenAI LLM")
fmt.Println()
llm = &MockLLM{}
}
// Create embedder
embedder := store.NewMockEmbedder(128)
// Create knowledge graph (in-memory)
kg, err := store.NewKnowledgeGraph("memory://")
if err != nil {
log.Fatalf("Failed to create knowledge graph: %v", err)
}
// Create vector store (in-memory)
vectorStore := store.NewInMemoryVectorStore(embedder)
// Configure LightRAG
config := rag.LightRAGConfig{
Mode: "hybrid", // naive, local, global, or hybrid
ChunkSize: 512,
ChunkOverlap: 50,
MaxEntitiesPerChunk: 20,
EntityExtractionThreshold: 0.5,
LocalConfig: rag.LocalRetrievalConfig{
TopK: 10,
MaxHops: 2,
IncludeDescriptions: true,
},
GlobalConfig: rag.GlobalRetrievalConfig{
MaxCommunities: 5,
IncludeHierarchy: false,
MaxHierarchyDepth: 3,
},
HybridConfig: rag.HybridRetrievalConfig{
LocalWeight: 0.5,
GlobalWeight: 0.5,
FusionMethod: "rrf",
RFFK: 60,
},
EnableCommunityDetection: true,
}
// Create LightRAG engine
lightrag, err := engine.NewLightRAGEngine(config, llm, embedder, kg, vectorStore)
if err != nil {
log.Fatalf("Failed to create LightRAG engine: %v", err)
}
fmt.Println("=== LightRAG Simple Example ===")
fmt.Println()
// Sample documents about technology
documents := []rag.Document{
{
ID: "doc1",
Content: `LangGraph is a library for building stateful, multi-actor applications with LLMs.
It extends LangChain Expression Language with the ability to coordinate multiple chains
across multiple steps of computation in a cyclic manner. LangGraph is designed to make it
easy to build agents and multi-agent systems.`,
Metadata: map[string]any{
"source": "langgraph_intro.txt",
"topic": "LangGraph",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
{
ID: "doc2",
Content: `LightRAG is a lightweight Retrieval-Augmented Generation framework that combines
low-level semantic chunks with high-level graph structures. It supports four retrieval modes:
naive, local, global, and hybrid. LightRAG provides a simple API for building knowledge graphs
and performing semantic search.`,
Metadata: map[string]any{
"source": "lightrag_overview.txt",
"topic": "LightRAG",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
{
ID: "doc3",
Content: `Knowledge graphs are structured representations of knowledge that use entities
and relationships to model information. They are particularly useful for RAG systems because
they enable traversing related concepts and finding multi-hop connections between pieces of
information. Popular knowledge graph databases include Neo4j, FalkorDB, and GraphDB.`,
Metadata: map[string]any{
"source": "knowledge_graphs.txt",
"topic": "Knowledge Graphs",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
{
ID: "doc4",
Content: `Vector databases are designed to store and query high-dimensional vectors efficiently.
They use approximate nearest neighbor (ANN) algorithms to quickly find similar vectors.
Popular vector databases include Pinecone, Weaviate, Chroma, and Qdrant. They are essential
for semantic search and RAG applications.`,
Metadata: map[string]any{
"source": "vector_databases.txt",
"topic": "Vector Databases",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
{
ID: "doc5",
Content: `RAG (Retrieval-Augmented Generation) combines retrieval systems with language models
to improve answer quality. It retrieves relevant documents from a knowledge base and uses them
to augment the context provided to the language model. This helps reduce hallucinations and
improves factual accuracy of responses.`,
Metadata: map[string]any{
"source": "rag_intro.txt",
"topic": "RAG",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}
fmt.Println("Adding documents to LightRAG...")
err = lightrag.AddDocuments(ctx, documents)
if err != nil {
log.Fatalf("Failed to add documents: %v", err)
}
fmt.Printf("Successfully indexed %d documents\n\n", len(documents))
// Display configuration
fmt.Println("=== LightRAG Configuration ===")
fmt.Printf("Mode: %s\n", config.Mode)
fmt.Printf("Chunk Size: %d\n", config.ChunkSize)
fmt.Printf("Chunk Overlap: %d\n", config.ChunkOverlap)
fmt.Printf("Local Config: TopK=%d, MaxHops=%d\n",
config.LocalConfig.TopK, config.LocalConfig.MaxHops)
fmt.Printf("Global Config: MaxCommunities=%d\n", config.GlobalConfig.MaxCommunities)
fmt.Printf("Hybrid Config: LocalWeight=%.2f, GlobalWeight=%.2f, FusionMethod=%s\n",
config.HybridConfig.LocalWeight, config.HybridConfig.GlobalWeight, config.HybridConfig.FusionMethod)
fmt.Println()
// Test different retrieval modes
modes := []string{"naive", "local", "global", "hybrid"}
queries := []string{
"What is LightRAG and how does it work?",
"Explain the relationship between RAG and knowledge graphs",
"What are the benefits of using vector databases?",
}
for _, mode := range modes {
fmt.Printf("=== Testing %s Mode ===\n", strings.ToUpper(mode))
// Update configuration for this mode
testConfig := config
testConfig.Mode = mode
for i, query := range queries {
fmt.Printf("\n--- Query %d: %s ---\n", i+1, query)
// Query with the current mode
result, err := lightrag.QueryWithConfig(ctx, query, &rag.RetrievalConfig{
K: 3,
ScoreThreshold: 0.3,
SearchType: mode,
IncludeScores: true,
})
if err != nil {
log.Printf("Query failed: %v", err)
continue
}
// Display results
fmt.Printf("Retrieved %d sources\n", len(result.Sources))
fmt.Printf("Confidence: %.2f\n", result.Confidence)
fmt.Printf("Response Time: %v\n", result.ResponseTime)
// Show metadata
if modeVal, ok := result.Metadata["mode"].(string); ok {
fmt.Printf("Mode: %s\n", modeVal)
}
// Show first few results
fmt.Println("\nTop Sources:")
for j, source := range result.Sources {
if j >= 2 {
break
}
fmt.Printf(" [%d] %s\n", j+1, truncate(source.Content, 100))
}
}
fmt.Println()
}
// Display metrics
fmt.Println("\n=== LightRAG Metrics ===")
metrics := lightrag.GetMetrics()
fmt.Printf("Total Queries: %d\n", metrics.TotalQueries)
fmt.Printf("Total Documents: %d\n", metrics.TotalDocuments)
fmt.Printf("Average Latency: %v\n", metrics.AverageLatency)
fmt.Printf("Indexing Latency: %v\n", metrics.IndexingLatency)
}
// truncate shortens s to at most maxLen runes, appending "..." when it was
// cut. Operating on runes rather than bytes (the original sliced s[:maxLen])
// avoids splitting a multi-byte UTF-8 character in the middle, which would
// render as garbage in terminal output. For pure-ASCII input the behavior is
// unchanged.
func truncate(s string, maxLen int) string {
	runes := []rune(s)
	if len(runes) <= maxLen {
		return s
	}
	return string(runes[:maxLen]) + "..."
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/reflexive_metacognitive/main.go | examples/reflexive_metacognitive/main.go | // Reflexive Metacognitive Agent
//
// This example implements the "Reflexive Metacognitive Agent" architecture
// from the Agentic Architectures series by Fareed Khan.
//
// Architecture Overview:
//
// A metacognitive agent maintains an explicit "self-model" โ a structured
// representation of its own knowledge, tools, and boundaries. When faced with
// a task, its first step is not to solve the problem, but to *analyze the
// problem in the context of its self-model*. It asks internal questions like:
//
// - "Do I have sufficient knowledge to answer this confidently?"
// - "Is this topic within my designated area of expertise?"
// - "Do I have a specific tool that is required to answer this safely?"
// - "Is the user's query about a high-stakes topic where an error would be dangerous?"
//
// Based on the answers, it chooses a strategy:
// 1. REASON_DIRECTLY: For high-confidence, low-risk queries within its knowledge
// 2. USE_TOOL: When the query requires a specific capability via a tool
// 3. ESCALATE: For low-confidence, high-risk, or out-of-scope queries
//
// This pattern is essential for:
// - High-Stakes Advisory Systems (healthcare, law, finance)
// - Autonomous Systems (robots assessing their ability to perform tasks safely)
// - Complex Tool Orchestrators (choosing the right API from many options)
//
// Reference: https://github.com/FareedKhan-dev/all-agentic-architectures/blob/main/17_reflexive_metacognitive.ipynb
package main
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/smallnest/langgraphgo/graph"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
)
// ==================== Data Models ====================
// AgentSelfModel is a structured representation of the agent's capabilities
// and limitations — the foundation of its self-awareness. The metacognitive
// analysis step compares every incoming query against this model.
type AgentSelfModel struct {
	Name                string   // Agent's display name, shown in prompts
	Role                string   // Human-readable role description, embedded in prompts
	KnowledgeDomain     []string // Topics the agent is knowledgeable about
	AvailableTools      []string // Tools the agent can use
	ConfidenceThreshold float64  // Confidence below which the agent must escalate
}

// MetacognitiveAnalysis represents the agent's self-analysis of a query,
// parsed from the LLM's structured "KEY: value" response.
type MetacognitiveAnalysis struct {
	Confidence float64           // 0.0 to 1.0 - confidence in ability to answer safely
	Strategy   string            // "reason_directly", "use_tool", or "escalate"
	Reasoning  string            // Justification for the chosen confidence and strategy
	ToolToUse  string            // If strategy is "use_tool", the name of the tool
	ToolArgs   map[string]string // If strategy is "use_tool", the arguments
}

// AgentState represents the state passed between nodes in the graph.
type AgentState struct {
	UserQuery             string                 // Raw user query being handled
	SelfModel             *AgentSelfModel        // Static self-model for this agent
	MetacognitiveAnalysis *MetacognitiveAnalysis // Filled in by the analysis node; nil before it runs
	ToolOutput            string                 // Raw output of a tool call, if one was made
	FinalResponse         string                 // Answer ultimately returned to the user
}
// ==================== Tools ====================
// DrugInteractionChecker is a mock tool to check for drug interactions
type DrugInteractionChecker struct {
knownInteractions map[string]string
}
// Check checks for interactions between two drugs
func (d *DrugInteractionChecker) Check(drugA, drugB string) string {
key := drugA + "+" + drugB
if interaction, ok := d.knownInteractions[key]; ok {
return fmt.Sprintf("Interaction Found: %s", interaction)
}
return "No known significant interactions found. However, always consult a pharmacist or doctor."
}
// NewDrugInteractionChecker creates a new drug interaction checker
func NewDrugInteractionChecker() *DrugInteractionChecker {
return &DrugInteractionChecker{
knownInteractions: map[string]string{
"ibuprofen+lisinopril": "Moderate risk: Ibuprofen may reduce the blood pressure-lowering effects of lisinopril. Monitor blood pressure.",
"aspirin+warfarin": "High risk: Increased risk of bleeding. This combination should be avoided unless directed by a doctor.",
},
}
}
var drugTool = NewDrugInteractionChecker()
// ==================== Graph Nodes ====================
// MetacognitiveAnalysisNode performs the self-reflection step
func MetacognitiveAnalysisNode(ctx context.Context, state map[string]any) (map[string]any, error) {
agentState := state["agent_state"].(*AgentState)
fmt.Println("\nโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
fmt.Println("โ ๐ค Agent is performing metacognitive analysis... โ")
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
// Create prompt for metacognitive analysis
prompt := fmt.Sprintf(`You are a metacognitive reasoning engine for an AI assistant. Analyze the user's query based on the agent's self-model.
**Agent's Self-Model:**
- Name: %s
- Role: %s
- Knowledge Domain: %s
- Available Tools: %s
**Strategy Rules:**
1. **escalate**: Emergency, out-of-domain, or doubt.
2. **use_tool**: Explicitly requires 'drug_interaction_checker'.
3. **reason_directly**: In-domain, low-risk.
FORMAT:
CONFIDENCE: [0.0 to 1.0]
STRATEGY: [escalate|use_tool|reason_directly]
TOOL_TO_USE: [tool name or "none"]
DRUG_A: [drug name or "none"]
DRUG_B: [drug name or "none"]
REASONING: [justification]
**User Query:** %s`,
agentState.SelfModel.Name,
agentState.SelfModel.Role,
strings.Join(agentState.SelfModel.KnowledgeDomain, ", "),
strings.Join(agentState.SelfModel.AvailableTools, ", "),
agentState.UserQuery)
// Call LLM
llm := state["llm"].(llms.Model)
resp, err := llms.GenerateFromSinglePrompt(ctx, llm, prompt)
if err != nil {
return nil, fmt.Errorf("metacognitive analysis LLM call failed: %w", err)
}
// Parse the response
analysis := parseMetacognitiveAnalysis(resp)
agentState.MetacognitiveAnalysis = analysis
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
fmt.Printf("โ Confidence: %.2f โ\n", analysis.Confidence)
fmt.Printf("โ Strategy: %s โ\n", analysis.Strategy)
fmt.Printf("โ Reasoning: %s โ\n", truncate(analysis.Reasoning, 50))
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
return state, nil
}
// ReasonDirectlyNode handles high-confidence, low-risk queries
func ReasonDirectlyNode(ctx context.Context, state map[string]any) (map[string]any, error) {
agentState := state["agent_state"].(*AgentState)
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
fmt.Println("โ โ
Confident in direct answer. Generating response... โ")
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
prompt := fmt.Sprintf(`You are %s. Provide a helpful, non-prescriptive answer. Reminder: not a doctor.
Query: %s`,
agentState.SelfModel.Role,
agentState.UserQuery)
llm := state["llm"].(llms.Model)
resp, err := llms.GenerateFromSinglePrompt(ctx, llm, prompt)
if err != nil {
return nil, fmt.Errorf("reason directly LLM call failed: %w", err)
}
agentState.FinalResponse = resp
return state, nil
}
// CallToolNode handles queries that require specialized tools
func CallToolNode(ctx context.Context, state map[string]any) (map[string]any, error) {
agentState := state["agent_state"].(*AgentState)
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
fmt.Printf("โ ๐ ๏ธ Confidence requires tool use. Calling `%s`... โ\n", agentState.MetacognitiveAnalysis.ToolToUse)
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
analysis := agentState.MetacognitiveAnalysis
if analysis.ToolToUse == "drug_interaction_checker" {
drugA := analysis.ToolArgs["drug_a"]
drugB := analysis.ToolArgs["drug_b"]
toolOutput := drugTool.Check(drugA, drugB)
agentState.ToolOutput = toolOutput
} else {
agentState.ToolOutput = "Error: Tool not found."
}
return state, nil
}
// SynthesizeToolResponseNode combines tool output with a helpful response
func SynthesizeToolResponseNode(ctx context.Context, state map[string]any) (map[string]any, error) {
agentState := state["agent_state"].(*AgentState)
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
fmt.Println("โ ๐ Synthesizing final response from tool output... โ")
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
prompt := fmt.Sprintf(`You are %s. Present tool information clearly. Disclaimer: not a doctor.
Original Query: %s
Tool Output: %s`,
agentState.SelfModel.Role,
agentState.UserQuery,
agentState.ToolOutput)
llm := state["llm"].(llms.Model)
resp, err := llms.GenerateFromSinglePrompt(ctx, llm, prompt)
if err != nil {
return nil, fmt.Errorf("synthesize tool response LLM call failed: %w", err)
}
agentState.FinalResponse = resp
return state, nil
}
// EscalateToHumanNode handles low-confidence or high-risk queries
func EscalateToHumanNode(ctx context.Context, state map[string]any) (map[string]any, error) {
agentState := state["agent_state"].(*AgentState)
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
fmt.Println("โ ๐จ Low confidence or high risk detected. Escalating. โ")
fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
response := "I am an AI assistant and not qualified to provide information on this topic. " +
"**Please consult a qualified medical professional immediately.**"
agentState.FinalResponse = response
return state, nil
}
// ==================== Routing Logic ====================
// RouteStrategy determines the next node based on the metacognitive
// analysis. Any unrecognized strategy falls back to escalation, the safe
// default for a medical assistant.
func RouteStrategy(ctx context.Context, state map[string]any) string {
	analysis := state["agent_state"].(*AgentState).MetacognitiveAnalysis
	if analysis.Strategy == "reason_directly" {
		return "reason"
	}
	if analysis.Strategy == "use_tool" {
		return "call_tool"
	}
	// "escalate" and anything unrecognized both route to the escalation node.
	return "escalate"
}
// ==================== Parsing Helpers ====================
// parseMetacognitiveAnalysis parses the LLM's line-oriented "KEY: value"
// response into a MetacognitiveAnalysis. Defaults are deliberately
// conservative (low confidence, escalate, full response as reasoning) so
// that a malformed or unparseable response fails safe. The original
// duplicated the same split/trim logic in six branches; it is factored into
// fieldValue with behavior preserved.
func parseMetacognitiveAnalysis(response string) *MetacognitiveAnalysis {
	analysis := &MetacognitiveAnalysis{
		Confidence: 0.1,
		Strategy:   "escalate",
		Reasoning:  response,
		ToolToUse:  "none",
		ToolArgs:   make(map[string]string),
	}

	for _, line := range strings.Split(response, "\n") {
		line = strings.TrimSpace(line)
		upper := strings.ToUpper(line)
		switch {
		case strings.HasPrefix(upper, "CONFIDENCE:"):
			if v, ok := fieldValue(line); ok {
				var confidence float64
				// A non-numeric value leaves confidence at 0, which is even
				// more conservative than the 0.1 default; the Sscanf error
				// is intentionally ignored to fail safe.
				fmt.Sscanf(v, "%f", &confidence)
				analysis.Confidence = confidence
			}
		case strings.HasPrefix(upper, "STRATEGY:"):
			if v, ok := fieldValue(line); ok {
				analysis.Strategy = strings.ToLower(v)
			}
		case strings.HasPrefix(upper, "TOOL_TO_USE:"):
			if v, ok := fieldValue(line); ok {
				analysis.ToolToUse = strings.ToLower(v)
			}
		case strings.HasPrefix(upper, "DRUG_A:"):
			if v, ok := fieldValue(line); ok {
				analysis.ToolArgs["drug_a"] = v
			}
		case strings.HasPrefix(upper, "DRUG_B:"):
			if v, ok := fieldValue(line); ok {
				analysis.ToolArgs["drug_b"] = v
			}
		case strings.HasPrefix(upper, "REASONING:"):
			if v, ok := fieldValue(line); ok {
				analysis.Reasoning = v
			}
		}
	}
	return analysis
}

// fieldValue splits a "KEY: value" line at the first colon and returns the
// trimmed value; ok is false when the line contains no colon.
func fieldValue(line string) (string, bool) {
	_, v, ok := strings.Cut(line, ":")
	if !ok {
		return "", false
	}
	return strings.TrimSpace(v), true
}
// truncate shortens s to at most maxLen runes, appending "..." when it was
// cut. Operating on runes rather than bytes (the original sliced s[:maxLen])
// avoids splitting a multi-byte UTF-8 character — the reasoning text this is
// applied to may contain arbitrary model output — which would render as
// garbage in the boxed console display. Pure-ASCII behavior is unchanged.
func truncate(s string, maxLen int) string {
	runes := []rune(s)
	if len(runes) <= maxLen {
		return s
	}
	return string(runes[:maxLen]) + "..."
}
// ==================== Main Function ====================
func main() {
if os.Getenv("OPENAI_API_KEY") == "" {
log.Fatal("OPENAI_API_KEY environment variable is required")
}
fmt.Println("=== ๐ Reflexive Metacognitive Agent Architecture ===")
// Create LLM
llm, err := openai.New()
if err != nil {
log.Fatal(err)
}
// Define the agent's self-model
medicalAgentModel := &AgentSelfModel{
Name: "TriageBot-3000",
Role: "A helpful AI assistant for providing preliminary medical information",
KnowledgeDomain: []string{"common_cold", "influenza", "allergies", "headaches", "basic_first_aid"},
AvailableTools: []string{"drug_interaction_checker"},
ConfidenceThreshold: 0.6,
}
// Create the metacognitive graph with map state
workflow := graph.NewStateGraph[map[string]any]()
// Add nodes
workflow.AddNode("analyze", "Metacognitive analysis", MetacognitiveAnalysisNode)
workflow.AddNode("reason", "Reason directly", ReasonDirectlyNode)
workflow.AddNode("call_tool", "Call tool", CallToolNode)
workflow.AddNode("synthesize", "Synthesize tool response", SynthesizeToolResponseNode)
workflow.AddNode("escalate", "Escalate to human", EscalateToHumanNode)
// Set entry point
workflow.SetEntryPoint("analyze")
// Add conditional edges from analyze node
workflow.AddConditionalEdge("analyze", RouteStrategy)
// Add edges for each strategy
workflow.AddEdge("reason", graph.END)
workflow.AddEdge("call_tool", "synthesize")
workflow.AddEdge("synthesize", graph.END)
workflow.AddEdge("escalate", graph.END)
// Compile the graph
app, err := workflow.Compile()
if err != nil {
log.Fatalf("Failed to compile graph: %v", err)
}
ctx := context.Background()
// Test queries
testQueries := []struct {
name string
query string
}{
{
name: "Simple, In-Scope, Low-Risk Query",
query: "What are the symptoms of a common cold?",
},
{
name: "Specific Query Requiring a Tool",
query: "Is it safe to take Ibuprofen if I am also taking Lisinopril?",
},
{
name: "High-Stakes, Emergency Query",
query: "I have a crushing pain in my chest and my left arm feels numb, what should I do?",
},
{
name: "Out-of-Scope Query",
query: "What are the latest treatment options for stage 4 pancreatic cancer?",
},
}
for i, test := range testQueries {
fmt.Printf("\n--- Test %d: %s ---", i+1, test.name)
agentState := &AgentState{
UserQuery: test.query,
SelfModel: medicalAgentModel,
}
input := map[string]any{
"llm": llm,
"agent_state": agentState,
}
result, err := app.Invoke(ctx, input)
if err != nil {
log.Printf("Error: %v\n", err)
continue
}
finalState := result["agent_state"].(*AgentState)
fmt.Println("\n๐ Response:")
fmt.Println(finalState.FinalResponse)
fmt.Println(strings.Repeat("=", 70))
}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/tool_tavily/main.go | examples/tool_tavily/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/smallnest/langgraphgo/tool"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
"github.com/tmc/langchaingo/tools"
)
func main() {
// Check for API keys
if os.Getenv("TAVILY_API_KEY") == "" {
log.Fatal("Please set TAVILY_API_KEY environment variable")
}
if os.Getenv("OPENAI_API_KEY") == "" && os.Getenv("DEEPSEEK_API_KEY") == "" {
log.Fatal("Please set OPENAI_API_KEY or DEEPSEEK_API_KEY environment variable")
}
ctx := context.Background()
// 1. Initialize the LLM
llm, err := openai.New()
if err != nil {
log.Fatalf("Failed to create LLM: %v", err)
}
// 2. Initialize the Tool
tavilyTool, err := tool.NewTavilySearch("",
tool.WithTavilySearchDepth("advanced"),
)
if err != nil {
log.Fatal(err)
}
// 3. Create the ReAct Agent using map state convenience function
agent, err := prebuilt.CreateAgentMap(llm, []tools.Tool{tavilyTool}, 20)
if err != nil {
log.Fatalf("Failed to create agent: %v", err)
}
// 4. Run the Agent
query := "Who won the Best Picture Oscar in 2025?"
fmt.Printf("User: %s\n\n", query)
inputs := map[string]any{
"messages": []llms.MessageContent{
llms.TextParts(llms.ChatMessageTypeHuman, query),
},
}
response, err := agent.Invoke(ctx, inputs)
if err != nil {
log.Fatalf("Agent failed: %v", err)
}
// 5. Print the Result
messages, ok := response["messages"].([]llms.MessageContent)
if ok {
lastMsg := messages[len(messages)-1]
for _, part := range lastMsg.Parts {
if text, ok := part.(llms.TextContent); ok {
fmt.Printf("\nAgent: %s\n", text.Text)
}
}
}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/arith_example/main.go | examples/arith_example/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
// Initialize LLM
model, err := openai.New()
if err != nil {
log.Fatal(err)
}
// Create state graph
g := graph.NewStateGraph[map[string]any]()
// Add arith node that calls LLM to calculate expression
g.AddNode("arith", "arith", func(ctx context.Context, state map[string]any) (map[string]any, error) {
expression, ok := state["expression"].(string)
if !ok {
return nil, fmt.Errorf("invalid expression")
}
// Call LLM to calculate
prompt := fmt.Sprintf("Calculate: %s. Only return the number.", expression)
result, err := model.Call(ctx, prompt)
if err != nil {
return nil, err
}
// Update state with result
state["result"] = result
return state, nil
})
g.AddEdge("arith", graph.END)
g.SetEntryPoint("arith")
runnable, err := g.Compile()
if err != nil {
log.Fatal(err)
}
// Invoke with expression in state
res, err := runnable.Invoke(context.Background(), map[string]any{
"expression": "123 + 456",
})
if err != nil {
log.Fatal(err)
}
fmt.Printf("123 + 456 = %s\n", res["result"])
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/rag_falkordb_fast/main.go | examples/rag_falkordb_fast/main.go | package main
import (
"context"
"fmt"
"log"
"strings"
"time"
"github.com/smallnest/langgraphgo/adapter"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/store"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
ctx := context.Background()
// Initialize LLM
ollm, err := openai.New()
if err != nil {
log.Fatalf("Failed to create LLM: %v", err)
}
// Create adapter for our LLM interface
llm := adapter.NewOpenAIAdapter(ollm)
// Create FalkorDB knowledge graph
fmt.Println("Initializing FalkorDB knowledge graph...")
falkorDBConnStr := "falkordb://localhost:6379/fast_rag_graph"
kg, err := store.NewFalkorDBGraph(falkorDBConnStr)
if err != nil {
log.Fatalf("Failed to create FalkorDB knowledge graph: %v", err)
}
// Close the connection when done (type assert to access Close method)
defer func() {
if falkorDB, ok := kg.(*store.FalkorDBGraph); ok {
falkorDB.Close()
}
}()
fmt.Println("Fast RAG with FalkorDB Knowledge Graph")
fmt.Println("=====================================\n")
// ็คบไพ1๏ผๆๅจๆทปๅ ้ขๅฎไนๅฎไฝๅๅ
ณ็ณป๏ผๅฟซ้ๆนๅผ๏ผ
fmt.Println("1. Adding predefined entities and relationships...")
startTime := time.Now()
// ๆๅจๅฎไนๅฎไฝ๏ผ้ฟๅ
LLM่ฐ็จ๏ผ
entities := []*rag.Entity{
{
ID: "apple_inc",
Name: "Apple Inc.",
Type: "ORGANIZATION",
Properties: map[string]any{
"industry": "Technology",
"founded": "1976",
"location": "Cupertino, California",
},
},
{
ID: "steve_jobs",
Name: "Steve Jobs",
Type: "PERSON",
Properties: map[string]any{
"role": "Co-founder",
"company": "Apple Inc.",
},
},
{
ID: "iphone",
Name: "iPhone",
Type: "PRODUCT",
Properties: map[string]any{
"category": "Smartphone",
"company": "Apple Inc.",
},
},
{
ID: "microsoft",
Name: "Microsoft",
Type: "ORGANIZATION",
Properties: map[string]any{
"industry": "Technology",
"founded": "1975",
"location": "Redmond, Washington",
},
},
{
ID: "bill_gates",
Name: "Bill Gates",
Type: "PERSON",
Properties: map[string]any{
"role": "Co-founder",
"company": "Microsoft",
},
},
{
ID: "windows",
Name: "Windows",
Type: "PRODUCT",
Properties: map[string]any{
"category": "Operating System",
"company": "Microsoft",
},
},
{
ID: "machine_learning",
Name: "Machine Learning",
Type: "CONCEPT",
Properties: map[string]any{
"category": "Artificial Intelligence",
"description": "Subset of AI that enables computers to learn from data",
},
},
}
// ๆทปๅ ๅฎไฝๅฐ็ฅ่ฏๅพ่ฐฑ
for _, entity := range entities {
err := kg.AddEntity(ctx, entity)
if err != nil {
log.Printf("Failed to add entity %s: %v", entity.ID, err)
}
}
// ๆๅจๅฎไนๅ
ณ็ณป
relationships := []*rag.Relationship{
{
ID: "steve_founded_apple",
Source: "steve_jobs",
Target: "apple_inc",
Type: "FOUNDED",
},
{
ID: "apple_makes_iphone",
Source: "apple_inc",
Target: "iphone",
Type: "PRODUCES",
},
{
ID: "bill_founded_microsoft",
Source: "bill_gates",
Target: "microsoft",
Type: "FOUNDED",
},
{
ID: "microsoft_makes_windows",
Source: "microsoft",
Target: "windows",
Type: "PRODUCES",
},
{
ID: "apple_vs_microsoft",
Source: "apple_inc",
Target: "microsoft",
Type: "COMPETES_WITH",
},
{
ID: "jobs_vs_gates",
Source: "steve_jobs",
Target: "bill_gates",
Type: "RIVALRY",
},
{
ID: "ml_used_by_tech",
Source: "machine_learning",
Target: "apple_inc",
Type: "USED_BY",
},
{
ID: "ml_used_by_microsoft",
Source: "machine_learning",
Target: "microsoft",
Type: "USED_BY",
},
}
// ๆทปๅ ๅ
ณ็ณปๅฐ็ฅ่ฏๅพ่ฐฑ
for _, rel := range relationships {
err := kg.AddRelationship(ctx, rel)
if err != nil {
log.Printf("Failed to add relationship %s: %v", rel.ID, err)
}
}
entityAddTime := time.Since(startTime)
fmt.Printf("Added %d entities and %d relationships in %v\n\n", len(entities), len(relationships), entityAddTime)
// ็คบไพ2๏ผๅฟซ้ๆฅ่ฏข็คบไพ
fmt.Println("2. Fast Query Examples")
fmt.Println("=====================")
queries := []struct {
description string
query *rag.GraphQuery
}{
{
description: "Find all organizations",
query: &rag.GraphQuery{
EntityTypes: []string{"ORGANIZATION"},
Limit: 10,
},
},
{
description: "Find all people",
query: &rag.GraphQuery{
EntityTypes: []string{"PERSON"},
Limit: 10,
},
},
{
description: "Find all products",
query: &rag.GraphQuery{
EntityTypes: []string{"PRODUCT"},
Limit: 10,
},
},
{
description: "Find all entities (limit 5)",
query: &rag.GraphQuery{
Limit: 5,
},
},
}
for i, testQuery := range queries {
fmt.Printf("Query %d: %s\n", i+1, testQuery.description)
queryStart := time.Now()
result, err := kg.Query(ctx, testQuery.query)
queryTime := time.Since(queryStart)
if err != nil {
log.Printf("Query failed: %v", err)
continue
}
fmt.Printf(" Found %d entities and %d relationships in %v\n",
len(result.Entities), len(result.Relationships), queryTime)
for j, entity := range result.Entities {
fmt.Printf(" [%d] %s (%s)\n", j+1, entity.Name, entity.Type)
if entity.Properties != nil {
if industry, ok := entity.Properties["industry"]; ok {
fmt.Printf(" Industry: %v\n", industry)
}
if role, ok := entity.Properties["role"]; ok {
fmt.Printf(" Role: %v\n", role)
}
}
}
fmt.Println(strings.Repeat("-", 50))
}
// ็คบไพ3๏ผๅ
ณ็ณป้ๅ
fmt.Println("\n3. Relationship Traversal Examples")
fmt.Println("===================================")
// ๆฅๆพไธApple็ธๅ
ณ็ๅฎไฝ
fmt.Println("Entities related to Apple Inc.:")
startTime = time.Now()
relatedEntities, err := kg.GetRelatedEntities(ctx, "apple_inc", 2)
traversalTime := time.Since(startTime)
if err != nil {
log.Printf("Failed to get related entities: %v", err)
} else {
fmt.Printf("Found %d related entities in %v:\n", len(relatedEntities), traversalTime)
for i, entity := range relatedEntities {
if entity.ID != "apple_inc" { // ๆ้คApple่ชๅทฑ
fmt.Printf(" [%d] %s (%s)\n", i, entity.Name, entity.Type)
}
}
}
// ็คบไพ4๏ผ็ฎๅ็ๅบไบ็ฅ่ฏ็้ฎ็ญ
fmt.Println("\n4. Knowledge-Based Q&A")
fmt.Println("======================")
questions := []string{
"Who founded Apple Inc.?",
"What does Microsoft produce?",
"Which people are related to these companies?",
"How is machine learning used in technology?",
}
for i, question := range questions {
fmt.Printf("\nQuestion %d: %s\n", i+1, question)
// ็ฎๅ็ๅบไบๅพ็ๅ
ณ้ฎ่ฏๅน้
answer := generateAnswerFromKnowledgeGraph(ctx, kg, question, llm)
fmt.Printf("Answer: %s\n", answer)
}
fmt.Println("\n=== Fast RAG Demo Complete ===")
fmt.Println("Performance summary:")
fmt.Printf("- Entity addition: %v\n", entityAddTime)
fmt.Println("- Queries completed quickly (no LLM calls for extraction)")
fmt.Println("- Knowledge graph ready for RAG applications")
}
// generateAnswerFromKnowledgeGraph generates answers using knowledge graph without full RAG pipeline
func generateAnswerFromKnowledgeGraph(ctx context.Context, kg rag.KnowledgeGraph, question string, llm rag.LLMInterface) string {
// ็ฎๅ็ๅ
ณ้ฎ่ฏๅน้
ๆฅๆฅๆพ็ธๅ
ณๅฎไฝ
questionLower := strings.ToLower(question)
var relevantEntities []*rag.Entity
var relevantRelationships []*rag.Relationship
// ๆฅๆพ็ธๅ
ณ็ๅฎไฝ๏ผ็ฎๅ็ๆฌ๏ผ
allEntitiesQuery := &rag.GraphQuery{
Limit: 20,
}
result, err := kg.Query(ctx, allEntitiesQuery)
if err != nil {
return "I couldn't access the knowledge graph to answer your question."
}
// ็ฎๅ็ๅ
ณ้ฎ่ฏๅน้
for _, entity := range result.Entities {
if strings.Contains(questionLower, strings.ToLower(entity.Name)) ||
strings.Contains(questionLower, strings.ToLower(entity.Type)) {
relevantEntities = append(relevantEntities, entity)
}
}
// ๆฅๆพ็ธๅ
ณๅ
ณ็ณป
for _, rel := range result.Relationships {
relLower := strings.ToLower(rel.Type)
if strings.Contains(questionLower, relLower) {
relevantRelationships = append(relevantRelationships, rel)
}
}
// ๆๅปบไธไธๆ
var context strings.Builder
if len(relevantEntities) > 0 {
context.WriteString("Relevant entities:\n")
for _, entity := range relevantEntities {
context.WriteString(fmt.Sprintf("- %s (%s): ", entity.Name, entity.Type))
if entity.Properties != nil {
for k, v := range entity.Properties {
context.WriteString(fmt.Sprintf("%s=%v ", k, v))
}
}
context.WriteString("\n")
}
}
if len(relevantRelationships) > 0 {
context.WriteString("\nRelevant relationships:\n")
for _, rel := range relevantRelationships {
context.WriteString(fmt.Sprintf("- %s %s %s\n", rel.Source, rel.Type, rel.Target))
}
}
if context.Len() == 0 {
// ๅฆๆๆฒกๆๆพๅฐ็ดๆฅ็ธๅ
ณไฟกๆฏ๏ผ่ฟๅไธ่ฌๆงๅ็ญ
if strings.Contains(questionLower, "apple") {
return "Apple Inc. is a technology company founded by Steve Jobs. It produces products like the iPhone and competes with Microsoft."
} else if strings.Contains(questionLower, "microsoft") {
return "Microsoft is a technology company founded by Bill Gates. It produces the Windows operating system and competes with Apple."
} else if strings.Contains(questionLower, "machine learning") {
return "Machine learning is a concept used by technology companies like Apple and Microsoft for various applications."
} else {
return "I found some information in the knowledge graph, but need more specific details to answer your question accurately."
}
}
// ไฝฟ็จLLM็ๆๅ็ญ
prompt := fmt.Sprintf(`Based on the following knowledge graph information, answer the question briefly and accurately.
Question: %s
Knowledge Graph Context:
%s
Answer:`, question, context.String())
answer, err := llm.Generate(ctx, prompt)
if err != nil {
return "I encountered an error while generating an answer based on the knowledge graph."
}
return answer
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/rag_conditional/main.go | examples/rag_conditional/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/retriever"
"github.com/smallnest/langgraphgo/rag/store"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
ctx := context.Background()
llm, err := openai.New()
if err != nil {
log.Fatalf("Failed to create LLM: %v", err)
}
documents := []rag.Document{
{
Content: "The company policy allows remote work for 3 days a week.",
Metadata: map[string]any{"source": "policy"},
},
{
Content: "Employees must be in the office on Mondays and Fridays.",
Metadata: map[string]any{"source": "policy"},
},
}
embedder := store.NewMockEmbedder(128)
vectorStore := store.NewInMemoryVectorStore(embedder)
texts := make([]string, len(documents))
for i, doc := range documents {
texts[i] = doc.Content
}
embeddings, _ := embedder.EmbedDocuments(ctx, texts)
vectorStore.AddBatch(ctx, documents, embeddings)
retriever := retriever.NewVectorStoreRetriever(vectorStore, embedder, 2)
config := rag.DefaultPipelineConfig()
config.Retriever = retriever
config.LLM = llm
config.ScoreThreshold = 0.5
config.UseFallback = true
pipeline := rag.NewRAGPipeline(config)
err = pipeline.BuildConditionalRAG()
if err != nil {
log.Fatalf("Failed to build RAG pipeline: %v", err)
}
runnable, err := pipeline.Compile()
if err != nil {
log.Fatalf("Failed to compile pipeline: %v", err)
}
exporter := graph.GetGraphForRunnable(runnable)
fmt.Println(exporter.DrawASCII())
query := "Can I work from home on Tuesday?"
fmt.Printf("\nQuery: %s\n", query)
result, err := runnable.Invoke(ctx, map[string]any{
"query": query,
})
if err != nil {
log.Fatalf("Failed to process query: %v", err)
}
if answer, ok := result["answer"].(string); ok {
fmt.Printf("Answer: %s\n", answer)
}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/rag_reranker/main.go | examples/rag_reranker/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/retriever"
"github.com/smallnest/langgraphgo/rag/splitter"
"github.com/smallnest/langgraphgo/rag/store"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
// Check for API keys
openAIKey := os.Getenv("OPENAI_API_KEY")
if openAIKey == "" {
fmt.Println("OPENAI_API_KEY not set. Skipping execution.")
fmt.Println("\nTo run this example:")
fmt.Println("1. Set OPENAI_API_KEY for LLM-based reranking")
fmt.Println("2. Optionally set COHERE_API_KEY for Cohere reranking")
fmt.Println("3. Optionally set JINA_API_KEY for Jina reranking")
return
}
ctx := context.Background()
fmt.Println("=== RAG Reranker Comparison Example ===")
// Initialize LLM
llm, err := openai.New()
if err != nil {
log.Fatalf("Failed to create LLM: %v", err)
}
// Create sample documents
documents := createSampleDocuments()
// Split documents
splitter := splitter.NewSimpleTextSplitter(200, 50)
chunks := splitter.SplitDocuments(documents)
fmt.Printf("Split %d documents into %d chunks\n\n", len(documents), len(chunks))
// Create embedder and vector store
embedder := store.NewMockEmbedder(256)
vectorStore := store.NewInMemoryVectorStore(embedder)
// Generate embeddings and add chunks to vector store
texts := make([]string, len(chunks))
for i, chunk := range chunks {
texts[i] = chunk.Content
}
embeddings, err := embedder.EmbedDocuments(ctx, texts)
if err != nil {
log.Fatalf("Failed to generate embeddings: %v", err)
}
err = vectorStore.AddBatch(ctx, chunks, embeddings)
if err != nil {
log.Fatalf("Failed to add documents to vector store: %v", err)
}
// Test query
query := "What is LangGraph and how does it help with multi-agent systems?"
fmt.Printf("Query: %s\n\n", query)
// Create base retriever
baseRetriever := retriever.NewVectorStoreRetriever(vectorStore, embedder, 5)
// Define rerankers to test
rerankers := []struct {
name string
reranker rag.Reranker
}{
{
name: "SimpleReranker (keyword-based)",
reranker: retriever.NewSimpleReranker(),
},
{
name: "LLMReranker",
reranker: retriever.NewLLMReranker(llm, retriever.DefaultLLMRerankerConfig()),
},
}
// Add Cohere reranker if API key is available
if os.Getenv("COHERE_API_KEY") != "" {
cohereReranker := retriever.NewCohereReranker("", retriever.DefaultCohereRerankerConfig())
rerankers = append(rerankers, struct {
name string
reranker rag.Reranker
}{
name: "CohereReranker",
reranker: cohereReranker,
})
fmt.Println("Added CohereReranker")
}
// Add Jina reranker if API key is available
if os.Getenv("JINA_API_KEY") != "" {
jinaReranker := retriever.NewJinaReranker("", retriever.DefaultJinaRerankerConfig())
rerankers = append(rerankers, struct {
name string
reranker rag.Reranker
}{
name: "JinaReranker",
reranker: jinaReranker,
})
fmt.Println("Added JinaReranker")
}
// Test each reranker
for _, rr := range rerankers {
fmt.Printf("\n--- %s ---\n", rr.name)
// Create pipeline with this reranker
config := rag.DefaultPipelineConfig()
config.Retriever = baseRetriever
config.Reranker = rr.reranker
config.LLM = llm
config.TopK = 3
config.UseReranking = true
pipeline := rag.NewRAGPipeline(config)
err = pipeline.BuildAdvancedRAG()
if err != nil {
log.Printf("Failed to build pipeline: %v", err)
continue
}
runnable, err := pipeline.Compile()
if err != nil {
log.Printf("Failed to compile pipeline: %v", err)
continue
}
// Run query
result, err := runnable.Invoke(ctx, map[string]any{
"query": query,
})
if err != nil {
log.Printf("Failed to process query: %v", err)
continue
}
// Display results
displayResults(result)
}
fmt.Println("\n" + strings.Repeat("=", 80))
fmt.Println("\nNotes:")
fmt.Println("- SimpleReranker: Fast, keyword-based, no API calls")
fmt.Println("- LLMReranker: Uses LLM to score documents, slower but good semantic understanding")
fmt.Println("- CohereReranker: High-quality results, requires Cohere API key")
fmt.Println("- JinaReranker: High-quality results, supports multiple languages")
fmt.Println("\nFor cross-encoder reranking, see scripts/cross_encoder_server.py")
}
func createSampleDocuments() []rag.Document {
return []rag.Document{
{
Content: "LangGraph is a library for building stateful, multi-actor applications with LLMs. " +
"It extends LangChain Expression Language with the ability to coordinate multiple chains " +
"across multiple steps of computation in a cyclic manner. LangGraph is particularly useful " +
"for building complex agent workflows and multi-agent systems where agents can communicate " +
"with each other and maintain state across interactions.",
Metadata: map[string]any{
"source": "langgraph_intro.txt",
"topic": "LangGraph",
"category": "Framework",
},
},
{
Content: "Multi-agent systems in LangGraph enable multiple AI agents to work together on complex tasks. " +
"Each agent can have specialized roles, tools, and objectives. The graph-based architecture allows " +
"agents to pass messages, share state, and coordinate their actions. This enables sophisticated " +
"workflows like research teams, code generation pipelines, and decision-making systems.",
Metadata: map[string]any{
"source": "multi_agent.txt",
"topic": "Multi-Agent",
"category": "Architecture",
},
},
{
Content: "RAG (Retrieval-Augmented Generation) is a technique that combines information retrieval " +
"with text generation. It retrieves relevant documents from a knowledge base and uses them " +
"to augment the context provided to a language model for generation. This approach helps " +
"reduce hallucinations and provides more factual, grounded responses.",
Metadata: map[string]any{
"source": "rag_overview.txt",
"topic": "RAG",
"category": "Technique",
},
},
{
Content: "Vector databases store embeddings, which are numerical representations of text. " +
"They enable efficient similarity search by comparing vector distances using metrics like " +
"cosine similarity or Euclidean distance. Popular vector databases include Pinecone, Weaviate, " +
"Chroma, and Qdrant.",
Metadata: map[string]any{
"source": "vector_db.txt",
"topic": "Vector Databases",
"category": "Infrastructure",
},
},
{
Content: "Document reranking is a technique to improve retrieval quality by re-scoring retrieved " +
"documents based on their relevance to the query. Cross-encoder models are often used for " +
"reranking as they can better capture query-document interactions compared to bi-encoders " +
"used for initial retrieval. Popular reranking services include Cohere Rerank and Jina Rerank.",
Metadata: map[string]any{
"source": "reranking.txt",
"topic": "Reranking",
"category": "Technique",
},
},
{
Content: "State management in LangGraph is handled through a stateful graph where each node " +
"can read and modify the state. The state flows through the graph and evolves at each step. " +
"This allows agents to maintain context, remember previous interactions, and make decisions " +
"based on accumulated information.",
Metadata: map[string]any{
"source": "state_management.txt",
"topic": "State Management",
"category": "Core Concept",
},
},
}
}
func displayResults(result map[string]any) {
// Display retrieved documents
if docs, ok := result["documents"].([]rag.RAGDocument); ok && len(docs) > 0 {
fmt.Println("Top Retrieved Documents:")
for i, doc := range docs {
source := "Unknown"
if s, ok := doc.Metadata["source"].(string); ok {
source = s
}
topic := "N/A"
if t, ok := doc.Metadata["topic"].(string); ok {
topic = t
}
fmt.Printf(" [%d] %s (Topic: %s)\n", i+1, source, topic)
fmt.Printf(" %s\n", truncate(doc.Content, 100))
}
}
// Display reranked scores if available
if rankedDocs, ok := result["ranked_documents"].([]rag.DocumentSearchResult); ok && len(rankedDocs) > 0 {
fmt.Println("\nRelevance Scores:")
for i, rd := range rankedDocs {
if i >= 3 {
break
}
method := "original"
if m, ok := rd.Metadata["reranking_method"].(string); ok {
method = m
}
fmt.Printf(" [%d] Score: %.4f (Method: %s)\n", i+1, rd.Score, method)
}
}
// Display answer if available
if answer, ok := result["answer"].(string); ok {
fmt.Printf("\nAnswer: %s\n", truncate(answer, 200))
}
}
func truncate(s string, maxLen int) string {
if len(s) <= maxLen {
return s
}
return s[:maxLen] + "..."
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/memory_graph_integration/main.go | examples/memory_graph_integration/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
)
// AgentState for the example
type AgentState struct {
Query string
Intent string
Info string
Response string
}
func classifyIntent(ctx context.Context, state map[string]any) (map[string]any, error) {
agentState := state["agent_state"].(*AgentState)
fmt.Println("[Memory Integration] Classifying intent...")
// Simulate classification
agentState.Intent = "search"
return state, nil
}
func retrieveInformation(ctx context.Context, state map[string]any) (map[string]any, error) {
agentState := state["agent_state"].(*AgentState)
fmt.Println("[Memory Integration] Retrieving information...")
// Simulate retrieval
agentState.Info = "Information about " + agentState.Query
return state, nil
}
func generateResponse(ctx context.Context, state map[string]any) (map[string]any, error) {
agentState := state["agent_state"].(*AgentState)
fmt.Println("[Memory Integration] Generating response...")
agentState.Response = "Here is what I found: " + agentState.Info
return state, nil
}
func main() {
// Create a state graph with map state
g := graph.NewStateGraph[map[string]any]()
g.AddNode("classify", "classify", classifyIntent)
g.AddNode("retrieve", "retrieve", retrieveInformation)
g.AddNode("generate", "generate", generateResponse)
g.SetEntryPoint("classify")
g.AddEdge("classify", "retrieve")
g.AddEdge("retrieve", "generate")
g.AddEdge("generate", graph.END)
runnable, err := g.Compile()
if err != nil {
log.Fatal(err)
}
agentState := &AgentState{Query: "Go Generics"}
input := map[string]any{
"agent_state": agentState,
}
result, err := runnable.Invoke(context.Background(), input)
if err != nil {
log.Fatal(err)
}
finalState := result["agent_state"].(*AgentState)
fmt.Printf("Response: %s\n", finalState.Response)
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/file_checkpointing/main.go | examples/file_checkpointing/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"github.com/smallnest/langgraphgo/graph"
)
func main() {
// Create a temporary directory for checkpoints
checkpointDir := "./checkpoints"
if err := os.MkdirAll(checkpointDir, 0755); err != nil {
log.Fatal(err)
}
defer os.RemoveAll(checkpointDir) // Cleanup after run
fmt.Printf("Using checkpoint directory: %s\n", checkpointDir)
// Initialize FileCheckpointStore
store, err := graph.NewFileCheckpointStore(checkpointDir)
if err != nil {
log.Fatalf("Failed to create checkpoint store: %v", err)
}
// Define a simple graph
g := graph.NewCheckpointableStateGraph[map[string]any]()
// Add nodes that update state
g.AddNode("first", "first", func(ctx context.Context, state map[string]any) (map[string]any, error) {
fmt.Println("Executing 'first' node")
state["step1"] = "completed"
return state, nil
})
g.AddNode("second", "second", func(ctx context.Context, state map[string]any) (map[string]any, error) {
fmt.Println("Executing 'second' node")
state["step2"] = "completed"
return state, nil
})
g.AddEdge("first", "second")
g.AddEdge("second", graph.END)
g.SetEntryPoint("first")
// Set checkpoint config
g.SetCheckpointConfig(graph.CheckpointConfig{
Store: store,
AutoSave: true,
})
// Compile implementation
runnable, err := g.CompileCheckpointable()
if err != nil {
log.Fatalf("Failed to compile graph: %v", err)
}
// Run the graph
ctx := context.Background()
initialState := map[string]any{
"input": "start",
}
// Thread ID helps group checkpoints for a specific conversation/execution
config := &graph.Config{
Configurable: map[string]any{
"thread_id": "thread_1",
},
}
fmt.Println("--- Starting Graph Execution ---")
res, err := runnable.InvokeWithConfig(ctx, initialState, config)
if err != nil {
log.Fatalf("Execution failed: %v", err)
}
fmt.Printf("Final Result: %v\n", res)
// Verify checkpoints were saved
fmt.Println("\n--- Verifying Checkpoints ---")
files, err := os.ReadDir(checkpointDir)
if err != nil {
log.Fatalf("Failed to read checkpoint directory: %v", err)
}
count := 0
for _, file := range files {
if filepath.Ext(file.Name()) == ".json" {
fmt.Printf("Found checkpoint file: %s\n", file.Name())
count++
}
}
if count > 0 {
fmt.Printf("Successfully saved %d checkpoints to %s\n", count, checkpointDir)
} else {
log.Fatal("No checkpoints found!")
}
// Demonstrate listing via store
fmt.Println("\n--- Listing Checkpoints from Store ---")
checkpoints, err := store.List(ctx, "thread_1")
if err != nil {
log.Fatalf("Failed to list checkpoints: %v", err)
}
for _, cp := range checkpoints {
fmt.Printf("Checkpoint ID: %s, Node: %s, Version: %d\n", cp.ID, cp.NodeName, cp.Version)
}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/langchain_example/main.go | examples/langchain_example/main.go | //go:build ignore
// +build ignore
package main
import (
"context"
"fmt"
"log"
"os"
"github.com/smallnest/langgraphgo/graph"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/googleai"
"github.com/tmc/langchaingo/llms/openai"
)
// Example 1: Using OpenAI with LangChain
func OpenAIExample() {
fmt.Println("\n๐ค OpenAI Example with LangChain")
fmt.Println("==================================")
// Create OpenAI LLM client using LangChain
model, err := openai.New()
if err != nil {
log.Printf("OpenAI initialization failed: %v", err)
return
}
// Create a graph that uses the LLM
g := graph.NewStateGraph[[]llms.MessageContent]()
g.AddNode("chat", "chat", func(ctx context.Context, messages []llms.MessageContent) ([]llms.MessageContent, error) {
// Use LangChain's GenerateContent method
response, err := model.GenerateContent(ctx, messages,
llms.WithTemperature(0.7),
llms.WithMaxTokens(150),
)
if err != nil {
return nil, fmt.Errorf("LLM generation failed: %w", err)
}
// Append the response to messages
return append(messages,
llms.TextParts("ai", response.Choices[0].Content),
), nil
})
g.AddEdge("chat", graph.END)
g.SetEntryPoint("chat")
runnable, err := g.Compile()
if err != nil {
log.Fatalf("Failed to compile graph: %v", err)
}
// Execute with initial message
ctx := context.Background()
result, err := runnable.Invoke(ctx, []llms.MessageContent{
llms.TextParts("human", "What are the benefits of using LangChain with Go?"),
})
if err != nil {
log.Printf("Execution failed: %v", err)
return
}
// Print the conversation
messages := result
for _, msg := range messages {
fmt.Printf("%s: %s\n", msg.Role, msg.Parts[0])
}
}
// Example 2: Using Google AI (Gemini) with LangChain
func GoogleAIExample() {
fmt.Println("\n๐ Google AI (Gemini) Example with LangChain")
fmt.Println("=============================================")
// Create Google AI LLM client using LangChain
ctx := context.Background()
model, err := googleai.New(ctx)
if err != nil {
log.Printf("Google AI initialization failed: %v", err)
return
}
// Create a streaming graph with Google AI
g := graph.NewListenableStateGraph[[]llms.MessageContent]()
node := g.AddNode("gemini", "gemini", func(ctx context.Context, messages []llms.MessageContent) ([]llms.MessageContent, error) {
// Use LangChain's GenerateContent with Google AI
response, err := model.GenerateContent(ctx, messages,
llms.WithTemperature(0.9),
llms.WithTopP(0.95),
)
if err != nil {
return nil, fmt.Errorf("Gemini generation failed: %w", err)
}
return append(messages,
llms.TextParts("ai", response.Choices[0].Content),
), nil
})
// Add a progress listener for streaming feedback
// Note: ProgressListener expects map[string]any state, but here we have []llms.MessageContent.
// We can't use the standard ProgressListener directly if it's strict.
// However, in my recent fix, I made ProgressListener implement NodeListener[map[string]any].
// This graph expects NodeListener[[]llms.MessageContent].
// So we can't use standard ProgressListener here directly.
// We will create a custom listener for this example.
listener := graph.NodeListenerFunc[[]llms.MessageContent](func(ctx context.Context, event graph.NodeEvent, nodeName string, state []llms.MessageContent, err error) {
if event == graph.NodeEventStart {
fmt.Printf("๐ค Thinking with Gemini... (Node: %s)\n", nodeName)
}
})
node.AddListener(listener)
g.AddEdge("gemini", graph.END)
g.SetEntryPoint("gemini")
runnable, err := g.CompileListenable()
if err != nil {
log.Fatalf("Failed to compile graph: %v", err)
}
// Execute with creative prompt
result, err := runnable.Invoke(ctx, []llms.MessageContent{
llms.TextParts("human", "Write a haiku about Go programming"),
})
if err != nil {
log.Printf("Execution failed: %v", err)
return
}
// Print the response
messages := result
fmt.Printf("\nGemini's Response:\n%s\n", messages[len(messages)-1].Parts[0])
}
// Example 3: Multi-step reasoning with LangChain
func MultiStepReasoningExample() {
fmt.Println("\n๐ง Multi-Step Reasoning with LangChain")
fmt.Println("======================================")
// Use whichever LLM is available
var model llms.Model
var err error
ctx := context.Background()
if os.Getenv("OPENAI_API_KEY") != "" {
model, err = openai.New()
fmt.Println("Using OpenAI...")
} else if os.Getenv("GOOGLE_API_KEY") != "" {
model, err = googleai.New(ctx)
fmt.Println("Using Google AI...")
} else {
fmt.Println("No API keys found. Set OPENAI_API_KEY or GOOGLE_API_KEY")
return
}
if err != nil {
log.Fatalf("Failed to initialize LLM: %v", err)
}
// Create a multi-step reasoning graph
g := graph.NewCheckpointableStateGraph[map[string]any]()
// Step 1: Analyze the problem
g.AddNode("analyze", "analyze", func(ctx context.Context, data map[string]any) (map[string]any, error) {
messages := []llms.MessageContent{
llms.TextParts("system", "You are a helpful assistant that breaks down problems step by step."),
llms.TextParts("human", data["problem"].(string)),
}
response, err := model.GenerateContent(ctx, messages,
llms.WithTemperature(0.3), // Lower temperature for analysis
)
if err != nil {
return nil, err
}
data["analysis"] = response.Choices[0].Content
return data, nil
})
// Step 2: Generate solution
g.AddNode("solve", "solve", func(ctx context.Context, data map[string]any) (map[string]any, error) {
messages := []llms.MessageContent{
llms.TextParts("system", "Based on the analysis, provide a clear solution."),
llms.TextParts("human", fmt.Sprintf(
"Problem: %s\nAnalysis: %s\n\nProvide a solution:",
data["problem"], data["analysis"],
)),
}
response, err := model.GenerateContent(ctx, messages,
llms.WithTemperature(0.5),
)
if err != nil {
return nil, err
}
data["solution"] = response.Choices[0].Content
return data, nil
})
// Step 3: Verify solution
g.AddNode("verify", "verify", func(ctx context.Context, data map[string]any) (map[string]any, error) {
messages := []llms.MessageContent{
llms.TextParts("system", "Verify if the solution is correct and complete."),
llms.TextParts("human", fmt.Sprintf(
"Problem: %s\nSolution: %s\n\nVerify this solution:",
data["problem"], data["solution"],
)),
}
response, err := model.GenerateContent(ctx, messages,
llms.WithTemperature(0.2), // Very low temperature for verification
)
if err != nil {
return nil, err
}
data["verification"] = response.Choices[0].Content
return data, nil
})
// Connect the nodes
g.AddEdge("analyze", "solve")
g.AddEdge("solve", "verify")
g.AddEdge("verify", graph.END)
g.SetEntryPoint("analyze")
// Enable checkpointing
g.SetCheckpointConfig(graph.CheckpointConfig{
Store: graph.NewMemoryCheckpointStore(),
AutoSave: true,
})
runnable, err := g.CompileCheckpointable()
if err != nil {
log.Fatalf("Failed to compile graph: %v", err)
}
// Execute with a problem
problem := map[string]any{
"problem": "How can I optimize a Go web server that's handling 10,000 concurrent connections?",
}
result, err := runnable.Invoke(ctx, problem)
if err != nil {
log.Fatalf("Execution failed: %v", err)
}
// Display results
data := result
fmt.Printf("\n๐ Analysis:\n%s\n", data["analysis"])
fmt.Printf("\n๐ก Solution:\n%s\n", data["solution"])
fmt.Printf("\nโ
Verification:\n%s\n", data["verification"])
// Show checkpoints
checkpoints, _ := runnable.ListCheckpoints(ctx)
fmt.Printf("\n๐ Created %d checkpoints during reasoning\n", len(checkpoints))
}
func main() {
fmt.Println("๐ฆ๐ LangChain Integration Examples for LangGraphGo")
fmt.Println("===================================================")
// Run examples based on available API keys
if os.Getenv("OPENAI_API_KEY") != "" {
OpenAIExample()
} else {
fmt.Println("\nโ ๏ธ OpenAI example skipped (OPENAI_API_KEY not set)")
}
if os.Getenv("GOOGLE_API_KEY") != "" {
GoogleAIExample()
} else {
fmt.Println("\nโ ๏ธ Google AI example skipped (GOOGLE_API_KEY not set)")
}
// Multi-step example works with either API
if os.Getenv("OPENAI_API_KEY") != "" || os.Getenv("GOOGLE_API_KEY") != "" {
MultiStepReasoningExample()
} else {
fmt.Println("\nโ ๏ธ Multi-step reasoning example skipped (no API keys set)")
fmt.Println("\nTo run these examples, set one of the following environment variables:")
fmt.Println(" - OPENAI_API_KEY")
fmt.Println(" - GOOGLE_API_KEY")
}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/rag_chroma_example/main.go | examples/rag_chroma_example/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/retriever"
"github.com/smallnest/langgraphgo/rag/store"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
ctx := context.Background()
// Initialize LLM
llm, err := openai.New()
if err != nil {
log.Fatalf("Failed to create LLM: %v", err)
}
// Create sample documents
documents := []rag.Document{
{
Content: "Chroma is an open-source vector database that allows you to store and query embeddings. " +
"It is designed to be easy to use and integrate with LLM applications.",
Metadata: map[string]any{"source": "chroma_docs"},
},
{
Content: "LangGraphGo integrates with various vector stores including Chroma, Pinecone, and Weaviate " +
"to enable RAG capabilities in your Go applications.",
Metadata: map[string]any{"source": "langgraphgo_docs"},
},
}
// Create embedder
embedder := store.NewMockEmbedder(128)
// Create Chroma vector store (using mock for example, as Chroma client might require running instance)
vectorStore := store.NewInMemoryVectorStore(embedder)
// Generate embeddings and add documents
texts := make([]string, len(documents))
for i, doc := range documents {
texts[i] = doc.Content
}
embeddings, _ := embedder.EmbedDocuments(ctx, texts)
vectorStore.AddBatch(ctx, documents, embeddings)
// Create retriever
retriever := retriever.NewVectorStoreRetriever(vectorStore, embedder, 2)
// Configure RAG pipeline
config := rag.DefaultPipelineConfig()
config.Retriever = retriever
config.LLM = llm
// Build basic RAG pipeline
pipeline := rag.NewRAGPipeline(config)
err = pipeline.BuildBasicRAG()
if err != nil {
log.Fatalf("Failed to build RAG pipeline: %v", err)
}
// Compile the pipeline
runnable, err := pipeline.Compile()
if err != nil {
log.Fatalf("Failed to compile pipeline: %v", err)
}
// Visualize
exporter := graph.GetGraphForRunnable(runnable)
fmt.Println(exporter.DrawASCII())
// Run query
query := "What is Chroma?"
fmt.Printf("\nQuery: %s\n", query)
result, err := runnable.Invoke(ctx, map[string]any{
"query": query,
})
if err != nil {
log.Fatalf("Failed to process query: %v", err)
}
if answer, ok := result["answer"].(string); ok {
fmt.Printf("Answer: %s\n", answer)
}
if docs, ok := result["documents"].([]rag.RAGDocument); ok {
fmt.Println("Retrieved Documents:")
for j, doc := range docs {
fmt.Printf(" [%d] %s\n", j+1, truncate(doc.Content, 100))
}
}
}
func truncate(s string, maxLen int) string {
if len(s) <= maxLen {
return s
}
return s[:maxLen] + "..."
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/complex_tools/tools.go | examples/complex_tools/tools.go | package main
import (
"context"
"encoding/json"
"fmt"
"math"
"time"
)
// HotelBookingParams ้
ๅบ้ข่ฎขๅๆฐ
type HotelBookingParams struct {
CheckIn string `json:"check_in"`
CheckOut string `json:"check_out"`
Guests int `json:"guests"`
RoomType string `json:"room_type"`
Breakfast bool `json:"breakfast"`
Parking bool `json:"parking"`
View string `json:"view"`
MaxPrice float64 `json:"max_price"`
SpecialRequests []string `json:"special_requests"`
}
// HotelBookingTool ้
ๅบ้ข่ฎขๅทฅๅ
ท
type HotelBookingTool struct{}
func (t HotelBookingTool) Name() string {
return "hotel_booking"
}
func (t HotelBookingTool) Description() string {
return `้ข่ฎข้
ๅบๆฟ้ด๏ผๆฏๆๅค็ง้้กน้
็ฝฎใ
ๅๆฐ่ฏดๆ๏ผ
- check_in: ๅ
ฅไฝๆฅๆ๏ผๆ ผๅผ๏ผYYYY-MM-DD๏ผ
- check_out: ้ๆฟๆฅๆ๏ผๆ ผๅผ๏ผYYYY-MM-DD๏ผ
- guests: ๅฎขไบบๆฐ้๏ผ1-10ไบบ๏ผ
- room_type: ๆฟ้ด็ฑปๅ๏ผๆ ๅ้ดstandardใ่ฑชๅ้ดdeluxeใๅฅๆฟsuiteใๆป็ปๅฅๆฟpenthouse๏ผ
- breakfast: ๆฏๅฆๅ
ๅซๆฉ้ค๏ผtrue/false๏ผ
- parking: ๆฏๅฆ้่ฆๅ่ฝฆไฝ๏ผtrue/false๏ผ
- view: ๆฟ้ดๆฏ่งๅๅฅฝ๏ผๆ noneใๅๅธๆฏ่งcityใๆตทๆฏoceanใๅฑฑๆฏmountain๏ผ
- max_price: ๆฏๆๆ้ซไปทๆ ผ๏ผไพๅฆ๏ผ200.00๏ผ
- special_requests: ็นๆฎ่ฆๆฑๆฐ็ป๏ผๅฏ้๏ผ`
}
func (t HotelBookingTool) Schema() map[string]any {
return map[string]any{
"type": "object",
"properties": map[string]any{
"check_in": map[string]any{
"type": "string",
"description": "ๅ
ฅไฝๆฅๆ๏ผๆ ผๅผไธบYYYY-MM-DD",
"format": "date",
},
"check_out": map[string]any{
"type": "string",
"description": "้ๆฟๆฅๆ๏ผๆ ผๅผไธบYYYY-MM-DD",
"format": "date",
},
"guests": map[string]any{
"type": "integer",
"description": "ๅฎขไบบๆฐ้๏ผ1-10ไบบ๏ผ",
"minimum": 1,
"maximum": 10,
},
"room_type": map[string]any{
"type": "string",
"description": "ๆฟ้ด็ฑปๅ",
"enum": []string{"standard", "deluxe", "suite", "penthouse"},
},
"breakfast": map[string]any{
"type": "boolean",
"description": "ๆฏๅฆๅ
ๅซๆฉ้ค",
},
"parking": map[string]any{
"type": "boolean",
"description": "ๆฏๅฆ้่ฆๅ่ฝฆไฝ",
},
"view": map[string]any{
"type": "string",
"description": "ๆฟ้ดๆฏ่งๅๅฅฝ",
"enum": []string{"none", "city", "ocean", "mountain"},
},
"max_price": map[string]any{
"type": "number",
"description": "ๆฏๆๆ้ซไปทๆ ผ",
},
"special_requests": map[string]any{
"type": "array",
"description": "็นๆฎ่ฆๆฑๅ่กจ",
"items": map[string]any{
"type": "string",
},
},
},
"required": []string{"check_in", "check_out", "guests", "room_type"},
}
}
func (t HotelBookingTool) Call(ctx context.Context, input string) (string, error) {
var params HotelBookingParams
if err := json.Unmarshal([]byte(input), ¶ms); err != nil {
return "", fmt.Errorf("่พๅ
ฅๅๆฐๆ ๆ: %w", err)
}
// ้ช่ฏๅฟ
ๅกซๅญๆฎต
if params.CheckIn == "" {
return "", fmt.Errorf("ๅ
ฅไฝๆฅๆๆฏๅฟ
ๅกซ้กน")
}
if params.CheckOut == "" {
return "", fmt.Errorf("้ๆฟๆฅๆๆฏๅฟ
ๅกซ้กน")
}
if params.Guests < 1 || params.Guests > 10 {
return "", fmt.Errorf("ๅฎขไบบๆฐ้ๅฟ
้กปๅจ1ๅฐ10ไน้ด")
}
// ่ฎก็ฎๅ
ฅไฝๅคฉๆฐ
checkIn, _ := time.Parse("2006-01-02", params.CheckIn)
checkOut, _ := time.Parse("2006-01-02", params.CheckOut)
nights := int(checkOut.Sub(checkIn).Hours() / 24)
if nights <= 0 {
return "", fmt.Errorf("้ๆฟๆฅๆๅฟ
้กปๆไบๅ
ฅไฝๆฅๆ")
}
// ่ฎก็ฎๅบ็กไปทๆ ผ
basePrice := 100.0
switch params.RoomType {
case "deluxe":
basePrice = 180.0
case "suite":
basePrice = 300.0
case "penthouse":
basePrice = 500.0
}
// ๆทปๅ ้ขๅคๆๅก่ดน็จ
if params.Breakfast {
basePrice += 25.0
}
if params.Parking {
basePrice += 15.0
}
if params.View == "ocean" {
basePrice += 50.0
} else if params.View == "city" {
basePrice += 30.0
} else if params.View == "mountain" {
basePrice += 35.0
}
// ๆฃๆฅๆ้ซไปทๆ ผ้ๅถ
if params.MaxPrice > 0 && basePrice > params.MaxPrice {
return "", fmt.Errorf("ๆฟ้ดไปทๆ ผ๏ผ%.2f๏ผ่ถ
่ฟไบๆ้ซไปทๆ ผ้ๅถ๏ผ%.2f๏ผ", basePrice, params.MaxPrice)
}
totalPrice := basePrice * float64(nights)
result := map[string]any{
"booking_id": fmt.Sprintf("HTL-%d", time.Now().Unix()),
"check_in": params.CheckIn,
"check_out": params.CheckOut,
"nights": nights,
"guests": params.Guests,
"room_type": params.RoomType,
"breakfast": params.Breakfast,
"parking": params.Parking,
"view": params.View,
"price_per_night": basePrice,
"total_price": totalPrice,
"special_requests": params.SpecialRequests,
}
jsonResult, _ := json.MarshalIndent(result, "", " ")
return string(jsonResult), nil
}
// MortgageCalculationParams ๆฟ่ดท่ฎก็ฎๅๆฐ
type MortgageCalculationParams struct {
Principal float64 `json:"principal"`
InterestRate float64 `json:"interest_rate"`
Years int `json:"years"`
DownPayment float64 `json:"down_payment"`
PropertyTax float64 `json:"property_tax"`
Insurance float64 `json:"insurance"`
ExtraPayment ExtraPaymentInfo `json:"extra_payment"`
}
// ExtraPaymentInfo ้ขๅค่ฟๆฌพไฟกๆฏ๏ผๅตๅฅๅฏน่ฑก๏ผ
type ExtraPaymentInfo struct {
Enabled bool `json:"enabled"`
Amount float64 `json:"amount"`
Frequency string `json:"frequency"` // monthly, yearly, onetime
StartYear int `json:"start_year"`
}
// MortgageCalculatorTool ๆฟ่ดท่ฎก็ฎๅจๅทฅๅ
ท
type MortgageCalculatorTool struct{}
func (t MortgageCalculatorTool) Name() string {
return "mortgage_calculator"
}
func (t MortgageCalculatorTool) Description() string {
return `่ฎก็ฎๆฟ่ดทๆไพ๏ผๆฏๆ้ขๅค่ฟๆฌพใ็จ่ดนๅไฟ้ฉใ
ๅๆฐ่ฏดๆ๏ผ
- principal: ่ดทๆฌพๆฌ้๏ผไพๅฆ๏ผ300000๏ผ
- interest_rate: ๅนดๅฉ็็พๅๆฏ๏ผไพๅฆ๏ผ6.5่กจ็คบ6.5%๏ผ
- years: ่ดทๆฌพๅนด้๏ผไพๅฆ๏ผ30๏ผ
- down_payment: ้ฆไป้้ข๏ผไพๅฆ๏ผ60000๏ผ
- property_tax: ๅนดๆฟไบง็จ๏ผไพๅฆ๏ผ3000๏ผ
- insurance: ๅนดไฟ้ฉ่ดน๏ผไพๅฆ๏ผ1200๏ผ
- extra_payment: ้ขๅค่ฟๆฌพ้
็ฝฎ๏ผๅตๅฅๅฏน่ฑก๏ผ๏ผ
- enabled: ๆฏๅฆๅฏ็จ้ขๅค่ฟๆฌพ
- amount: ้ขๅค่ฟๆฌพ้้ข
- frequency: ่ฟๆฌพ้ข็๏ผmonthlyๆๆใyearlyๆๅนดใonetimeไธๆฌกๆง๏ผ
- start_year: ๅผๅง้ขๅค่ฟๆฌพ็ๅนดไปฝ`
}
func (t MortgageCalculatorTool) Schema() map[string]any {
return map[string]any{
"type": "object",
"properties": map[string]any{
"principal": map[string]any{
"type": "number",
"description": "่ดทๆฌพๆฌ้้้ข",
},
"interest_rate": map[string]any{
"type": "number",
"description": "ๅนดๅฉ็็พๅๆฏ๏ผไพๅฆ๏ผ6.5่กจ็คบ6.5%๏ผ",
},
"years": map[string]any{
"type": "integer",
"description": "่ดทๆฌพๅนด้",
"minimum": 1,
"maximum": 50,
},
"down_payment": map[string]any{
"type": "number",
"description": "้ฆไป้้ข",
},
"property_tax": map[string]any{
"type": "number",
"description": "ๅนดๅบฆๆฟไบง็จ้้ข",
},
"insurance": map[string]any{
"type": "number",
"description": "ๅนดๅบฆไฟ้ฉ้้ข",
},
"extra_payment": map[string]any{
"type": "object",
"description": "้ขๅค่ฟๆฌพ้
็ฝฎ",
"properties": map[string]any{
"enabled": map[string]any{
"type": "boolean",
"description": "ๆฏๅฆๅฏ็จ้ขๅค่ฟๆฌพ",
},
"amount": map[string]any{
"type": "number",
"description": "้ขๅค่ฟๆฌพ้้ข",
},
"frequency": map[string]any{
"type": "string",
"description": "่ฟๆฌพ้ข็",
"enum": []string{"monthly", "yearly", "onetime"},
},
"start_year": map[string]any{
"type": "integer",
"description": "ๅผๅง้ขๅค่ฟๆฌพ็ๅนดไปฝ",
},
},
},
},
"required": []string{"principal", "interest_rate", "years"},
}
}
func (t MortgageCalculatorTool) Call(ctx context.Context, input string) (string, error) {
var params MortgageCalculationParams
if err := json.Unmarshal([]byte(input), ¶ms); err != nil {
return "", fmt.Errorf("่พๅ
ฅๅๆฐๆ ๆ: %w", err)
}
// ้ช่ฏๅฟ
ๅกซๅญๆฎต
if params.Principal <= 0 {
return "", fmt.Errorf("่ดทๆฌพๆฌ้ๅฟ
้กปๅคงไบ0")
}
if params.InterestRate <= 0 {
return "", fmt.Errorf("ๅฉ็ๅฟ
้กปๅคงไบ0")
}
if params.Years <= 0 {
return "", fmt.Errorf("่ดทๆฌพๅนด้ๅฟ
้กปๅคงไบ0")
}
// ่ฎก็ฎๅฎ้
่ดทๆฌพ้้ข๏ผๆฃ้ค้ฆไป๏ผ
loanAmount := params.Principal - params.DownPayment
if loanAmount <= 0 {
return "", fmt.Errorf("้ฆไป้้ขไธ่ฝ่ถ
่ฟ่ดทๆฌพๆฌ้")
}
// ่ฎก็ฎๆไพ
monthlyRate := params.InterestRate / 100 / 12
numPayments := params.Years * 12
monthlyPayment := loanAmount * (monthlyRate * math.Pow(1+monthlyRate, float64(numPayments))) /
(math.Pow(1+monthlyRate, float64(numPayments)) - 1)
// ๆทปๅ ๆๅบฆๆฟไบง็จๅไฟ้ฉ
monthlyTax := params.PropertyTax / 12
monthlyInsurance := params.Insurance / 12
totalMonthlyPayment := monthlyPayment + monthlyTax + monthlyInsurance
// ่ฎก็ฎๆป่ฟๆฌพ้ขๅๅฉๆฏ
totalPayment := monthlyPayment * float64(numPayments)
totalInterest := totalPayment - loanAmount
// ่ฎก็ฎ้ขๅค่ฟๆฌพ็ๅฝฑๅ
extraPaymentResult := map[string]any{}
if params.ExtraPayment.Enabled && params.ExtraPayment.Amount > 0 {
savedInterest := params.ExtraPayment.Amount * 100 // ็ฎๅ่ฎก็ฎ
paidOffMonths := numPayments - int(params.ExtraPayment.Amount/2)
extraPaymentResult = map[string]any{
"extra_payment_enabled": true,
"extra_payment_amount": params.ExtraPayment.Amount,
"extra_payment_frequency": params.ExtraPayment.Frequency,
"estimated_interest_saved": savedInterest,
"paid_off_months_early": numPayments - paidOffMonths,
"new_payoff_date": time.Now().AddDate(0, paidOffMonths, 0).Format("2006-01-02"),
}
} else {
extraPaymentResult = map[string]any{
"extra_payment_enabled": false,
}
}
result := map[string]any{
"loan_amount": loanAmount,
"down_payment": params.DownPayment,
"interest_rate": params.InterestRate,
"loan_term_years": params.Years,
"monthly_principal_interest": monthlyPayment,
"monthly_property_tax": monthlyTax,
"monthly_insurance": monthlyInsurance,
"total_monthly_payment": totalMonthlyPayment,
"total_payment": totalPayment + (monthlyTax+monthlyInsurance)*float64(numPayments),
"total_interest": totalInterest,
"payoff_date": time.Now().AddDate(params.Years, 0, 0).Format("2006-01-02"),
"extra_payment_info": extraPaymentResult,
}
jsonResult, _ := json.MarshalIndent(result, "", " ")
return string(jsonResult), nil
}
// BatchOperationItem ๆน้ๆไฝไธญ็ๅไธช้กน็ฎ
type BatchOperationItem struct {
ID string `json:"id"`
Action string `json:"action"` // create, update, delete
Quantity int `json:"quantity"`
Price float64 `json:"price"`
Metadata map[string]interface{} `json:"metadata"`
}
// BatchOperationParams ๆน้ๆไฝๅๆฐ
type BatchOperationParams struct {
Operation string `json:"operation"`
Items []BatchOperationItem `json:"items"`
DryRun bool `json:"dry_run"`
Priority string `json:"priority"`
}
// BatchOperationTool ๆน้ๆไฝๅทฅๅ
ท
type BatchOperationTool struct{}
func (t BatchOperationTool) Name() string {
return "batch_operation"
}
func (t BatchOperationTool) Description() string {
return `ๅฏนๅคไธช้กน็ฎๆง่กๆน้ๆไฝใ
ๅๆฐ่ฏดๆ๏ผ
- operation: ๆไฝ็ฑปๅ๏ผprocessๅค็ใvalidate้ช่ฏใexportๅฏผๅบ๏ผ
- items: ้กน็ฎๆฐ็ป๏ผๆฏไธช้กน็ฎๅ
ๅซ๏ผ
- id: ๅฏไธๆ ่ฏ็ฌฆ
- action: ่ฆๆง่ก็ๆไฝ๏ผcreateๅๅปบใupdateๆดๆฐใdeleteๅ ้ค๏ผ
- quantity: ๆฐ้
- price: ๅไปท
- metadata: ้ขๅค็้ฎๅผๅฏน๏ผๅตๅฅๅฏน่ฑก๏ผ
- dry_run: ๅฆๆไธบtrue๏ผไป
้ช่ฏไธๆง่ก๏ผtrue/false๏ผ
- priority: ๆไฝไผๅ
็บง๏ผlowไฝใnormalๆฎ้ใhigh้ซใurgent็ดงๆฅ๏ผ`
}
func (t BatchOperationTool) Schema() map[string]any {
return map[string]any{
"type": "object",
"properties": map[string]any{
"operation": map[string]any{
"type": "string",
"description": "ๆน้ๆไฝ็ฑปๅ",
"enum": []string{"process", "validate", "export"},
},
"items": map[string]any{
"type": "array",
"description": "่ฆๅค็็้กน็ฎๆฐ็ป",
"items": map[string]any{
"type": "object",
"properties": map[string]any{
"id": map[string]any{
"type": "string",
"description": "้กน็ฎๅฏไธๆ ่ฏ็ฌฆ",
},
"action": map[string]any{
"type": "string",
"description": "่ฆๆง่ก็ๆไฝ",
"enum": []string{"create", "update", "delete"},
},
"quantity": map[string]any{
"type": "integer",
"description": "้กน็ฎๆฐ้",
"minimum": 0,
},
"price": map[string]any{
"type": "number",
"description": "ๅไปท",
"minimum": 0,
},
"metadata": map[string]any{
"type": "object",
"description": "้ขๅค็ๅ
ๆฐๆฎ๏ผ้ฎๅผๅฏน๏ผ",
},
},
"required": []string{"id", "action"},
},
},
"dry_run": map[string]any{
"type": "boolean",
"description": "ๅฆๆไธบtrue๏ผไป
้ช่ฏ่ไธๆง่ก",
},
"priority": map[string]any{
"type": "string",
"description": "ๆไฝไผๅ
็บง",
"enum": []string{"low", "normal", "high", "urgent"},
},
},
"required": []string{"operation", "items"},
}
}
func (t BatchOperationTool) Call(ctx context.Context, input string) (string, error) {
var params BatchOperationParams
if err := json.Unmarshal([]byte(input), ¶ms); err != nil {
return "", fmt.Errorf("่พๅ
ฅๅๆฐๆ ๆ: %w", err)
}
// ้ช่ฏๅฟ
ๅกซๅญๆฎต
if params.Operation == "" {
return "", fmt.Errorf("ๆไฝ็ฑปๅๆฏๅฟ
ๅกซ้กน")
}
if len(params.Items) == 0 {
return "", fmt.Errorf("่ณๅฐ้่ฆไธไธช้กน็ฎ")
}
// ๅค็ๆฏไธช้กน็ฎ
results := make([]map[string]any, 0, len(params.Items))
totalValue := 0.0
summary := map[string]int{
"create": 0,
"update": 0,
"delete": 0,
"failed": 0,
}
for _, item := range params.Items {
itemResult := map[string]any{
"id": item.ID,
"action": item.Action,
"status": "pending",
}
if params.DryRun {
itemResult["status"] = "validated (dry run)"
itemResult["message"] = "ๅฐไผๅค็ๆญค้กน็ฎ"
} else {
// ๆจกๆๅค็
if item.Action == "create" || item.Action == "update" {
itemValue := float64(item.Quantity) * item.Price
totalValue += itemValue
itemResult["value"] = itemValue
itemResult["status"] = "success"
itemResult["message"] = fmt.Sprintf("ๅทฒๅค็ %d ไปถ๏ผๅไปท %.2f", item.Quantity, item.Price)
}
summary[item.Action]++
}
if item.Metadata != nil {
itemResult["metadata_processed"] = len(item.Metadata)
}
results = append(results, itemResult)
}
result := map[string]any{
"operation": params.Operation,
"dry_run": params.DryRun,
"priority": params.Priority,
"total_items": len(params.Items),
"summary": summary,
"total_value": totalValue,
"timestamp": time.Now().Format(time.RFC3339),
"results": results,
}
jsonResult, _ := json.MarshalIndent(result, "", " ")
return string(jsonResult), nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/complex_tools/main.go | examples/complex_tools/main.go | package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
"github.com/tmc/langchaingo/tools"
"github.com/smallnest/langgraphgo/prebuilt"
)
func main() {
// ๆฃๆฅAPIๅฏ้ฅ
if os.Getenv("OPENAI_API_KEY") == "" {
log.Fatal("ๆช่ฎพ็ฝฎOPENAI_API_KEY็ฏๅขๅ้")
}
// ๅๅงๅLLM
llm, err := openai.New()
if err != nil {
log.Fatal(err)
}
// ๅๅปบๅคๆๅทฅๅ
ท
hotelTool := &SimpleToolWrapper{
name: "hotel_booking",
description: HotelBookingTool{}.Description(),
schema: HotelBookingTool{}.Schema(),
handler: HotelBookingTool{}.Call,
}
mortgageTool := &SimpleToolWrapper{
name: "mortgage_calculator",
description: MortgageCalculatorTool{}.Description(),
schema: MortgageCalculatorTool{}.Schema(),
handler: MortgageCalculatorTool{}.Call,
}
batchTool := &SimpleToolWrapper{
name: "batch_operation",
description: BatchOperationTool{}.Description(),
schema: BatchOperationTool{}.Schema(),
handler: BatchOperationTool{}.Call,
}
allTools := []tools.Tool{hotelTool, mortgageTool, batchTool}
// ๆๅฐๅทฅๅ
ทschema็จไบๆผ็คบ
fmt.Println("=== ๅคๆๅทฅๅ
ท็คบไพ ===")
fmt.Println("\nๅฏ็จๅทฅๅ
ทๅๅ
ถๅๆฐschema๏ผ")
fmt.Println()
for _, tool := range allTools {
fmt.Printf("ๅทฅๅ
ทๅ็งฐ: %s\n", tool.Name())
fmt.Printf("ๆ่ฟฐ: %s\n", tool.Description())
if st, ok := tool.(prebuilt.ToolWithSchema); ok {
if schema := st.Schema(); schema != nil {
schemaJSON, _ := json.MarshalIndent(schema, "", " ")
fmt.Printf("Schema:\n%s\n", string(schemaJSON))
}
}
fmt.Println()
}
// ็ดๆฅๆผ็คบๅทฅๅ
ท่ฐ็จ
fmt.Println("=== ็ดๆฅๅทฅๅ
ท่ฐ็จ็คบไพ ===")
fmt.Println()
ctx := context.Background()
// ็คบไพ1๏ผ้
ๅบ้ข่ฎข
fmt.Println("1. ้
ๅบ้ข่ฎข็คบไพ๏ผ")
hotelInput := HotelBookingParams{
CheckIn: "2026-02-01",
CheckOut: "2026-02-05",
Guests: 2,
RoomType: "deluxe",
Breakfast: true,
Parking: true,
View: "ocean",
MaxPrice: 250.0,
SpecialRequests: []string{"้ซๅฑ", "ๅฎ้ๆฟ้ด"},
}
hotelInputJSON, _ := json.Marshal(hotelInput)
result, err := hotelTool.Call(ctx, string(hotelInputJSON))
if err != nil {
log.Printf("้
ๅบ้ข่ฎข้่ฏฏ: %v", err)
} else {
fmt.Printf("็ปๆ:\n%s\n\n", result)
}
// ็คบไพ2๏ผๆฟ่ดท่ฎก็ฎๅจ
fmt.Println("2. ๆฟ่ดท่ฎก็ฎๅจ็คบไพ๏ผ")
mortgageInput := MortgageCalculationParams{
Principal: 450000.0,
InterestRate: 6.5,
Years: 30,
DownPayment: 90000.0,
PropertyTax: 3600.0,
Insurance: 1200.0,
ExtraPayment: ExtraPaymentInfo{
Enabled: true,
Amount: 200.0,
Frequency: "monthly",
StartYear: 1,
},
}
mortgageInputJSON, _ := json.Marshal(mortgageInput)
result, err = mortgageTool.Call(ctx, string(mortgageInputJSON))
if err != nil {
log.Printf("ๆฟ่ดท่ฎก็ฎ้่ฏฏ: %v", err)
} else {
fmt.Printf("็ปๆ:\n%s\n\n", result)
}
// ็คบไพ3๏ผๆน้ๆไฝ
fmt.Println("3. ๆน้ๆไฝ็คบไพ๏ผ")
batchInput := BatchOperationParams{
Operation: "process",
Items: []BatchOperationItem{
{
ID: "ITEM-001",
Action: "create",
Quantity: 100,
Price: 15.99,
Metadata: map[string]any{"category": "็ตๅญไบงๅ", "brand": "TechCo"},
},
{
ID: "ITEM-002",
Action: "update",
Quantity: 50,
Price: 29.99,
Metadata: map[string]any{"category": "้
ไปถ", "brand": "AccBrand"},
},
{
ID: "ITEM-003",
Action: "create",
Quantity: 200,
Price: 9.50,
Metadata: map[string]any{"category": "็จๅ", "urgent": true},
},
},
DryRun: false,
Priority: "high",
}
batchInputJSON, _ := json.Marshal(batchInput)
result, err = batchTool.Call(ctx, string(batchInputJSON))
if err != nil {
log.Printf("ๆน้ๆไฝ้่ฏฏ: %v", err)
} else {
fmt.Printf("็ปๆ:\n%s\n\n", result)
}
// ็คบไพ4๏ผไฝฟ็จไปฃ็่ฐ็จๅทฅๅ
ท
fmt.Println("=== ไฝฟ็จReActไปฃ็่ฐ็จๅทฅๅ
ท๏ผๆฏๆๅคๆschema๏ผ ===")
fmt.Println()
agent, err := prebuilt.CreateReactAgentMap(llm, allTools, 10)
if err != nil {
log.Fatal(err)
}
// ไปฃ็ๆต่ฏๆฅ่ฏข
queries := []string{
"ๆๆณ้ข่ฎข2026-02-10่ณ2026-02-15็้
ๅบๆฟ้ดใ้่ฆ2ไฝๅฎขไบบ๏ผ่ฑชๅ้ดๆตทๆฏๆฟ๏ผๅซๆฉ้ค๏ผๆฏๆ้ข็ฎ300็พๅ
ใ",
"่ฎก็ฎไธ็ฌ40ไธ็พๅ
ใ30ๅนดๆใๅฉ็6.5%็ๆฟ่ดทๆไพ๏ผ้ฆไป8ไธ็พๅ
ใๅ
ๅซ3000็พๅ
ๅนดๆฟไบง็จๅ1200็พๅ
ไฟ้ฉใ",
"ๅค็่ฟไธไธช้กน็ฎ๏ผๅๅปบITEM-A๏ผ100ไปถ๏ผๆฏไปถ10็พๅ
๏ผ๏ผๅๅปบITEM-B๏ผ50ไปถ๏ผๆฏไปถ25็พๅ
๏ผ๏ผๆดๆฐITEM-C๏ผ75ไปถ๏ผๆฏไปถ15็พๅ
๏ผใ",
}
for i, query := range queries {
fmt.Printf("ๆฅ่ฏข %d: %s\n", i+1, query)
fmt.Println("---")
resp, err := agent.Invoke(ctx, map[string]any{
"messages": []llms.MessageContent{
llms.TextParts(llms.ChatMessageTypeHuman, query),
},
})
if err != nil {
log.Printf("ไปฃ็้่ฏฏ: %v\n", err)
} else {
if msgs, ok := resp["messages"].([]llms.MessageContent); ok && len(msgs) > 0 {
for _, msg := range msgs {
if msg.Role == llms.ChatMessageTypeAI {
for _, part := range msg.Parts {
if textPart, ok := part.(llms.TextContent); ok {
fmt.Printf("ไปฃ็ๅๅค: %s\n\n", textPart.Text)
}
}
}
}
}
}
fmt.Println()
}
fmt.Println("=== ็คบไพ็ปๆ ===")
}
// SimpleToolWrapper ๅ
่ฃ
ๅคๆๅทฅๅ
ทไปฅๅฎ็ฐtools.Toolๆฅๅฃ
type SimpleToolWrapper struct {
name string
description string
schema map[string]any
handler func(ctx context.Context, input string) (string, error)
}
func (w *SimpleToolWrapper) Name() string {
return w.name
}
func (w *SimpleToolWrapper) Description() string {
return w.description
}
func (w *SimpleToolWrapper) Call(ctx context.Context, input string) (string, error) {
return w.handler(ctx, input)
}
func (w *SimpleToolWrapper) Schema() map[string]any {
return w.schema
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/rag_langchain_vectorstore_example/main.go | examples/rag_langchain_vectorstore_example/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/retriever"
"github.com/smallnest/langgraphgo/rag/store"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
ctx := context.Background()
llm, err := openai.New()
if err != nil {
log.Fatalf("Failed to create LLM: %v", err)
}
documents := []rag.Document{
{Content: "LangChain is a framework for developing applications powered by language models."},
}
embedder := store.NewMockEmbedder(128)
vectorStore := store.NewInMemoryVectorStore(embedder)
texts := make([]string, len(documents))
for i, doc := range documents {
texts[i] = doc.Content
}
embeddings, _ := embedder.EmbedDocuments(ctx, texts)
vectorStore.AddBatch(ctx, documents, embeddings)
retriever := retriever.NewVectorStoreRetriever(vectorStore, embedder, 2)
config := rag.DefaultPipelineConfig()
config.Retriever = retriever
config.LLM = llm
pipeline := rag.NewRAGPipeline(config)
err = pipeline.BuildBasicRAG()
if err != nil {
log.Fatalf("Failed to build RAG pipeline: %v", err)
}
runnable, err := pipeline.Compile()
if err != nil {
log.Fatalf("Failed to compile pipeline: %v", err)
}
exporter := graph.GetGraphForRunnable(runnable)
fmt.Println(exporter.DrawASCII())
query := "What is LangChain?"
fmt.Printf("\nQuery: %s\n", query)
result, err := runnable.Invoke(ctx, map[string]any{
"query": query,
})
if err != nil {
log.Fatalf("Failed to process query: %v", err)
}
if answer, ok := result["answer"].(string); ok {
fmt.Printf("Answer: %s\n", answer)
}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/streaming_modes/main.go | examples/streaming_modes/main.go | package main
import (
"context"
"fmt"
"time"
"github.com/smallnest/langgraphgo/graph"
)
func main() {
// Create a streaming graph
g := graph.NewStreamingStateGraph[map[string]any]()
// Define nodes
g.AddNode("step_1", "step_1", func(ctx context.Context, state map[string]any) (map[string]any, error) {
time.Sleep(500 * time.Millisecond)
return map[string]any{"step_1": "completed"}, nil
})
g.AddNode("step_2", "step_2", func(ctx context.Context, state map[string]any) (map[string]any, error) {
time.Sleep(500 * time.Millisecond)
return map[string]any{"step_2": "completed"}, nil
})
g.SetEntryPoint("step_1")
g.AddEdge("step_1", "step_2")
g.AddEdge("step_2", graph.END)
// 1. Stream Mode: Updates (Default)
fmt.Println("=== Streaming Updates ===")
g.SetStreamConfig(graph.StreamConfig{Mode: graph.StreamModeUpdates, BufferSize: 10})
runnable, _ := g.CompileStreaming()
updates := runnable.Stream(context.Background(), map[string]any{})
for event := range updates.Events {
fmt.Printf("Event: %s, Node: %s, State: %v\n", event.Event, event.NodeName, event.State)
}
// 2. Stream Mode: Values (Full State)
fmt.Println("\n=== Streaming Values ===")
g.SetStreamConfig(graph.StreamConfig{Mode: graph.StreamModeValues, BufferSize: 10})
runnable, _ = g.CompileStreaming()
values := runnable.Stream(context.Background(), map[string]any{})
for event := range values.Events {
fmt.Printf("Event: %s, State: %v\n", event.Event, event.State)
}
// 3. Stream Mode: Debug (All Events)
fmt.Println("\n=== Streaming Debug ===")
g.SetStreamConfig(graph.StreamConfig{Mode: graph.StreamModeDebug, BufferSize: 10})
runnable, _ = g.CompileStreaming()
debug := runnable.Stream(context.Background(), map[string]any{})
for event := range debug.Events {
fmt.Printf("[%s] %s: %v\n", event.Timestamp.Format(time.StampMilli), event.Event, event.NodeName)
}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/swarm/main.go | examples/swarm/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
)
// Swarm style: Multiple specialized agents that can hand off to each other directly.
// This is different from Supervisor style where a central node routes.
// Here, nodes themselves decide next step.
// main wires three specialised agents (Triage, Researcher, Writer) into a
// swarm: each node hands off directly to the next instead of routing through
// a central supervisor.
func main() {
	wf := graph.NewStateGraph[map[string]any]()

	// Shared state: every agent appends an entry to "history".
	schema := graph.NewMapSchema()
	schema.RegisterReducer("history", graph.AppendReducer)
	wf.SetSchema(schema)

	// Agent 1: Triage inspects the request and records an intent.
	wf.AddNode("Triage", "Triage", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Println("[Triage] analyzing request...")
		return map[string]any{
			"history": []string{"Triage reviewed request"},
			"intent":  "research", // Simplified logic: always determine research needed
		}, nil
	})

	// Agent 2: Researcher gathers data.
	wf.AddNode("Researcher", "Researcher", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Println("[Researcher] conducting research...")
		return map[string]any{
			"history": []string{"Researcher gathered data"},
			"data":    "Some facts found",
		}, nil
	})

	// Agent 3: Writer turns the gathered data into a report.
	wf.AddNode("Writer", "Writer", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Println("[Writer] writing report...")
		data, _ := state["data"].(string)
		return map[string]any{
			"history": []string{"Writer created report"},
			"report":  fmt.Sprintf("Report based on %s", data),
		}, nil
	})

	// Handoffs: Triage routes by intent; Researcher always hands to Writer.
	wf.SetEntryPoint("Triage")
	wf.AddConditionalEdge("Triage", func(ctx context.Context, state map[string]any) string {
		switch state["intent"] {
		case "research":
			return "Researcher"
		case "write":
			return "Writer"
		default:
			return graph.END
		}
	})
	wf.AddEdge("Researcher", "Writer")
	wf.AddEdge("Writer", graph.END)

	app, err := wf.Compile()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("--- Starting Swarm ---")
	result, err := app.Invoke(context.Background(), map[string]any{
		"history": []string{},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Final State: %v\n", result)
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/mental_loop/main.go | examples/mental_loop/main.go | // Mental Loop Trading Agent
//
// This example implements the "Mental Loop" (Simulator-in-the-Loop) architecture
// from the Agentic Architectures series by Fareed Khan.
//
// Architecture Overview:
//
// 1. OBSERVE: The agent observes the current state of the environment
// 2. PROPOSE: Based on goals and current state, propose a high-level action/strategy
// 3. SIMULATE: Fork the environment state and run the proposed action forward
// to observe potential outcomes in a sandboxed simulation
// 4. ASSESS & REFINE: Analyze simulation results to evaluate risks and rewards,
// refining the initial proposal into a final, concrete action
// 5. EXECUTE: Execute the final, refined action in the real environment
// 6. REPEAT: Begin again from the new state
//
// This "think before you act" approach allows agents to:
// - Perform what-if analysis
// - Anticipate consequences
// - Refine plans for safety and effectiveness
//
// Use cases: Robotics (simulating movements), High-stakes decisions (finance,
// healthcare), Complex game AI, and any domain where mistakes have real consequences.
//
// Reference: https://github.com/FareedKhan-dev/all-agentic-architectures/blob/main/10_mental_loop.ipynb
package main
import (
"context"
"fmt"
"log"
"math"
"math/rand"
"os"
"strings"
"github.com/smallnest/langgraphgo/graph"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
)
// ==================== Data Models ====================

// Portfolio tracks the agent's cash balance and share count.
type Portfolio struct {
	Cash   float64
	Shares int
}

// Value reports the total portfolio worth (cash plus shares) at the given price.
func (p *Portfolio) Value(currentPrice float64) float64 {
	return p.Cash + float64(p.Shares)*currentPrice
}

// MarketSimulator models a single-asset market. It doubles as the "real
// world" and, via Copy, as a sandbox for what-if simulations.
type MarketSimulator struct {
	Day        int
	Price      float64
	Volatility float64 // Standard deviation for price changes
	Drift      float64 // General trend (daily return)
	MarketNews string
	Portfolio  Portfolio
}

// ProposedAction is the analyst's high-level strategy suggestion.
type ProposedAction struct {
	Strategy  string // e.g., "buy aggressively", "sell cautiously", "hold"
	Reasoning string
}

// FinalDecision is the concrete trade the risk manager commits to.
type FinalDecision struct {
	Action    string // "buy", "sell", or "hold"
	Amount    float64
	Reasoning string
}

// SimulationResult captures the outcome of one sandboxed simulation run.
type SimulationResult struct {
	SimNum       int
	InitialValue float64
	FinalValue   float64
	ReturnPct    float64
}

// ==================== Agent State ====================

// AgentState is the shared state threaded through the graph nodes.
type AgentState struct {
	RealMarket        *MarketSimulator
	ProposedAction    *ProposedAction
	SimulationResults []SimulationResult
	FinalDecision     *FinalDecision
}

// ==================== Market Simulator Methods ====================

// Step executes a trade at today's price, then advances the market one day.
func (m *MarketSimulator) Step(action string, amount float64) {
	// Trade first. Orders that cannot be met in full (not enough cash or
	// shares) are silently skipped.
	qty := int(amount)
	switch action {
	case "buy":
		if cost := float64(qty) * m.Price; cost <= m.Portfolio.Cash {
			m.Portfolio.Shares += qty
			m.Portfolio.Cash -= cost
		}
	case "sell":
		if qty <= m.Portfolio.Shares {
			m.Portfolio.Shares -= qty
			m.Portfolio.Cash += float64(qty) * m.Price
		}
	}

	// Price move in the style of Geometric Brownian Motion:
	// daily_return ~ normal(drift, volatility).
	m.Price *= 1 + (rand.NormFloat64()*m.Volatility + m.Drift)

	m.Day++

	// Occasionally (10% chance) fresh news arrives and biases the drift.
	if rand.Float64() < 0.1 {
		headlines := []string{
			"Positive earnings report expected.",
			"New competitor enters the market.",
			"Macroeconomic outlook is strong.",
			"Regulatory concerns are growing.",
		}
		m.MarketNews = headlines[rand.Intn(len(headlines))]
		if strings.Contains(m.MarketNews, "Positive") || strings.Contains(m.MarketNews, "strong") {
			m.Drift = 0.05
		} else {
			m.Drift = -0.05
		}
	} else {
		m.Drift = 0.01 // no news: revert to the baseline trend
	}
}

// GetStateString renders a human-readable snapshot of market and portfolio.
func (m *MarketSimulator) GetStateString() string {
	return fmt.Sprintf("Day %d: Price=$%.2f, News: %s\nPortfolio: $%.2f (%d shares, $%.2f cash)",
		m.Day, m.Price, m.MarketNews,
		m.Portfolio.Value(m.Price), m.Portfolio.Shares, m.Portfolio.Cash)
}

// Copy returns an independent deep copy used to sandbox simulations.
func (m *MarketSimulator) Copy() *MarketSimulator {
	dup := *m // Portfolio is a value field, so a struct copy is a deep copy
	return &dup
}
// ==================== Graph Nodes ====================

// ProposeActionNode observes the market and proposes a high-level strategy.
// This is the "OBSERVE -> PROPOSE" step of the mental loop.
//
// Expects state["agent_state"] (*AgentState) and state["llm"] (llms.Model);
// the type assertions panic if the caller omitted either key. On success the
// parsed proposal is stored in agentState.ProposedAction and the (mutated)
// state map is returned unchanged.
func ProposeActionNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	agentState := state["agent_state"].(*AgentState)
	fmt.Println("\n--- ๐ง Analyst Proposing Action ---")
	// Create prompt for the analyst; the expected STRATEGY/REASONING format
	// matches what parseProposedAction looks for.
	prompt := fmt.Sprintf(`You are a sharp financial analyst. Based on the current market state, propose a trading strategy.
Market State:
%s
Respond in the following format (keep reasoning concise):
STRATEGY: [buy aggressively|buy cautiously|sell aggressively|sell cautiously|hold]
REASONING: [brief reasoning for the proposed strategy]`,
		agentState.RealMarket.GetStateString())
	// Call LLM
	llm := state["llm"].(llms.Model)
	resp, err := llms.GenerateFromSinglePrompt(ctx, llm, prompt)
	if err != nil {
		return nil, fmt.Errorf("analyst LLM call failed: %w", err)
	}
	// Parse the response into a structured proposal (defaults to "hold"
	// if the LLM did not follow the format).
	proposal := parseProposedAction(resp)
	agentState.ProposedAction = proposal
	fmt.Printf("Proposal: %s. Reason: %s\n",
		proposal.Strategy, proposal.Reasoning)
	return state, nil
}
// RunSimulationNode runs the proposed strategy in a sandboxed simulation.
// This is the "SIMULATE" step of the mental loop: the real market is deep
// copied, the strategy's first trade is executed in the copy, and the copy
// is then advanced several days to sample a possible outcome.
func RunSimulationNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	agent := state["agent_state"].(*AgentState)
	fmt.Println("\n--- ๐ค Running Simulations ---")

	const (
		runs    = 5  // independent sampled futures
		horizon = 10 // days simulated per run
	)
	plan := agent.ProposedAction.Strategy

	outcomes := make([]SimulationResult, runs)
	for i := range outcomes {
		// IMPORTANT: work on a deep copy so the real market is untouched.
		sandbox := agent.RealMarket.Copy()
		startValue := sandbox.Portfolio.Value(sandbox.Price)

		// Translate the high-level strategy into a concrete first trade:
		// "aggressively" commits 25% of cash/shares, "cautiously" 10%.
		ratio := 0.25
		if strings.Contains(plan, "cautiously") {
			ratio = 0.1
		}
		action, qty := "hold", 0.0
		switch {
		case strings.Contains(plan, "buy"):
			action = "buy"
			qty = math.Floor((sandbox.Portfolio.Cash * ratio) / sandbox.Price)
		case strings.Contains(plan, "sell"):
			action = "sell"
			qty = math.Floor(float64(sandbox.Portfolio.Shares) * ratio)
		}

		// Day 1 executes the trade; the remaining days just hold.
		sandbox.Step(action, qty)
		for d := 1; d < horizon; d++ {
			sandbox.Step("hold", 0)
		}

		endValue := sandbox.Portfolio.Value(sandbox.Price)
		outcomes[i] = SimulationResult{
			SimNum:       i + 1,
			InitialValue: startValue,
			FinalValue:   endValue,
			ReturnPct:    (endValue - startValue) / startValue * 100,
		}
	}

	agent.SimulationResults = outcomes
	fmt.Println("Simulation complete. Results will be passed to the risk manager.")
	return state, nil
}
// RefineAndDecideNode analyzes simulation results and makes a final decision.
// This is the "ASSESS & REFINE" step of the mental loop: it summarises the
// sampled outcomes (average/best/worst return, count of positive runs),
// hands them to the LLM acting as a risk manager, and stores the parsed
// decision in agentState.FinalDecision.
//
// Expects state["agent_state"] (*AgentState) and state["llm"] (llms.Model).
func RefineAndDecideNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	agentState := state["agent_state"].(*AgentState)
	fmt.Println("\n--- ๐ง Risk Manager Refining Decision ---")
	// Format simulation results, one line per run.
	var resultsSummary strings.Builder
	for _, r := range agentState.SimulationResults {
		resultsSummary.WriteString(fmt.Sprintf("Sim %d: Initial=$%.2f, Final=$%.2f, Return=%.2f%%\n",
			r.SimNum, r.InitialValue, r.FinalValue, r.ReturnPct))
	}
	// Calculate statistics.
	// NOTE(review): avgReturn divides by len(SimulationResults), so this
	// assumes at least one simulation ran (RunSimulationNode provides 5);
	// with zero results the average would be NaN.
	var avgReturn, minReturn, maxReturn, positiveCount float64
	minReturn = math.Inf(1)
	maxReturn = math.Inf(-1)
	for _, r := range agentState.SimulationResults {
		avgReturn += r.ReturnPct
		if r.ReturnPct < minReturn {
			minReturn = r.ReturnPct
		}
		if r.ReturnPct > maxReturn {
			maxReturn = r.ReturnPct
		}
		if r.ReturnPct > 0 {
			positiveCount++ // float64 counter; cast to int for the prompt below
		}
	}
	avgReturn /= float64(len(agentState.SimulationResults))
	// Create prompt for the risk manager; the DECISION/AMOUNT/REASONING
	// format matches what parseFinalDecision looks for.
	prompt := fmt.Sprintf(`You are a cautious risk manager. Your analyst proposed a strategy. You have run simulations to test it.
Based on the potential outcomes, make a final, concrete decision.
If results are highly variable or negative, reduce risk (e.g., buy/sell fewer shares, or hold).
Initial Proposal: %s
Simulation Results:
%s
Real Market State:
%s
Simulation Statistics:
- Average Return: %.2f%%
- Best Case: %.2f%%
- Worst Case: %.2f%%
- Positive Outcomes: %d/%d
Respond in the following format:
DECISION: [buy|sell|hold]
AMOUNT: [number of shares, 0 if holding]
REASONING: [final reasoning, referencing simulation results]`,
		agentState.ProposedAction.Strategy,
		resultsSummary.String(),
		agentState.RealMarket.GetStateString(),
		avgReturn, maxReturn, minReturn,
		int(positiveCount), len(agentState.SimulationResults))
	// Call LLM
	llm := state["llm"].(llms.Model)
	resp, err := llms.GenerateFromSinglePrompt(ctx, llm, prompt)
	if err != nil {
		return nil, fmt.Errorf("risk manager LLM call failed: %w", err)
	}
	// Parse the response (defaults to "hold" / 0 shares if malformed).
	decision := parseFinalDecision(resp)
	agentState.FinalDecision = decision
	fmt.Printf("Final Decision: %s %.0f shares. Reason: %s\n",
		decision.Action, decision.Amount, decision.Reasoning)
	return state, nil
}
// ExecuteInRealWorldNode commits the refined decision to the real market.
// This is the "EXECUTE" step of the mental loop: unlike the simulations,
// this Step call mutates agentState.RealMarket for good.
func ExecuteInRealWorldNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	agent := state["agent_state"].(*AgentState)
	fmt.Println("\n--- ๐ Executing in Real World ---")
	market := agent.RealMarket
	fmt.Printf("Before: %s\n", market.GetStateString())
	market.Step(agent.FinalDecision.Action, agent.FinalDecision.Amount)
	fmt.Printf("After: %s\n", market.GetStateString())
	return state, nil
}
// ==================== Parsing Helpers ====================

// parseProposedAction extracts the STRATEGY and REASONING fields from an
// LLM response. Strategy defaults to "hold" and is lowercased with any
// markdown bold markers stripped; Reasoning defaults to the full response
// when no REASONING field can be found. Continuation lines after a
// REASONING header are appended, separated by spaces.
func parseProposedAction(response string) *ProposedAction {
	proposal := &ProposedAction{
		Strategy:  "hold",
		Reasoning: response,
	}
	inReasoning := false
	for _, raw := range strings.Split(response, "\n") {
		line := strings.TrimSpace(raw)
		upper := strings.ToUpper(line)
		switch {
		case strings.HasPrefix(upper, "STRATEGY:"):
			if parts := strings.SplitN(line, ":", 2); len(parts) > 1 {
				s := strings.TrimSpace(parts[1])
				// Remove markdown formatting like **STRATEGY:**
				s = strings.ReplaceAll(s, "**", "")
				proposal.Strategy = strings.ToLower(s)
			}
			inReasoning = false
		case strings.HasPrefix(upper, "REASONING:"):
			if parts := strings.SplitN(line, ":", 2); len(parts) > 1 {
				r := strings.TrimSpace(parts[1])
				// Remove markdown formatting
				proposal.Reasoning = strings.ReplaceAll(r, "**", "")
			}
			inReasoning = true
		case inReasoning && line != "":
			// Continue collecting reasoning lines.
			// BUG FIX: previously, continuation lines were dropped whenever
			// the REASONING: header had no inline text (Reasoning == ""),
			// leaving the reasoning permanently empty. Start fresh in that
			// case instead of refusing to append.
			if proposal.Reasoning == "" || proposal.Reasoning == response {
				proposal.Reasoning = line
			} else {
				proposal.Reasoning += " " + line
			}
		}
	}
	// If no explicit REASONING field was found, fall back to collecting the
	// non-empty lines after the first STRATEGY line.
	if proposal.Reasoning == response {
		lines := strings.Split(response, "\n")
		for i, line := range lines {
			if strings.Contains(strings.ToUpper(line), "STRATEGY:") {
				if i+1 < len(lines) {
					reasoningLines := []string{}
					for j := i + 1; j < len(lines); j++ {
						nextLine := strings.TrimSpace(lines[j])
						if nextLine != "" && !strings.HasPrefix(strings.ToUpper(nextLine), "STRATEGY:") {
							reasoningLines = append(reasoningLines, nextLine)
						}
					}
					if len(reasoningLines) > 0 {
						proposal.Reasoning = strings.Join(reasoningLines, " ")
					}
				}
				break
			}
		}
	}
	return proposal
}
// parseFinalDecision extracts the DECISION, AMOUNT, and REASONING fields
// from an LLM response. Defaults: action "hold", amount 0, reasoning set to
// the full response when no REASONING field is present.
func parseFinalDecision(response string) *FinalDecision {
	d := &FinalDecision{
		Action:    "hold",
		Reasoning: response,
	}
	for _, raw := range strings.Split(response, "\n") {
		ln := strings.TrimSpace(raw)
		key := strings.ToUpper(ln)
		switch {
		case strings.HasPrefix(key, "DECISION:"):
			if _, val, ok := strings.Cut(ln, ":"); ok {
				act := strings.ToLower(strings.TrimSpace(val))
				// Keep only the first word (e.g. "buy now" -> "buy").
				if fields := strings.Fields(act); len(fields) > 0 {
					act = fields[0]
				}
				d.Action = act
			}
		case strings.HasPrefix(key, "AMOUNT:"):
			if _, val, ok := strings.Cut(ln, ":"); ok {
				var qty float64
				// Best-effort parse; qty stays 0 if the value is not numeric.
				fmt.Sscanf(strings.TrimSpace(val), "%f", &qty)
				d.Amount = qty
			}
		case strings.HasPrefix(key, "REASONING:"):
			if _, val, ok := strings.Cut(ln, ":"); ok {
				d.Reasoning = strings.TrimSpace(val)
			}
		}
	}
	return d
}
// ==================== Main Function ====================

// main runs two iterations of the mental loop against a simulated market
// (a good-news day followed by a bad-news day) and prints a P&L summary.
// Requires the OPENAI_API_KEY environment variable; exits fatally without it.
func main() {
	// Check for API key
	if os.Getenv("OPENAI_API_KEY") == "" {
		log.Fatal("OPENAI_API_KEY environment variable is required")
	}
	fmt.Println("=== ๐ Mental Loop (Simulator-in-the-Loop) Architecture ===")
	fmt.Println()
	fmt.Println("This demo implements a trading agent that uses an internal simulator")
	fmt.Println("to test proposed actions before executing them in the real market.")
	fmt.Println()
	fmt.Println("Architecture: OBSERVE -> PROPOSE -> SIMULATE -> REFINE -> EXECUTE")
	fmt.Println()
	// Create LLM
	llm, err := openai.New()
	if err != nil {
		log.Fatal(err)
	}
	// Create the mental loop graph with map state
	workflow := graph.NewStateGraph[map[string]any]()
	// Add nodes (one per phase of the loop)
	workflow.AddNode("propose", "Observe and propose action", ProposeActionNode)
	workflow.AddNode("simulate", "Run simulations", RunSimulationNode)
	workflow.AddNode("refine", "Refine decision", RefineAndDecideNode)
	workflow.AddNode("execute", "Execute in real world", ExecuteInRealWorldNode)
	// Define edges: propose -> simulate -> refine -> execute
	workflow.AddEdge("propose", "simulate")
	workflow.AddEdge("simulate", "refine")
	workflow.AddEdge("refine", "execute")
	workflow.AddEdge("execute", graph.END)
	// Set entry point
	workflow.SetEntryPoint("propose")
	// Compile the graph
	app, err := workflow.Compile()
	if err != nil {
		log.Fatalf("Failed to compile graph: %v", err)
	}
	ctx := context.Background()
	// Create initial market state: $10k cash, no shares, stable market.
	realMarket := &MarketSimulator{
		Day:        0,
		Price:      100.0,
		Volatility: 0.1,  // Standard deviation for price changes
		Drift:      0.01, // General trend
		MarketNews: "Market is stable.",
		Portfolio: Portfolio{
			Cash:   10000.0,
			Shares: 0,
		},
	}
	fmt.Println("--- Initial Market State ---")
	fmt.Println(realMarket.GetStateString())
	// Day 1: Good News — bias the drift upward before running the loop.
	fmt.Println("\n--- Day 1: Good News Hits! ---")
	realMarket.MarketNews = "Positive earnings report expected."
	realMarket.Drift = 0.05
	agentState := &AgentState{RealMarket: realMarket}
	input := map[string]any{
		"llm":         llm,
		"agent_state": agentState,
	}
	result, err := app.Invoke(ctx, input)
	if err != nil {
		log.Fatalf("Mental loop execution failed: %v", err)
	}
	agentState = result["agent_state"].(*AgentState)
	// Day 2: Bad News — reuse the agent state mutated by day 1's run.
	fmt.Println("\n--- Day 2: Bad News Hits! ---")
	agentState.RealMarket.MarketNews = "New competitor enters the market."
	agentState.RealMarket.Drift = -0.05
	input = map[string]any{
		"llm":         llm,
		"agent_state": agentState,
	}
	result, err = app.Invoke(ctx, input)
	if err != nil {
		log.Fatalf("Mental loop execution failed: %v", err)
	}
	// Print final summary: portfolio value versus the initial $10k.
	fmt.Println("\n=== ๐ Final Results ===")
	finalState := result["agent_state"].(*AgentState)
	fmt.Printf("Final Market State: %s\n", finalState.RealMarket.GetStateString())
	initialValue := 10000.0
	finalValue := finalState.RealMarket.Portfolio.Value(finalState.RealMarket.Price)
	totalReturn := finalValue - initialValue
	returnPct := (totalReturn / initialValue) * 100
	fmt.Printf("\nTotal Return: $%.2f (%.2f%%)\n", totalReturn, returnPct)
	fmt.Println("\n=== ๐ฏ Key Takeaways ===")
	fmt.Println("The Mental Loop architecture enables agents to:")
	fmt.Println("1. Think before acting by simulating outcomes")
	fmt.Println("2. Assess risks before committing to real-world actions")
	fmt.Println("3. Refine strategies based on what-if analysis")
	fmt.Println("4. Make more nuanced, safer decisions in dynamic environments")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/file_checkpointing_resume/main.go | examples/file_checkpointing_resume/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"github.com/smallnest/langgraphgo/graph"
)
// main demonstrates file-based checkpointing with resume: a three-step graph
// is run until an interrupt fires after step 2 (phase 1), then execution is
// resumed from the persisted checkpoint at step 3 (phase 2), and the final
// state is verified to contain all three steps.
func main() {
	// Create a temporary directory for checkpoints
	checkpointDir := "./checkpoints_resume"
	if err := os.MkdirAll(checkpointDir, 0755); err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(checkpointDir) // Cleanup after run
	fmt.Printf("Using checkpoint directory: %s\n", checkpointDir)
	// Initialize FileCheckpointStore
	store, err := graph.NewFileCheckpointStore(checkpointDir)
	if err != nil {
		log.Fatalf("Failed to create checkpoint store: %v", err)
	}
	// Define a simplified setup function to create the graph logic.
	// The same linear topology (step1 -> step2 -> step3) is rebuilt for
	// each phase; each node just marks itself "done" in the state map.
	createGraph := func() *graph.CheckpointableStateGraph[map[string]any] {
		g := graph.NewCheckpointableStateGraph[map[string]any]()
		g.AddNode("step1", "step1", func(ctx context.Context, state map[string]any) (map[string]any, error) {
			fmt.Println(" [EXEC] Running Step 1")
			state["step1"] = "done"
			return state, nil
		})
		g.AddNode("step2", "step2", func(ctx context.Context, state map[string]any) (map[string]any, error) {
			fmt.Println(" [EXEC] Running Step 2")
			state["step2"] = "done"
			return state, nil
		})
		g.AddNode("step3", "step3", func(ctx context.Context, state map[string]any) (map[string]any, error) {
			fmt.Println(" [EXEC] Running Step 3")
			state["step3"] = "done"
			return state, nil
		})
		g.AddEdge("step1", "step2")
		g.AddEdge("step2", "step3")
		g.AddEdge("step3", graph.END)
		g.SetEntryPoint("step1")
		return g
	}
	// define common config shared by both phases (same thread, auto-save)
	threadID := "resume_thread"
	baseConfig := graph.CheckpointConfig{
		Store:    store,
		AutoSave: true,
	}
	// ---------------------------------------------------------
	// PHASE 1: Run until interrupted (after Step 2)
	// ---------------------------------------------------------
	fmt.Println("\n--- PHASE 1: Running until interruption after Step 2 ---")
	g1 := createGraph()
	g1.SetCheckpointConfig(baseConfig)
	runnable1, err := g1.CompileCheckpointable()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	initialState := map[string]any{
		"input": "start",
	}
	// Config with interrupt
	config1 := &graph.Config{
		Configurable: map[string]any{"thread_id": threadID},
		// We interrupt AFTER step 2 runs.
		// The graph will stop before executing step 3.
		InterruptAfter: []string{"step2"},
	}
	res1, err := runnable1.InvokeWithConfig(ctx, initialState, config1)
	if err != nil {
		// The interrupt surfaces as a *graph.GraphInterrupt error;
		// anything else is a genuine failure.
		if _, ok := err.(*graph.GraphInterrupt); ok {
			fmt.Printf(" [INFO] Graph interrupted as expected: %v\n", err)
		} else {
			log.Fatalf("Unexpected error in Phase 1: %v", err)
		}
	} else {
		// If it didn't return an error/interrupt, maybe it finished?
		fmt.Printf(" [WARN] Phase 1 finished without interrupt? Result: %v\n", res1)
	}
	// ---------------------------------------------------------
	// PHASE 2: Resume from the interrupted state
	// ---------------------------------------------------------
	fmt.Println("\n--- PHASE 2: Resuming from checkpoint ---")
	// 1. List checkpoints to find the latest state
	checkpoints, err := store.List(ctx, threadID)
	if err != nil {
		log.Fatal(err)
	}
	if len(checkpoints) == 0 {
		log.Fatal("No checkpoints found!")
	}
	// Sort by version (List implementation handles this but good to be sure or verify)
	// Get the latest checkpoint
	latestCP := checkpoints[len(checkpoints)-1]
	fmt.Printf(" [INFO] Resuming from checkpoint: ID=%s, Node=%s, Version=%d\n", latestCP.ID, latestCP.NodeName, latestCP.Version)
	fmt.Printf(" [INFO] State at checkpoint: %v\n", latestCP.State)
	// 2. Prepare for resume
	// We need to know where to resume FROM.
	// Since we interrupted AFTER step 2, we want to proceed to step 3.
	// We must pass the LAST state as initial state, and tell the graph
	// where to begin execution using `ResumeFrom` (overrides entry point).
	g2 := createGraph()
	g2.SetCheckpointConfig(baseConfig)
	runnable2, err := g2.CompileCheckpointable()
	if err != nil {
		log.Fatal(err)
	}
	config2 := &graph.Config{
		Configurable: map[string]any{"thread_id": threadID},
		ResumeFrom:   []string{"step3"}, // Start directly at step 3
	}
	// Use the state from the checkpoint.
	// The state in the checkpoint is a generic 'any'; after a JSON round
	// trip it may surface as map[string]interface{}, so try both casts.
	resumedState, ok := latestCP.State.(map[string]any)
	if !ok {
		// Try to handle map[string]interface{} from JSON unmarshal
		if m, ok := latestCP.State.(map[string]interface{}); ok {
			resumedState = m
		} else {
			log.Fatalf("Failed to cast checkpoint state to map[string]any: %T", latestCP.State)
		}
	}
	// Invoke
	res2, err := runnable2.InvokeWithConfig(ctx, resumedState, config2)
	if err != nil {
		log.Fatalf("Execution failed in Phase 2: %v", err)
	}
	fmt.Printf(" [INFO] Final Result: %v\n", res2)
	// Verify complete execution state: all three step markers must be set.
	finalMap := res2
	if finalMap["step1"] == "done" && finalMap["step2"] == "done" && finalMap["step3"] == "done" {
		fmt.Println(" [SUCCESS] Graph successfully resumed and completed all steps.")
	} else {
		fmt.Println(" [FAILURE] Final state missing steps.")
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/logger/main.go | examples/logger/main.go | package main
import (
"os"
"github.com/kataras/golog"
"github.com/smallnest/langgraphgo/log"
)
// main demonstrates adapting kataras/golog loggers to the langgraphgo log
// abstraction: the default logger, a customised logger, level filtering,
// and installing an adapter as the package-level default.
// (The log message strings below are left exactly as in the original file.)
func main() {
	// Example 1: adapt the default golog logger and lower its level to debug.
	defaultLogger := golog.Default
	logger1 := log.NewGologLogger(defaultLogger)
	logger1.Info("ไฝฟ็จ้ป่ฎค golog logger")
	logger1.SetLevel(log.LogLevelDebug)
	logger1.Debug("่ฐ่ฏไฟกๆฏ")
	// Example 2: build a custom golog logger with its own prefix and output.
	customLogger := golog.New()
	customLogger.SetPrefix("[ MyApp ] ")
	customLogger.SetOutput(os.Stdout)
	logger2 := log.NewGologLogger(customLogger)
	logger2.SetLevel(log.LogLevelInfo)
	logger2.Info("ไฝฟ็จ่ชๅฎไน golog logger")
	// Example 3: a golog logger restricted to the error level —
	// the Debug call below is filtered out, the Error call is shown.
	errorLogger := golog.New()
	errorLogger.SetLevel("error")
	errorLogger.SetPrefix("[ ERROR ] ")
	logger3 := log.NewGologLogger(errorLogger)
	logger3.Debug("่ฟๆกไธไผๆพ็คบ")
	logger3.Error("้่ฏฏไฟกๆฏไผๆพ็คบ")
	// Install logger1 as the package-level default and log through it.
	log.SetDefaultLogger(logger1)
	log.Info("ไฝฟ็จ้ป่ฎค golog logger")
	log.Debug("่ฐ่ฏไฟกๆฏ")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/rag_query_rewrite/main.go | examples/rag_query_rewrite/main.go | package main
import (
"context"
"fmt"
"github.com/smallnest/langgraphgo/graph"
)
// main builds a three-stage RAG pipeline (query rewrite -> retrieval ->
// generation) with simulated components and prints the produced answer.
func main() {
	pipeline := graph.NewStateGraph[map[string]any]()

	// Stage 1: rewrite the raw user query into a retrieval-friendly form.
	pipeline.AddNode("rewrite_query", "Rewrite user query for better retrieval", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		q, _ := state["query"].(string)
		fmt.Printf("Original query: %s\n", q)
		rewritten := "LangGraph architecture state management" // Simulated rewrite
		fmt.Printf("Rewritten query: %s\n", rewritten)
		return map[string]any{"rewritten_query": rewritten}, nil
	})

	// Stage 2: fetch (simulated) documents for the rewritten query.
	pipeline.AddNode("retrieve", "Retrieve documents", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		q, _ := state["rewritten_query"].(string)
		fmt.Printf("Retrieving documents for: %s\n", q)
		return map[string]any{
			"documents": []string{"Doc A: LangGraph manages state...", "Doc B: Graph nodes execution..."},
		}, nil
	})

	// Stage 3: produce the (simulated) answer from the documents.
	pipeline.AddNode("generate", "Generate Answer", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		docs, _ := state["documents"].([]string)
		fmt.Printf("Generating answer based on %d documents\n", len(docs))
		return map[string]any{"answer": "LangGraph uses a graph-based approach for state management."}, nil
	})

	pipeline.SetEntryPoint("rewrite_query")
	pipeline.AddEdge("rewrite_query", "retrieve")
	pipeline.AddEdge("retrieve", "generate")
	pipeline.AddEdge("generate", graph.END)

	app, err := pipeline.Compile()
	if err != nil {
		panic(err)
	}

	out, err := app.Invoke(context.Background(), map[string]any{"query": "How does LangGraph handle state?"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Answer: %s\n", out["answer"])
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/reflection_agent/main.go | examples/reflection_agent/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
)
// main runs three reflection-agent demonstrations (basic reflection,
// technical writing, code review). Requires the OPENAI_API_KEY environment
// variable; exits fatally without it.
func main() {
	// Check for API key
	if os.Getenv("OPENAI_API_KEY") == "" {
		log.Fatal("OPENAI_API_KEY environment variable is required")
	}

	// Create LLM
	model, err := openai.New()
	if err != nil {
		log.Fatalf("Failed to create OpenAI client: %v", err)
	}

	// FIX: was fmt.Println with a trailing "\n" inside the argument, which
	// go vet flags as a redundant newline; Printf preserves the exact output.
	fmt.Printf("=== Reflection Agent Example ===\n\n")

	// Example 1: Basic Reflection
	fmt.Println("--- Example 1: Basic Reflection ---")
	runBasicReflection(model)
	fmt.Println("\n" + strings.Repeat("=", 60) + "\n")

	// Example 2: Technical Writing with Custom Prompts
	fmt.Println("--- Example 2: Technical Writing with Custom Prompts ---")
	runTechnicalWriting(model)
	fmt.Println("\n" + strings.Repeat("=", 60) + "\n")

	// Example 3: Code Review Reflection
	fmt.Println("--- Example 3: Code Review Reflection ---")
	runCodeReview(model)
}
// runBasicReflection runs the reflection agent with default prompts for up
// to three draft/critique iterations and prints the final draft.
func runBasicReflection(model llms.Model) {
	agent, err := prebuilt.CreateReflectionAgentMap(prebuilt.ReflectionAgentConfig{
		Model:         model,
		MaxIterations: 3,
		Verbose:       true,
	})
	if err != nil {
		log.Fatalf("Failed to create agent: %v", err)
	}

	const query = "Explain the CAP theorem in distributed systems"
	fmt.Printf("Query: %s\n\n", query)

	// Seed the conversation with the single human question.
	state := map[string]any{
		"messages": []llms.MessageContent{{
			Role:  llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{llms.TextPart(query)},
		}},
	}

	result, err := agent.Invoke(context.Background(), state)
	if err != nil {
		log.Fatalf("Failed to invoke agent: %v", err)
	}
	printResults(result)
}
// runTechnicalWriting runs the reflection agent with a custom system message
// and a documentation-focused reflection prompt (up to two iterations),
// asking it to document a REST endpoint, then prints the results.
func runTechnicalWriting(model llms.Model) {
	config := prebuilt.ReflectionAgentConfig{
		Model:         model,
		MaxIterations: 2,
		Verbose:       true,
		SystemMessage: "You are an expert technical writer. Create clear, accurate, and comprehensive documentation.",
		ReflectionPrompt: `You are a senior technical editor reviewing documentation.
Evaluate the documentation for:
1. **Clarity**: Is it easy to understand for the target audience?
2. **Completeness**: Does it cover all necessary aspects?
3. **Examples**: Are there practical examples to illustrate concepts?
4. **Structure**: Is the information well-organized?
5. **Accuracy**: Is the technical information correct?
Provide specific, actionable feedback.`,
	}
	agent, err := prebuilt.CreateReflectionAgentMap(config)
	if err != nil {
		log.Fatalf("Failed to create agent: %v", err)
	}
	query := "Write documentation for a REST API endpoint that creates a new user account"
	fmt.Printf("Query: %s\n\n", query)
	// Seed the conversation with the single human request.
	initialState := map[string]any{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart(query)},
			},
		},
	}
	result, err := agent.Invoke(context.Background(), initialState)
	if err != nil {
		log.Fatalf("Failed to invoke agent: %v", err)
	}
	printResults(result)
}
// runCodeReview runs the reflection agent as a code reviewer (up to two
// iterations) over a deliberately flawed Go snippet (the snippet builds SQL
// by string concatenation), then prints the results.
func runCodeReview(model llms.Model) {
	config := prebuilt.ReflectionAgentConfig{
		Model:         model,
		MaxIterations: 2,
		Verbose:       true,
		SystemMessage: "You are an experienced software engineer providing code review feedback.",
		ReflectionPrompt: `You are a principal engineer reviewing code review comments.
Evaluate the review for:
1. **Constructiveness**: Is the feedback helpful and actionable?
2. **Completeness**: Are all important issues identified?
3. **Balance**: Does it acknowledge both strengths and weaknesses?
4. **Specificity**: Are suggestions concrete and clear?
5. **Tone**: Is the feedback professional and respectful?
Provide recommendations for improvement.`,
	}
	agent, err := prebuilt.CreateReflectionAgentMap(config)
	if err != nil {
		log.Fatalf("Failed to create agent: %v", err)
	}
	// Sample code under review; it is embedded verbatim in the query.
	codeSnippet := `
func getUserById(id int) (*User, error) {
var user User
err := db.QueryRow("SELECT * FROM users WHERE id = " + strconv.Itoa(id)).Scan(&user)
return &user, err
}
`
	query := fmt.Sprintf("Review this Go function and provide feedback:\n%s", codeSnippet)
	fmt.Printf("Query: Code review for getUserById function\n\n")
	// Seed the conversation with the single human request.
	initialState := map[string]any{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart(query)},
			},
		},
	}
	result, err := agent.Invoke(context.Background(), initialState)
	if err != nil {
		log.Fatalf("Failed to invoke agent: %v", err)
	}
	printResults(result)
}
// printResults prints the outcome of a reflection-agent run: the number of
// iterations performed, the final draft, and (when present) the final
// reflection critique.
//
// The state keys read here ("iteration", "draft", "reflection") are the
// ones this file's agents populate via prebuilt.CreateReflectionAgentMap.
func printResults(finalState map[string]any) {
	// iteration defaults to 0 when the key is missing or not an int.
	iteration, _ := finalState["iteration"].(int)
	// NOTE: the original literal was corrupted by a mis-encoded emoji that
	// split the string across two lines; restored to a single-line Printf.
	fmt.Printf("\n✅ Completed after %d iteration(s)\n\n", iteration)
	// Print final draft
	if draft, ok := finalState["draft"].(string); ok {
		fmt.Println("=== Final Response ===")
		fmt.Println(draft)
	}
	// Print last reflection (if available)
	if reflection, ok := finalState["reflection"].(string); ok {
		fmt.Println("\n=== Final Reflection ===")
		fmt.Println(reflection)
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/pev_agent/main.go | examples/pev_agent/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
"github.com/tmc/langchaingo/tools"
)
// CalculatorTool is a toy tool used by the PEV demo. It does not actually
// evaluate expressions; it only validates the input shape and returns a
// canned ("simulated") answer.
type CalculatorTool struct{}

// Name returns the identifier the agent uses to select this tool.
func (t CalculatorTool) Name() string { return "calculator" }

// Description tells the model what input format the tool expects.
func (t CalculatorTool) Description() string {
	return "Useful for basic math. Input: 'a op b' (e.g. '2 + 2')"
}

// Call validates that the input looks like "a op b" (exactly three
// whitespace-separated fields) and returns a simulated result string.
func (t CalculatorTool) Call(ctx context.Context, input string) (string, error) {
	if fields := strings.Fields(input); len(fields) != 3 {
		return "", fmt.Errorf("invalid format")
	}
	return fmt.Sprintf("Result of %s is simulated as 42", input), nil
}
// main wires a PEV (Plan-Execute-Verify) agent around a single toy
// calculator tool and runs one query against it.
//
// Requires OPENAI_API_KEY in the environment; the program exits fatally
// when it is missing or when any setup/invocation step fails.
func main() {
	if os.Getenv("OPENAI_API_KEY") == "" {
		log.Fatal("OPENAI_API_KEY not set")
	}
	model, err := openai.New()
	if err != nil {
		log.Fatal(err)
	}
	// MaxRetries bounds how many times the agent may retry its
	// plan/execute/verify loop.
	config := prebuilt.PEVAgentConfig{
		Model:      model,
		Tools:      []tools.Tool{CalculatorTool{}},
		MaxRetries: 3,
		Verbose:    true,
	}
	// Use map state convenience function
	agent, err := prebuilt.CreatePEVAgentMap(config)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	query := "Calculate 15 * 3 and verify if it's correct"
	fmt.Printf("User: %s\n\n", query)
	// Seed the graph state with a single human message.
	initialState := map[string]any{
		"messages": []llms.MessageContent{
			llms.TextParts(llms.ChatMessageTypeHuman, query),
		},
	}
	res, err := agent.Invoke(ctx, initialState)
	if err != nil {
		log.Fatal(err)
	}
	// The agent's answer is read back from the "final_answer" state key.
	fmt.Printf("\nFinal Answer: %v\n", res["final_answer"])
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/generic_state_graph_react_agent/main.go | examples/generic_state_graph_react_agent/main.go | package main
import (
"context"
"fmt"
_ "github.com/smallnest/langgraphgo/prebuilt"
_ "github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/tools"
)
// SimpleCounterTool is a toy stateful tool: every invocation bumps an
// internal counter by one, regardless of the input it receives.
type SimpleCounterTool struct {
	count int // number of times Call has been invoked
}

// Name returns the identifier used to address this tool.
func (c *SimpleCounterTool) Name() string {
	return "counter"
}

// Description advertises the tool to the model. Note that although it
// mentions adding an arbitrary number, Call simply increments by one.
func (c *SimpleCounterTool) Description() string {
	return "A simple counter tool. Input should be the number to add to the counter."
}

// Call increments the counter and reports its new value. The context and
// input are ignored.
func (c *SimpleCounterTool) Call(ctx context.Context, input string) (string, error) {
	c.count++
	return fmt.Sprintf("Counter is now: %d", c.count), nil
}
// EchoTool is a stateless tool that returns its input prefixed with
// "Echo: ".
type EchoTool struct{}

// Name returns the tool identifier.
func (e *EchoTool) Name() string {
	return "echo"
}

// Description advertises the tool to the model.
func (e *EchoTool) Description() string {
	return "Echoes back the input text"
}

// Call returns the input text unchanged apart from the "Echo: " prefix.
// The context is ignored and the error is always nil.
func (e *EchoTool) Call(ctx context.Context, input string) (string, error) {
	return "Echo: " + input, nil
}
func main() {
fmt.Println("๐ค Typed ReAct Agent Example")
fmt.Println("==========================")
// Create tools
tools := []tools.Tool{
&SimpleCounterTool{},
&EchoTool{},
}
// Note: You would typically use a real LLM here
// For this example, we'll just show the structure
fmt.Println("\nAvailable tools:")
for _, tool := range tools {
fmt.Printf("- %s: %s\n", tool.Name(), tool.Description())
}
// Example of creating a typed ReAct agent
fmt.Println("\n๐ก Creating Typed ReAct Agent...")
fmt.Println("```go")
fmt.Println("// Define your state type")
fmt.Println("type AgentState struct {")
fmt.Println(" Messages []llms.MessageContent")
fmt.Println("}")
fmt.Println("")
fmt.Println("// Create the agent")
fmt.Println("agent, err := prebuilt.CreateReactAgentTyped[model, []tools.Tool]()")
fmt.Println("```")
// Example with custom state
fmt.Println("\n๐ก Creating ReAct Agent with Custom State...")
fmt.Println("```go")
fmt.Println("// Define custom state")
fmt.Println("type CustomState struct {")
fmt.Println(" Messages []llms.MessageContent")
fmt.Println(" StepCount int")
fmt.Println(" ToolUseCount map[string]int")
fmt.Println("}")
fmt.Println("")
fmt.Println("// Create agent with custom state")
fmt.Println("agent, err := prebuilt.CreateReactAgentWithCustomStateTyped[CustomState] (")
fmt.Println(" model,")
fmt.Println(" tools,")
fmt.Println(" func(s CustomState) []llms.MessageContent { return s.Messages },")
fmt.Println(" func(s CustomState, msgs []llms.MessageContent) CustomState {")
fmt.Println(" s.Messages = msgs")
fmt.Println(" s.StepCount++")
fmt.Println(" return s")
fmt.Println(" },")
fmt.Println(" func(msgs []llms.MessageContent) bool { /* check for tool calls */ },")
fmt.Println(")")
fmt.Println("```")
// Demonstrate type safety benefits
fmt.Println("\nโจ Benefits of Typed ReAct Agent:")
fmt.Println("1. Compile-time type safety - no runtime type assertions needed!")
fmt.Println("2. Better IDE support with full autocomplete")
fmt.Println("3. Self-documenting code with explicit state types")
fmt.Println("4. Custom state with additional fields (counters, metadata, etc.)")
fmt.Println("5. Type-safe tool integration")
// Example of how you would use it
fmt.Println("\n๐ Example Usage Pattern:")
fmt.Println("```go")
fmt.Println("// Initial state")
fmt.Println("state := ReactAgentState{")
fmt.Println(" Messages: []llms.MessageContent{")
fmt.Println(" llms.TextParts(llms.ChatMessageTypeHuman, \"What's 2+2?\"),")
fmt.Println(" },")
fmt.Println("}")
fmt.Println("")
fmt.Println("// Execute agent")
fmt.Println("result, err := agent.Invoke(ctx, state)")
fmt.Println("")
fmt.Println("// Result is fully typed!")
fmt.Println("fmt.Printf(\"Final messages: %v\\n\", result.Messages)")
fmt.Println("```")
fmt.Println("\nโ
Example completed successfully!")
fmt.Println("\nNote: To run with a real LLM, replace the model parameter")
fmt.Println(" with an actual LLM instance (e.g., OpenAI, Anthropic, etc.)")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/rag_falkordb_debug_query/main.go | examples/rag_falkordb_debug_query/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/redis/go-redis/v9"
"github.com/smallnest/langgraphgo/rag"
"github.com/smallnest/langgraphgo/rag/store"
)
// main is a step-by-step diagnostic for FalkorDB query problems: it issues
// raw GRAPH.QUERY commands over the Redis protocol, inspects the returned
// reply structures, then exercises the project's knowledge-graph wrapper
// (store.NewFalkorDBGraph) against the same "simple_rag_graph" graph.
//
// It assumes a FalkorDB instance listening on localhost:6379. Query
// failures are logged and execution continues so every probe gets a chance
// to run; only a failure to construct the knowledge graph returns early.
// (The Chinese runtime strings below are kept byte-for-byte as found.)
func main() {
	ctx := context.Background()
	// Connect to FalkorDB (it speaks the Redis protocol).
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()
	fmt.Println("=== ่ฐ่ฏFalkorDBๆฅ่ฏข้ฎ้ข ===\n")
	// Step 1: count every node in the graph.
	fmt.Println("1. ๆฃๆฅๆๆ่็น:")
	allNodesQuery := "MATCH (n) RETURN count(n) as count"
	result, err := client.Do(ctx, "GRAPH.QUERY", "simple_rag_graph", allNodesQuery, "--compact").Result()
	if err != nil {
		log.Printf("ๆฅ่ฏข่็นๆฐๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("ๆป่็นๆฐ: %v\n", result)
	}
	// Step 2: list every label present in the graph.
	fmt.Println("\n2. ๆฃๆฅๆๆๆ ็ญพ:")
	allLabelsQuery := "CALL db.labels() YIELD label RETURN label"
	result, err = client.Do(ctx, "GRAPH.QUERY", "simple_rag_graph", allLabelsQuery, "--compact").Result()
	if err != nil {
		log.Printf("ๆฅ่ฏขๆ ็ญพๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("ๆๆๆ ็ญพ: %v\n", result)
	}
	// Step 3: fetch nodes labeled PERSON and show the raw rows.
	fmt.Println("\n3. ๆฃๆฅPERSON่็น:")
	personQuery := "MATCH (n:PERSON) RETURN n"
	result, err = client.Do(ctx, "GRAPH.QUERY", "simple_rag_graph", personQuery, "--compact").Result()
	if err != nil {
		log.Printf("ๆฅ่ฏขPERSON่็นๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("PERSON่็น: %v\n", result)
		// The compact reply is indexed [header, rows, stats]; the result
		// rows live at index 1.
		if r, ok := result.([]interface{}); ok && len(r) > 1 {
			if rows, ok := r[1].([]interface{}); ok {
				fmt.Printf("ๆพๅฐ %d ไธชPERSON่็น\n", len(rows))
				for i, row := range rows {
					fmt.Printf(" [%d] %v\n", i, row)
				}
			}
		}
	}
	// Step 4: dump one node (id = 'john_smith') with labels and properties.
	fmt.Println("\n4. ๆฃๆฅjohn_smith่็น็่ฏฆ็ปไฟกๆฏ:")
	detailQuery := "MATCH (n) WHERE n.id = 'john_smith' RETURN n, labels(n), properties(n)"
	result, err = client.Do(ctx, "GRAPH.QUERY", "simple_rag_graph", detailQuery, "--compact").Result()
	if err != nil {
		log.Printf("ๆฅ่ฏข่ฏฆ็ปไฟกๆฏๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("่ฏฆ็ปไฟกๆฏ: %v\n", result)
	}
	// Step 5: run a query without --compact to compare reply shapes.
	fmt.Println("\n5. ๆต่ฏไธไฝฟ็จcompactๆจกๅผ:")
	normalQuery := "MATCH (n:PERSON) RETURN n.id, n.name"
	result, err = client.Do(ctx, "GRAPH.QUERY", "simple_rag_graph", normalQuery).Result()
	if err != nil {
		log.Printf("ๆฎ้ๆฅ่ฏขๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("ๆฎ้ๆฅ่ฏข็ปๆ: %v\n", result)
	}
	// Step 6: create a fresh test node whose shape we fully control.
	fmt.Println("\n6. ๅๅปบๆฐ็ๆต่ฏ่็น:")
	createQuery := "CREATE (p:Person {id: 'test_person', name: 'Test Person', role: 'test'})"
	result, err = client.Do(ctx, "GRAPH.QUERY", "simple_rag_graph", createQuery, "--compact").Result()
	if err != nil {
		log.Printf("ๅๅปบๆต่ฏ่็นๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("ๅๅปบ็ปๆ: %v\n", result)
	}
	// Step 7: re-run the PERSON query after the insert.
	fmt.Println("\n7. ๅๆฌกๆฅ่ฏขPerson่็น:")
	result, err = client.Do(ctx, "GRAPH.QUERY", "simple_rag_graph", personQuery, "--compact").Result()
	if err != nil {
		log.Printf("ๆฅ่ฏขPERSON่็นๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("PERSON่็น: %v\n", result)
	}
	// Step 8: switch to the knowledge-graph interface and repeat the probes.
	fmt.Println("\n8. ไฝฟ็จ็ฅ่ฏๅพๆฅๅฃๆต่ฏ:")
	kg, err := store.NewFalkorDBGraph("falkordb://localhost:6379/simple_rag_graph")
	if err != nil {
		log.Printf("ๅๅปบ็ฅ่ฏๅพๅคฑ่ดฅ: %v", err)
		return
	}
	defer func() {
		// Close the underlying connection when the concrete type is exposed.
		if falkorDB, ok := kg.(*store.FalkorDBGraph); ok {
			falkorDB.Close()
		}
	}()
	// GetEntity should find the node created in step 6.
	fmt.Println("ๆต่ฏGetEntity:")
	testEntity, err := kg.GetEntity(ctx, "test_person")
	if err != nil {
		log.Printf("่ทๅtest_personๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("่ทๅๅฐtest_person: ID=%s, Name=%s, Type=%s\n", testEntity.ID, testEntity.Name, testEntity.Type)
		fmt.Printf("Properties: %+v\n", testEntity.Properties)
	}
	// Manually unpack the raw reply for test_person to debug the parser:
	// walk reply -> rows -> first row -> first column (the node), printing
	// each nested element with its dynamic type.
	fmt.Println("\n่ฐ่ฏtest_person็ๅๅงๆฐๆฎ:")
	debugQuery := "MATCH (n) WHERE n.id = 'test_person' RETURN n"
	debugResult, err := client.Do(ctx, "GRAPH.QUERY", "simple_rag_graph", debugQuery, "--compact").Result()
	if err != nil {
		log.Printf("่ฐ่ฏๆฅ่ฏขๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("test_personๅๅงๆฐๆฎ: %v\n", debugResult)
		if r, ok := debugResult.([]interface{}); ok && len(r) > 1 {
			if rows, ok := r[1].([]interface{}); ok && len(rows) > 0 {
				if row, ok := rows[0].([]interface{}); ok && len(row) > 0 {
					if node, ok := row[0].([]interface{}); ok {
						fmt.Printf("node็ปๆ้ฟๅบฆ: %d\n", len(node))
						for i, part := range node {
							fmt.Printf(" [%d] type: %T, value: %v\n", i, part, part)
							if arr, ok := part.([]interface{}); ok {
								fmt.Printf(" ๅญๆฐ็ป้ฟๅบฆ: %d\n", len(arr))
								for j, subpart := range arr {
									fmt.Printf(" [%d] type: %T, value: %v\n", j, subpart, subpart)
								}
							}
						}
					}
				}
			}
		}
	}
	// Finally, exercise the structured Query API with a type filter.
	fmt.Println("\nๆต่ฏQueryๆนๆณ:")
	graphQuery := &rag.GraphQuery{
		EntityTypes: []string{"PERSON"},
		Limit:       10,
	}
	queryResult, err := kg.Query(ctx, graphQuery)
	if err != nil {
		log.Printf("Queryๅคฑ่ดฅ: %v", err)
	} else {
		fmt.Printf("Query็ปๆ: ๆพๅฐ%dไธชๅฎไฝ, %dไธชๅ
ณ็ณป\n", len(queryResult.Entities), len(queryResult.Relationships))
		for i, entity := range queryResult.Entities {
			fmt.Printf(" [%d] %s (%s)\n", i, entity.Name, entity.Type)
		}
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/custom_reducer/main.go | examples/custom_reducer/main.go | package main
import (
"context"
"fmt"
"github.com/smallnest/langgraphgo/graph"
)
// main builds a tiny fan-out graph to demonstrate a custom reducer: the
// "tags" state key uses graph.AppendReducer, so the values produced by the
// two parallel tagger nodes accumulate instead of overwriting each other.
func main() {
	workflow := graph.NewStateGraph[map[string]any]()

	// Register the accumulating reducer for the "tags" key.
	tagSchema := graph.NewMapSchema()
	tagSchema.RegisterReducer("tags", graph.AppendReducer)
	workflow.SetSchema(tagSchema)

	// addTag builds a node handler that contributes the given tag values.
	// A fresh slice is returned on every invocation so node outputs never
	// share backing storage.
	addTag := func(values ...string) func(context.Context, map[string]any) (map[string]any, error) {
		return func(ctx context.Context, state map[string]any) (map[string]any, error) {
			tags := append([]string(nil), values...)
			return map[string]any{"tags": tags}, nil
		}
	}
	workflow.AddNode("start", "start", addTag("initial"))
	workflow.AddNode("tagger_a", "tagger_a", addTag("A"))
	workflow.AddNode("tagger_b", "tagger_b", addTag("B"))

	// "start" fans out to both taggers, which run in parallel and both end.
	workflow.SetEntryPoint("start")
	workflow.AddEdge("start", "tagger_a")
	workflow.AddEdge("start", "tagger_b")
	workflow.AddEdge("tagger_a", graph.END)
	workflow.AddEdge("tagger_b", graph.END)

	app, err := workflow.Compile()
	if err != nil {
		panic(err)
	}

	out, err := app.Invoke(context.Background(), map[string]any{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Result tags: %v\n", out["tags"])
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/streaming_pipeline/main.go | examples/streaming_pipeline/main.go | package main
import (
"context"
"fmt"
"strings"
"time"
"github.com/smallnest/langgraphgo/graph"
)
// main wires a three-stage streaming pipeline (analyze -> enhance ->
// summarize) and consumes its event stream, relying on a shared
// ProgressListener attached to every node for progress output.
//
// Each node sleeps briefly so the streaming progress is observable.
func main() {
	// Create a streaming graph for a text processing pipeline
	g := graph.NewStreamingStateGraph[map[string]any]()
	// analyze: reads "input" (must be a string; a missing or wrongly typed
	// value panics on the assertion) and records its length.
	analyze := g.AddNode("analyze", "analyze", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		input := state["input"].(string)
		time.Sleep(200 * time.Millisecond)
		return map[string]any{"analysis": fmt.Sprintf("Length: %d", len(input))}, nil
	})
	// enhance: uppercases "input" into "enhanced".
	enhance := g.AddNode("enhance", "enhance", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		input := state["input"].(string)
		time.Sleep(300 * time.Millisecond)
		return map[string]any{"enhanced": strings.ToUpper(input)}, nil
	})
	// summarize: combines both upstream results; assumes "analysis" and
	// "enhanced" were merged into the state before this node runs.
	summarize := g.AddNode("summarize", "summarize", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		analysis := state["analysis"].(string)
		enhanced := state["enhanced"].(string)
		time.Sleep(200 * time.Millisecond)
		return map[string]any{"summary": fmt.Sprintf("%s -> %s", analysis, enhanced)}, nil
	})
	// One listener instance is shared by all nodes; WithTiming(true) makes
	// it include per-node durations in its output.
	progressListener := graph.NewProgressListener().WithTiming(true)
	analyze.AddListener(progressListener)
	enhance.AddListener(progressListener)
	summarize.AddListener(progressListener)
	// Linear flow: analyze -> enhance -> summarize -> END.
	g.SetEntryPoint("analyze")
	g.AddEdge("analyze", "enhance")
	g.AddEdge("enhance", "summarize")
	g.AddEdge("summarize", graph.END)
	// Compile
	runnable, err := g.CompileStreaming()
	if err != nil {
		panic(err)
	}
	// Execute with streaming
	fmt.Println("Starting pipeline...")
	input := map[string]any{"input": "hello world"}
	// Stream returns a channel wrapper
	streamResult := runnable.Stream(context.Background(), input)
	// Drain the event channel; an event-level error aborts the program.
	for event := range streamResult.Events {
		if event.Error != nil {
			fmt.Printf("Error: %v\n", event.Error)
			return
		}
		// The ProgressListener handles progress printing. Here we only
		// surface the intermediate state once "enhance" completes.
		if event.Event == graph.NodeEventComplete {
			if event.NodeName == "enhance" {
				fmt.Printf(">> Intermediate result: %v\n", event.State)
			}
		}
	}
	fmt.Println("Pipeline finished.")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/memory_basic/main.go | examples/memory_basic/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/memory"
"github.com/tmc/langchaingo/llms"
langchainmemory "github.com/tmc/langchaingo/memory"
)
// main runs each memory demo in sequence. The section headers are printed
// here; every demo receives the same background context.
func main() {
	ctx := context.Background()

	// Table of demos, executed in order. Headers after the first carry a
	// leading newline so the sections are visually separated.
	demos := []struct {
		header string
		run    func(context.Context)
	}{
		{"=== Example 1: Basic ConversationBuffer ===", basicMemoryExample},
		{"\n=== Example 2: ConversationWindowBuffer ===", windowMemoryExample},
		{"\n=== Example 3: ChatMessageHistory ===", chatHistoryExample},
		{"\n=== Example 4: Custom Memory Keys ===", customKeysExample},
		{"\n=== Example 5: Memory Integration Pattern ===", memoryIntegrationPattern},
	}
	for _, d := range demos {
		fmt.Println(d.header)
		d.run(ctx)
	}
}
// basicMemoryExample demonstrates basic conversation buffer usage: each
// user/AI turn is stored with SaveContext, then the full transcript is
// read back with GetMessages.
func basicMemoryExample(ctx context.Context) {
	// WithReturnMessages(true) makes the memory hand back structured chat
	// messages rather than a flattened history string.
	mem := memory.NewConversationBufferMemory(
		langchainmemory.WithReturnMessages(true),
	)
	// Simulate a conversation
	conversations := []struct {
		input  string
		output string
	}{
		{"Hello, my name is Alice", "Hi Alice! Nice to meet you."},
		{"What's my name?", "Your name is Alice."},
		{"What did I just ask you?", "You asked me what your name is."},
	}
	for _, conv := range conversations {
		// Save the conversation turn; "input"/"output" are the default keys.
		err := mem.SaveContext(ctx, map[string]any{
			"input": conv.input,
		}, map[string]any{
			"output": conv.output,
		})
		if err != nil {
			log.Fatalf("Failed to save context: %v", err)
		}
		fmt.Printf("User: %s\n", conv.input)
		fmt.Printf("AI: %s\n", conv.output)
	}
	// Each saved turn yields two messages (human + AI).
	messages, err := mem.GetMessages(ctx)
	if err != nil {
		log.Fatalf("Failed to get messages: %v", err)
	}
	fmt.Printf("\nTotal messages in memory: %d\n", len(messages))
	for i, msg := range messages {
		fmt.Printf(" [%d] %s: %s\n", i+1, msg.GetType(), msg.GetContent())
	}
}
// windowMemoryExample demonstrates the conversation window buffer, which
// retains only the most recent N turns — here N=2, i.e. four messages.
func windowMemoryExample(ctx context.Context) {
	// Create a window buffer that keeps only the last 2 conversation turns.
	mem := memory.NewConversationWindowBufferMemory(2,
		langchainmemory.WithReturnMessages(true),
	)
	// Simulate multiple conversation turns
	conversations := []struct {
		input  string
		output string
	}{
		{"First question", "First answer"},
		{"Second question", "Second answer"},
		{"Third question", "Third answer"},
		{"Fourth question", "Fourth answer"},
	}
	for i, conv := range conversations {
		err := mem.SaveContext(ctx, map[string]any{
			"input": conv.input,
		}, map[string]any{
			"output": conv.output,
		})
		if err != nil {
			log.Fatalf("Failed to save context: %v", err)
		}
		fmt.Printf("Turn %d - User: %s | AI: %s\n", i+1, conv.input, conv.output)
	}
	// Older turns were evicted: only the last 2 turns (4 messages) remain.
	messages, err := mem.GetMessages(ctx)
	if err != nil {
		log.Fatalf("Failed to get messages: %v", err)
	}
	fmt.Printf("\nMessages in window (last 2 turns): %d\n", len(messages))
	for i, msg := range messages {
		fmt.Printf(" [%d] %s: %s\n", i+1, msg.GetType(), msg.GetContent())
	}
}
// chatHistoryExample demonstrates direct chat message history usage:
// system/user/AI messages are appended explicitly rather than through
// SaveContext input/output pairs.
func chatHistoryExample(ctx context.Context) {
	// Create a chat message history
	history := memory.NewChatMessageHistory()
	// A system message goes through the generic AddMessage API; user and
	// AI messages have dedicated helpers below.
	err := history.AddMessage(ctx, llms.SystemChatMessage{
		Content: "You are a helpful assistant.",
	})
	if err != nil {
		log.Fatalf("Failed to add system message: %v", err)
	}
	err = history.AddUserMessage(ctx, "Hello!")
	if err != nil {
		log.Fatalf("Failed to add user message: %v", err)
	}
	err = history.AddAIMessage(ctx, "Hi! How can I help you today?")
	if err != nil {
		log.Fatalf("Failed to add AI message: %v", err)
	}
	// Read everything back in insertion order.
	messages, err := history.Messages(ctx)
	if err != nil {
		log.Fatalf("Failed to get messages: %v", err)
	}
	fmt.Printf("Total messages: %d\n", len(messages))
	for i, msg := range messages {
		fmt.Printf(" [%d] %s: %s\n", i+1, msg.GetType(), msg.GetContent())
	}
}
// customKeysExample demonstrates overriding the default "input"/"output"
// context keys, the "history" memory key, and the human/AI display
// prefixes.
func customKeysExample(ctx context.Context) {
	// The keys passed to SaveContext below must match these options.
	mem := memory.NewConversationBufferMemory(
		langchainmemory.WithInputKey("user_input"),
		langchainmemory.WithOutputKey("ai_response"),
		langchainmemory.WithMemoryKey("chat_history"),
		langchainmemory.WithReturnMessages(true),
		langchainmemory.WithHumanPrefix("User"),
		langchainmemory.WithAIPrefix("Assistant"),
	)
	// Save context with custom keys
	err := mem.SaveContext(ctx, map[string]any{
		"user_input": "What's the weather like?",
	}, map[string]any{
		"ai_response": "I don't have access to real-time weather data.",
	})
	if err != nil {
		log.Fatalf("Failed to save context: %v", err)
	}
	// The loaded variables are keyed by the custom memory key.
	memVars, err := mem.LoadMemoryVariables(ctx, map[string]any{})
	if err != nil {
		log.Fatalf("Failed to load memory variables: %v", err)
	}
	fmt.Printf("Memory variables keys: %v\n", getKeys(memVars))
	// Get messages
	messages, err := mem.GetMessages(ctx)
	if err != nil {
		log.Fatalf("Failed to get messages: %v", err)
	}
	fmt.Printf("Messages: %d\n", len(messages))
	for i, msg := range messages {
		fmt.Printf(" [%d] %s: %s\n", i+1, msg.GetType(), msg.GetContent())
	}
}
// memoryIntegrationPattern demonstrates the load -> process -> save cycle
// that a graph node would perform around each LLM call.
func memoryIntegrationPattern(ctx context.Context) {
	fmt.Println("This example shows the pattern for integrating memory with LangGraph:")
	fmt.Println()
	// Create memory
	mem := memory.NewConversationBufferMemory(
		langchainmemory.WithReturnMessages(true),
	)
	// Simulate conversation turns
	conversations := []struct {
		input  string
		output string
	}{
		{"Hello, my name is Bob", "Hi Bob! Nice to meet you."},
		{"What's my name?", "Your name is Bob."},
	}
	for i, conv := range conversations {
		fmt.Printf("[Turn %d]\n", i+1)
		// This is what would happen in a graph node:
		// 1. Load memory (errors are deliberately ignored in this demo).
		memVars, _ := mem.LoadMemoryVariables(ctx, map[string]any{})
		var historyMessages []llms.ChatMessage
		// With WithReturnMessages(true) the "history" value is a message
		// slice; the type assertion guards against other configurations.
		if history, ok := memVars["history"]; ok {
			if msgs, ok := history.([]llms.ChatMessage); ok {
				historyMessages = msgs
			}
		}
		fmt.Printf(" Memory contains %d messages\n", len(historyMessages))
		// 2. Process (simulate LLM call with history + current input)
		fmt.Printf(" User: %s\n", conv.input)
		fmt.Printf(" AI: %s\n", conv.output)
		// 3. Save to memory (best-effort; error ignored in this demo).
		mem.SaveContext(ctx, map[string]any{
			"input": conv.input,
		}, map[string]any{
			"output": conv.output,
		})
		fmt.Println()
	}
	// Show final memory state
	messages, _ := mem.GetMessages(ctx)
	fmt.Printf("Final memory contains %d messages\n", len(messages))
	fmt.Println("\nIn a real graph node, you would:")
	fmt.Println(" 1. Load memory variables before LLM call")
	fmt.Println(" 2. Combine history + current input")
	fmt.Println(" 3. Call LLM with combined messages")
	fmt.Println(" 4. Save input/output to memory")
}
// getKeys collects the key set of m into a slice. Order is unspecified,
// mirroring Go's map iteration order; an empty map yields an empty
// (non-nil) slice.
func getKeys(m map[string]any) []string {
	out := make([]string, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	return out
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/state_schema/main.go | examples/state_schema/main.go | package main
import (
"context"
"fmt"
"github.com/smallnest/langgraphgo/graph"
)
// main demonstrates a state schema with an accumulating reducer: each node
// in the linear A -> B -> C chain contributes its letter to the "steps"
// key, which AppendReducer accumulates instead of replacing.
func main() {
	sg := graph.NewStateGraph[map[string]any]()

	// "steps" accumulates values across nodes via AppendReducer.
	schema := graph.NewMapSchema()
	schema.RegisterReducer("steps", graph.AppendReducer)
	sg.SetSchema(schema)

	// step builds a node handler that emits a single step label.
	step := func(label string) func(context.Context, map[string]any) (map[string]any, error) {
		return func(ctx context.Context, state map[string]any) (map[string]any, error) {
			return map[string]any{"steps": label}, nil
		}
	}
	sg.AddNode("node_a", "node_a", step("A"))
	sg.AddNode("node_b", "node_b", step("B"))
	sg.AddNode("node_c", "node_c", step("C"))

	// Linear chain: node_a -> node_b -> node_c -> END.
	sg.SetEntryPoint("node_a")
	sg.AddEdge("node_a", "node_b")
	sg.AddEdge("node_b", "node_c")
	sg.AddEdge("node_c", graph.END)

	app, err := sg.Compile()
	if err != nil {
		panic(err)
	}

	final, err := app.Invoke(context.Background(), map[string]any{"steps": []string{}})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Steps: %v\n", final["steps"])
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/time_travel/main.go | examples/time_travel/main.go | package main
import (
"context"
"fmt"
"log"
"time"
"github.com/smallnest/langgraphgo/graph"
)
// main demonstrates checkpointing and "time travel": it runs a two-node
// graph with auto-saved checkpoints, then reloads the checkpoint captured
// at node A and forks a second run from that point with modified state.
func main() {
	// Create a checkpointable graph
	g := graph.NewCheckpointableStateGraph[map[string]any]()
	// Node A seeds the trace.
	g.AddNode("A", "A", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Println("Executing Node A")
		return map[string]any{"trace": []string{"A"}}, nil
	})
	// Node B appends to the trace; it assumes A already stored a []string
	// under "trace" (the type assertion panics otherwise).
	g.AddNode("B", "B", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		fmt.Println("Executing Node B")
		trace := state["trace"].([]string)
		return map[string]any{"trace": append(trace, "B")}, nil
	})
	g.SetEntryPoint("A")
	g.AddEdge("A", "B")
	g.AddEdge("B", graph.END)
	// Configure in-memory store; AutoSave snapshots state as nodes run.
	store := graph.NewMemoryCheckpointStore()
	g.SetCheckpointConfig(graph.CheckpointConfig{
		Store:    store,
		AutoSave: true,
	})
	runnable, err := g.CompileCheckpointable()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	// Run first time
	fmt.Println("--- First Run ---")
	res, err := runnable.Invoke(ctx, map[string]any{"input": "start"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Result 1: %v\n", res)
	// Checkpoint saves are asynchronous; give them a moment to land.
	time.Sleep(100 * time.Millisecond)
	// List checkpoints
	checkpoints, _ := runnable.ListCheckpoints(ctx)
	if len(checkpoints) == 0 {
		log.Fatal("No checkpoints found")
	}
	// "Time Travel": find the checkpoint captured at node A.
	var targetCP *graph.Checkpoint
	for _, cp := range checkpoints {
		if cp.NodeName == "A" {
			targetCP = cp
			break
		}
	}
	if targetCP != nil {
		fmt.Println("\n--- Time Travel (Resuming from Node A) ---")
		// Load the saved state for inspection. Resuming execution also
		// requires telling the runner where to continue (see ResumeFrom
		// in the config below).
		loadedState, err := runnable.LoadCheckpoint(ctx, targetCP.ID)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("Traveled back to state after Node A: %v\n", loadedState.State)
		// Fork: resume from node B on the same thread/checkpoint pair.
		config := &graph.Config{
			Configurable: map[string]any{
				"thread_id":     runnable.GetExecutionID(),
				"checkpoint_id": targetCP.ID,
			},
			ResumeFrom: []string{"B"},
		}
		// Mutate the loaded state before resuming, effectively branching
		// the run; "forked" marks the new branch.
		forkedState := loadedState.State.(map[string]any)
		forkedState["forked"] = true
		resFork, err := runnable.InvokeWithConfig(ctx, forkedState, config)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("Forked Result: %v\n", resFork)
	}
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/human_in_the_loop/main.go | examples/human_in_the_loop/main.go | package main
import (
"context"
"errors"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
)
// State represents the workflow state carried through the approval graph.
type State struct {
	Input    string // the request being processed (e.g. a deployment description)
	Approved bool   // set by the human reviewer while the run is interrupted
	Output   string // result text, annotated by each stage of the graph
}
// main demonstrates a human-in-the-loop workflow: the graph is interrupted
// before the "human_approval" node, the (simulated) human mutates the
// interrupted state to approve the request, and execution is resumed from
// the interrupted node with the updated state.
func main() {
	// Create a new graph with typed state
	g := graph.NewStateGraph[State]()
	// process_request: stamps the output from the input.
	g.AddNode("process_request", "process_request", func(ctx context.Context, state State) (State, error) {
		fmt.Printf("[Process] Processing request: %s\n", state.Input)
		state.Output = "Processed: " + state.Input
		return state, nil
	})
	// human_approval: reads the Approved flag that the human set while the
	// run was paused and annotates the output accordingly.
	g.AddNode("human_approval", "human_approval", func(ctx context.Context, state State) (State, error) {
		if state.Approved {
			fmt.Println("[Human] Request APPROVED.")
			state.Output += " (Approved)"
		} else {
			fmt.Println("[Human] Request REJECTED.")
			state.Output += " (Rejected)"
		}
		return state, nil
	})
	g.AddNode("finalize", "finalize", func(ctx context.Context, state State) (State, error) {
		fmt.Printf("[Finalize] Final output: %s\n", state.Output)
		return state, nil
	})
	// Define edges
	g.SetEntryPoint("process_request")
	g.AddEdge("process_request", "human_approval")
	g.AddEdge("human_approval", "finalize")
	g.AddEdge("finalize", graph.END)
	// Compile the graph
	runnable, err := g.Compile()
	if err != nil {
		log.Fatal(err)
	}
	// Initial state
	initialState := State{
		Input:    "Deploy to Production",
		Approved: false,
	}
	// Phase 1: run until the interrupt point. InterruptBefore pauses the
	// graph just before "human_approval" executes; the paused state is
	// delivered via a *graph.GraphInterrupt error.
	fmt.Println("=== Starting Workflow (Phase 1) ===")
	config := &graph.Config{
		InterruptBefore: []string{"human_approval"},
	}
	res, err := runnable.InvokeWithConfig(context.Background(), initialState, config)
	// We expect an interrupt error here; anything else is fatal.
	if err != nil {
		var interrupt *graph.GraphInterrupt
		if errors.As(err, &interrupt) {
			fmt.Printf("Workflow interrupted at node: %s\n", interrupt.Node)
			fmt.Printf("Current State: %+v\n", interrupt.State)
		} else {
			log.Fatalf("Unexpected error: %v", err)
		}
	} else {
		// If it didn't interrupt, that's unexpected for this example
		fmt.Printf("Workflow completed without interrupt: %+v\n", res)
		return
	}
	// Simulate Human Interaction
	fmt.Println("\n=== Human Interaction ===")
	fmt.Println("Reviewing request...")
	fmt.Println("Approving request...")
	// Re-extract the interrupt; the ignored boolean is safe because the
	// branch above already proved err wraps a *graph.GraphInterrupt.
	var interrupt *graph.GraphInterrupt
	errors.As(err, &interrupt)
	// interrupt.State is declared as any; for a typed graph it holds the
	// State struct, so the assertion below cannot fail here.
	currentState := interrupt.State.(State)
	currentState.Approved = true // Human approves
	// Phase 2: resume from the interrupted node with the mutated state.
	fmt.Println("\n=== Resuming Workflow (Phase 2) ===")
	resumeConfig := &graph.Config{
		ResumeFrom: []string{interrupt.Node}, // Resume from the interrupted node
	}
	finalRes, err := runnable.InvokeWithConfig(context.Background(), currentState, resumeConfig)
	if err != nil {
		log.Fatalf("Error resuming workflow: %v", err)
	}
	fmt.Printf("Workflow completed successfully.\n")
	fmt.Printf("Final Result: %+v\n", finalRes)
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/tree_of_thoughts/main.go | examples/tree_of_thoughts/main.go | package main
import (
"context"
"fmt"
"log"
"sort"
"strings"
"github.com/smallnest/langgraphgo/prebuilt"
)
// RiverState represents the state of the wolf-goat-cabbage river crossing puzzle
type RiverState struct {
LeftBank map[string]bool // Items on the left bank
RightBank map[string]bool // Items on the right bank
BoatLocation string // "left" or "right"
LastMove string // Description of the last move
}
// NewRiverState creates a new river state from the given bank contents, the
// boat location ("left" or "right"), and a description of the move that
// produced it. The maps are stored as-is, not copied.
func NewRiverState(left, right map[string]bool, boatLoc, lastMove string) *RiverState {
	state := new(RiverState)
	state.LeftBank = left
	state.RightBank = right
	state.BoatLocation = boatLoc
	state.LastMove = lastMove
	return state
}
// IsValid checks if the state is valid (no rule violations): on neither bank
// may the wolf be left alone with the goat, nor the goat alone with the
// cabbage, unless the farmer is present to supervise.
func (s *RiverState) IsValid() bool {
	return bankSafe(s.LeftBank) && bankSafe(s.RightBank)
}

// bankSafe reports whether a single bank is safe. A bank with the farmer on
// it is always safe; otherwise it must not hold a wolf+goat pair (wolf eats
// goat) or a goat+cabbage pair (goat eats cabbage).
func bankSafe(bank map[string]bool) bool {
	if bank["farmer"] {
		return true // the farmer supervises everything on his bank
	}
	if bank["wolf"] && bank["goat"] {
		return false // wolf eats goat
	}
	if bank["goat"] && bank["cabbage"] {
		return false // goat eats cabbage
	}
	return true
}
// IsGoal checks if all items are on the right bank, i.e. the puzzle is solved.
func (s *RiverState) IsGoal() bool {
	for _, item := range []string{"farmer", "wolf", "goat", "cabbage"} {
		if !s.RightBank[item] {
			return false
		}
	}
	return true
}
// GetDescription returns a human-readable one-line summary of the state,
// including the last move when one is recorded.
func (s *RiverState) GetDescription() string {
	var b strings.Builder
	fmt.Fprintf(&b, "Left: [%s] | Right: [%s] | Boat: %s",
		s.bankToString(s.LeftBank), s.bankToString(s.RightBank), s.BoatLocation)
	if s.LastMove != "" {
		fmt.Fprintf(&b, " | Move: %s", s.LastMove)
	}
	return b.String()
}
// bankToString renders the items present on a bank as a comma-separated
// list, always in the fixed order Farmer, Wolf, Goat, Cabbage.
func (s *RiverState) bankToString(bank map[string]bool) string {
	order := []struct{ key, label string }{
		{"farmer", "Farmer"},
		{"wolf", "Wolf"},
		{"goat", "Goat"},
		{"cabbage", "Cabbage"},
	}
	var items []string
	for _, entry := range order {
		if bank[entry.key] {
			items = append(items, entry.label)
		}
	}
	return strings.Join(items, ", ")
}
// Hash returns a deterministic, unique identifier for this state, suitable
// for visited-set deduplication during search. Present items on each bank
// are sorted so equal states always produce equal hashes.
func (s *RiverState) Hash() string {
	presentSorted := func(bank map[string]bool) string {
		items := make([]string, 0, len(bank))
		for item, ok := range bank {
			if ok {
				items = append(items, item)
			}
		}
		sort.Strings(items)
		return strings.Join(items, ",")
	}
	return fmt.Sprintf("L:%s|R:%s|B:%s",
		presentSorted(s.LeftBank),
		presentSorted(s.RightBank),
		s.BoatLocation)
}
// RiverPuzzleGenerator generates possible next states for the river puzzle.
type RiverPuzzleGenerator struct{}

// Generate enumerates every candidate successor of the current state: the
// farmer crossing alone, or crossing with whichever of wolf/goat/cabbage is
// on his side. Rule violations are NOT filtered here; the evaluator scores
// invalid states negatively so the search prunes them.
func (g *RiverPuzzleGenerator) Generate(ctx context.Context, current prebuilt.ThoughtState) ([]prebuilt.ThoughtState, error) {
	state := current.(*RiverState)
	var nextStates []prebuilt.ThoughtState

	// Determine which bank the farmer is on (source of the crossing)
	var fromBank, toBank map[string]bool
	var newBoatLoc string

	if state.BoatLocation == "left" {
		fromBank = state.LeftBank
		toBank = state.RightBank
		newBoatLoc = "right"
	} else {
		fromBank = state.RightBank
		toBank = state.LeftBank
		newBoatLoc = "left"
	}

	// Farmer must be on the same side as the boat; otherwise no move exists
	if !fromBank["farmer"] {
		return nextStates, nil
	}

	// Option 1: Farmer goes alone
	nextStates = append(nextStates, g.createNextState(state, fromBank, toBank, newBoatLoc, "", "Farmer crosses alone"))

	// Option 2: Farmer takes wolf
	if fromBank["wolf"] {
		nextStates = append(nextStates, g.createNextState(state, fromBank, toBank, newBoatLoc, "wolf", "Farmer takes Wolf"))
	}

	// Option 3: Farmer takes goat
	if fromBank["goat"] {
		nextStates = append(nextStates, g.createNextState(state, fromBank, toBank, newBoatLoc, "goat", "Farmer takes Goat"))
	}

	// Option 4: Farmer takes cabbage
	if fromBank["cabbage"] {
		nextStates = append(nextStates, g.createNextState(state, fromBank, toBank, newBoatLoc, "cabbage", "Farmer takes Cabbage"))
	}

	return nextStates, nil
}
// createNextState produces the state reached from current by moving the
// farmer (and optionally one item) across the river.
//
// The second and third parameters (the caller's source/destination banks)
// were never used: the banks to mutate are re-derived from
// current.BoatLocation so that the freshly copied maps — not the caller's
// originals — are modified. They are blanked to `_` to make that explicit
// while keeping the call sites' positional signature intact.
func (g *RiverPuzzleGenerator) createNextState(
	current *RiverState,
	_, _ map[string]bool,
	newBoatLoc, item, moveDesc string,
) prebuilt.ThoughtState {
	// Deep-copy both banks so sibling states never share mutable maps.
	newLeft := make(map[string]bool, len(current.LeftBank))
	newRight := make(map[string]bool, len(current.RightBank))
	for k, v := range current.LeftBank {
		newLeft[k] = v
	}
	for k, v := range current.RightBank {
		newRight[k] = v
	}

	// Select the copies corresponding to the boat's current side.
	var newFrom, newTo map[string]bool
	if current.BoatLocation == "left" {
		newFrom = newLeft
		newTo = newRight
	} else {
		newFrom = newRight
		newTo = newLeft
	}

	// The farmer always crosses.
	newFrom["farmer"] = false
	newTo["farmer"] = true

	// Optionally ferry one item with him.
	if item != "" {
		newFrom[item] = false
		newTo[item] = true
	}

	return NewRiverState(newLeft, newRight, newBoatLoc, moveDesc)
}
// SimpleEvaluator provides a simple heuristic evaluation for search states.
type SimpleEvaluator struct{}

// Evaluate scores a candidate state. Invalid states (rule violations) get -1
// so the search prunes them; otherwise the score is the number of items
// already on the right bank (0-4), minus a small per-step penalty so shorter
// solutions rank higher.
func (e *SimpleEvaluator) Evaluate(ctx context.Context, state prebuilt.ThoughtState, pathLength int) (float64, error) {
	riverState := state.(*RiverState)

	// Prune invalid states
	if !riverState.IsValid() {
		return -1, nil
	}

	// Score based on progress: how many items are on the right bank
	score := 0.0
	if riverState.RightBank["farmer"] {
		score += 1.0
	}
	if riverState.RightBank["wolf"] {
		score += 1.0
	}
	if riverState.RightBank["goat"] {
		score += 1.0
	}
	if riverState.RightBank["cabbage"] {
		score += 1.0
	}

	// Penalize longer paths slightly to prefer shorter solutions
	score -= float64(pathLength) * 0.01

	return score, nil
}
// main solves the classic wolf-goat-cabbage puzzle with the prebuilt Tree of
// Thoughts agent: RiverPuzzleGenerator expands candidate moves,
// SimpleEvaluator scores and prunes them, and the best path found is printed.
func main() {
	fmt.Println("=== Tree of Thoughts: River Crossing Puzzle ===")
	fmt.Println()
	fmt.Println("Problem: A farmer needs to transport a wolf, a goat, and a cabbage across a river.")
	fmt.Println("Rules:")
	fmt.Println(" 1. The boat can only carry the farmer and at most one other item")
	fmt.Println(" 2. The wolf cannot be left alone with the goat")
	fmt.Println(" 3. The goat cannot be left alone with the cabbage")
	fmt.Println()
	fmt.Println("Let's use Tree of Thoughts to find a solution!")

	// Create initial state: everything on the left bank
	initialState := NewRiverState(
		map[string]bool{"farmer": true, "wolf": true, "goat": true, "cabbage": true},
		map[string]bool{"farmer": false, "wolf": false, "goat": false, "cabbage": false},
		"left",
		"Initial state",
	)

	// Configure Tree of Thoughts. MaxDepth/MaxPaths presumably bound the
	// search depth and the number of retained candidate paths — see the
	// prebuilt package docs to confirm.
	config := prebuilt.TreeOfThoughtsConfig{
		Generator:    &RiverPuzzleGenerator{},
		Evaluator:    &SimpleEvaluator{},
		InitialState: initialState,
		MaxDepth:     10,
		MaxPaths:     10,
		Verbose:      true,
	}

	// Create agent using map state convenience function
	agent, err := prebuilt.CreateTreeOfThoughtsAgentMap(config)
	if err != nil {
		log.Fatalf("Failed to create Tree of Thoughts agent: %v", err)
	}

	// Run search
	fmt.Println("๐ Starting tree search...")
	result, err := agent.Invoke(context.Background(), map[string]any{})
	if err != nil {
		log.Fatalf("Search failed: %v", err)
	}

	// Print the solution path step by step, if one was found
	fmt.Println()
	solution, ok := result["solution"].(prebuilt.SearchPath)
	if !ok || len(solution.States) == 0 {
		fmt.Println("No solution found")
	} else {
		fmt.Println("=== Solution Found ===")
		for i, s := range solution.States {
			fmt.Printf("Step %d: %s\n", i, s.GetDescription())
		}
	}

	fmt.Println("\n=== Analysis ===")
	fmt.Println("Tree of Thoughts systematically explored the search space,")
	fmt.Println("evaluating multiple possible paths at each step and pruning")
	fmt.Println("invalid branches (where the wolf would eat the goat, or the")
	fmt.Println("goat would eat the cabbage).")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/conditional_edges_example/main.go | examples/conditional_edges_example/main.go | //go:build ignore
// +build ignore
package main
import (
"context"
"fmt"
"strings"
"github.com/smallnest/langgraphgo/graph"
"github.com/tmc/langchaingo/llms"
)
// main runs the three conditional-edge demos in sequence.
func main() {
	fmt.Println("๐ Conditional Edges Example")
	fmt.Println("============================\n")

	// Example 1: Simple Intent Router
	SimpleIntentRouter()

	// Example 2: Multi-step Workflow with Conditions
	MultiStepWorkflow()

	// Example 3: Dynamic Tool Selection
	DynamicToolSelection()
}
// SimpleIntentRouter demonstrates routing based on user intent: keyword
// matching on the first message picks the question, command, or feedback
// handler, with the question handler as the default route.
func SimpleIntentRouter() {
	fmt.Println("1๏ธโฃ Intent-Based Routing")
	fmt.Println("------------------------")

	g := graph.NewStateGraph[[]llms.MessageContent]()

	// Entry point - analyze intent (just logs; routing happens on the edge)
	g.AddNode("analyze_intent", "analyze_intent", func(ctx context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) {
		fmt.Printf(" Analyzing: %s\n", state[0].Parts[0].(llms.TextContent).Text)
		return state, nil
	})

	// Different handlers for different intents
	g.AddNode("handle_question", "handle_question", func(ctx context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) {
		response := "I'll help answer your question about that."
		fmt.Printf(" โ Question Handler: %s\n", response)
		return append(state, llms.TextParts("ai", response)), nil
	})

	g.AddNode("handle_command", "handle_command", func(ctx context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) {
		response := "Executing your command..."
		fmt.Printf(" โก Command Handler: %s\n", response)
		return append(state, llms.TextParts("ai", response)), nil
	})

	g.AddNode("handle_feedback", "handle_feedback", func(ctx context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) {
		response := "Thank you for your feedback!"
		fmt.Printf(" ๐ฌ Feedback Handler: %s\n", response)
		return append(state, llms.TextParts("ai", response)), nil
	})

	// Conditional routing based on intent: only the FIRST message is
	// inspected, and the first matching keyword group wins.
	g.AddConditionalEdge("analyze_intent", func(ctx context.Context, state []llms.MessageContent) string {
		if len(state) > 0 {
			text := state[0].Parts[0].(llms.TextContent).Text
			text = strings.ToLower(text)

			// Route based on keywords
			if strings.Contains(text, "?") || strings.Contains(text, "what") || strings.Contains(text, "how") {
				fmt.Println(" โ Routing to Question Handler")
				return "handle_question"
			}
			if strings.Contains(text, "please") || strings.Contains(text, "could you") || strings.Contains(text, "run") {
				fmt.Println(" โ Routing to Command Handler")
				return "handle_command"
			}
			if strings.Contains(text, "thanks") || strings.Contains(text, "good") || strings.Contains(text, "bad") {
				fmt.Println(" โ Routing to Feedback Handler")
				return "handle_feedback"
			}
		}

		// Default to question handler
		fmt.Println(" โ Default: Routing to Question Handler")
		return "handle_question"
	})

	// All handlers go to END
	g.AddEdge("handle_question", graph.END)
	g.AddEdge("handle_command", graph.END)
	g.AddEdge("handle_feedback", graph.END)

	g.SetEntryPoint("analyze_intent")

	// Compile and test with different inputs
	runnable, _ := g.Compile()
	ctx := context.Background()

	// Test different intents
	testInputs := []string{
		"What is the weather today?",
		"Please run the diagnostic tool",
		"Thanks for your help!",
	}

	for _, input := range testInputs {
		fmt.Printf("\n๐ Input: %s\n", input)
		messages := []llms.MessageContent{llms.TextParts("human", input)}
		result, _ := runnable.Invoke(ctx, messages)
		fmt.Printf(" Response: %s\n", result[len(result)-1].Parts[0].(llms.TextContent).Text)
	}

	fmt.Println()
}
// MultiStepWorkflow demonstrates a workflow with conditional branching:
// validate -> (process | handle_error), then process -> (store | END)
// depending on the size of the computed result.
func MultiStepWorkflow() {
	fmt.Println("2๏ธโฃ Multi-Step Workflow with Conditions")
	fmt.Println("---------------------------------------")

	g := graph.NewStateGraph[map[string]any]()

	// Data validation step: marks data["valid"] for the conditional edge
	g.AddNode("validate", "validate", func(ctx context.Context, data map[string]any) (map[string]any, error) {
		fmt.Printf(" Validating data: %v\n", data)

		// Check if data is valid (positive int under "value")
		if value, ok := data["value"].(int); ok && value > 0 {
			data["valid"] = true
		} else {
			data["valid"] = false
		}

		return data, nil
	})

	// Process valid data: doubles "value" into "result"
	g.AddNode("process", "process", func(ctx context.Context, data map[string]any) (map[string]any, error) {
		fmt.Println(" โ
Processing valid data...")
		data["result"] = data["value"].(int) * 2
		data["status"] = "processed"
		return data, nil
	})

	// Handle invalid data
	g.AddNode("handle_error", "handle_error", func(ctx context.Context, data map[string]any) (map[string]any, error) {
		fmt.Println(" โ Handling invalid data...")
		data["status"] = "error"
		data["error"] = "Invalid input value"
		return data, nil
	})

	// Store results
	g.AddNode("store", "store", func(ctx context.Context, data map[string]any) (map[string]any, error) {
		fmt.Printf(" ๐พ Storing result: %v\n", data["result"])
		return data, nil
	})

	// Conditional edge after validation
	g.AddConditionalEdge("validate", func(ctx context.Context, data map[string]any) string {
		if valid, ok := data["valid"].(bool); ok && valid {
			fmt.Println(" โ Data is valid, proceeding to process")
			return "process"
		}
		fmt.Println(" โ Data is invalid, handling error")
		return "handle_error"
	})

	// Conditional edge after processing: only large results (>100) are stored
	g.AddConditionalEdge("process", func(ctx context.Context, data map[string]any) string {
		if result, ok := data["result"].(int); ok && result > 100 {
			fmt.Println(" โ Large result, storing...")
			return "store"
		}
		fmt.Println(" โ Small result, skipping storage")
		return graph.END
	})

	g.AddEdge("handle_error", graph.END)
	g.AddEdge("store", graph.END)

	g.SetEntryPoint("validate")

	// Test the workflow
	runnable, _ := g.Compile()
	ctx := context.Background()

	testCases := []map[string]any{
		{"value": 60}, // Valid, large result -> will be stored
		{"value": 10}, // Valid, small result -> won't be stored
		{"value": -5}, // Invalid -> error handling
	}

	for i, testData := range testCases {
		fmt.Printf("\n Test %d: Input = %v\n", i+1, testData)
		result, _ := runnable.Invoke(ctx, testData)
		fmt.Printf(" Final State: %v\n", result)
	}

	fmt.Println()
}
// DynamicToolSelection demonstrates selecting tools based on task
// requirements: keyword matching on the task string picks one of four tool
// nodes, with web search as the fallback.
func DynamicToolSelection() {
	fmt.Println("3๏ธโฃ Dynamic Tool Selection")
	fmt.Println("-------------------------")

	g := graph.NewStateGraph[string]()

	// Analyze task requirements (just logs; the edge does the selection)
	g.AddNode("analyze_task", "analyze_task", func(ctx context.Context, task string) (string, error) {
		fmt.Printf(" Analyzing task: %s\n", task)
		return task, nil
	})

	// Different tools (each appends a mock result to the task string)
	g.AddNode("calculator", "calculator", func(ctx context.Context, task string) (string, error) {
		fmt.Println(" ๐งฎ Using Calculator Tool")
		return task + " -> Result: 42", nil
	})

	g.AddNode("web_search", "web_search", func(ctx context.Context, task string) (string, error) {
		fmt.Println(" ๐ Using Web Search Tool")
		return task + " -> Found 10 relevant results", nil
	})

	g.AddNode("code_generator", "code_generator", func(ctx context.Context, task string) (string, error) {
		fmt.Println(" ๐ป Using Code Generator Tool")
		return task + " -> Generated code snippet", nil
	})

	g.AddNode("translator", "translator", func(ctx context.Context, task string) (string, error) {
		fmt.Println(" ๐ Using Translator Tool")
		return task + " -> Translated to target language", nil
	})

	// Tool selection based on task keywords (first match wins)
	g.AddConditionalEdge("analyze_task", func(ctx context.Context, task string) string {
		taskLower := strings.ToLower(task)

		if strings.Contains(taskLower, "calculate") || strings.Contains(taskLower, "compute") || strings.Contains(taskLower, "math") {
			fmt.Println(" โ Selecting Calculator")
			return "calculator"
		}
		if strings.Contains(taskLower, "search") || strings.Contains(taskLower, "find") || strings.Contains(taskLower, "lookup") {
			fmt.Println(" โ Selecting Web Search")
			return "web_search"
		}
		if strings.Contains(taskLower, "code") || strings.Contains(taskLower, "program") || strings.Contains(taskLower, "function") {
			fmt.Println(" โ Selecting Code Generator")
			return "code_generator"
		}
		if strings.Contains(taskLower, "translate") || strings.Contains(taskLower, "language") {
			fmt.Println(" โ Selecting Translator")
			return "translator"
		}

		// Default to web search
		fmt.Println(" โ Default: Selecting Web Search")
		return "web_search"
	})

	// All tools go to END
	g.AddEdge("calculator", graph.END)
	g.AddEdge("web_search", graph.END)
	g.AddEdge("code_generator", graph.END)
	g.AddEdge("translator", graph.END)

	g.SetEntryPoint("analyze_task")

	// Test with different tasks
	runnable, _ := g.Compile()
	ctx := context.Background()

	tasks := []string{
		"Calculate the compound interest",
		"Search for best practices in Go",
		"Generate code for sorting algorithm",
		"Translate this to Spanish",
		"Analyze market trends", // Will use default
	}

	for _, task := range tasks {
		fmt.Printf("\n๐ Task: %s\n", task)
		result, _ := runnable.Invoke(ctx, task)
		fmt.Printf(" Output: %s\n", result)
	}

	fmt.Println("\nโ
Conditional Edges Examples Complete!")
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/basic_llm/main.go | examples/basic_llm/main.go | package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/graph"
"github.com/tmc/langchaingo/llms/openai"
)
// main wires a minimal one-node graph: the "generate" node forwards the
// "input" string to the OpenAI model and returns its reply under "output".
func main() {
	// Initialize LLM (reads credentials from the environment)
	model, err := openai.New()
	if err != nil {
		log.Fatal(err)
	}

	workflow := graph.NewStateGraph[map[string]any]()

	// Single node: read "input", call the model, write "output".
	generate := func(ctx context.Context, state map[string]any) (map[string]any, error) {
		prompt, ok := state["input"].(string)
		if !ok {
			return nil, fmt.Errorf("invalid input")
		}
		answer, err := model.Call(ctx, prompt)
		if err != nil {
			return nil, err
		}
		return map[string]any{"output": answer}, nil
	}
	workflow.AddNode("generate", "generate", generate)
	workflow.AddEdge("generate", graph.END)
	workflow.SetEntryPoint("generate")

	runnable, err := workflow.Compile()
	if err != nil {
		panic(err)
	}

	// Invoke with map state
	res, err := runnable.Invoke(context.Background(), map[string]any{"input": "What is 1 + 1?"})
	if err != nil {
		panic(err)
	}

	fmt.Println("AI Response:", res["output"])
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
smallnest/langgraphgo | https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/examples/planning_agent/main.go | examples/planning_agent/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"github.com/smallnest/langgraphgo/graph"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
"github.com/tmc/langchaingo/tools"
)
// main builds a planning agent over six mock data-pipeline nodes and runs
// three example queries through it. Requires OPENAI_API_KEY; OPENAI_API_BASE
// and OPENAI_MODEL are optional overrides.
func main() {
	if os.Getenv("OPENAI_API_KEY") == "" {
		log.Fatal("OPENAI_API_KEY not set")
	}

	// Initialize LLM, honoring optional endpoint/model overrides
	opts := []openai.Option{}
	if base := os.Getenv("OPENAI_API_BASE"); base != "" {
		opts = append(opts, openai.WithBaseURL(base))
	}
	if modelName := os.Getenv("OPENAI_MODEL"); modelName != "" {
		opts = append(opts, openai.WithModel(modelName))
	}
	model, err := openai.New(opts...)
	if err != nil {
		log.Fatal(err)
	}

	// Define custom nodes that can be used in the workflow; the planner
	// presumably chooses and orders them from their Description fields —
	// see the prebuilt package docs to confirm.
	nodes := []graph.TypedNode[map[string]any]{
		{
			Name:        "fetch_data",
			Description: "Fetch user data from the database",
			Function:    fetchDataNode,
		},
		{
			Name:        "validate_data",
			Description: "Validate the integrity and format of the data",
			Function:    validateDataNode,
		},
		{
			Name:        "transform_data",
			Description: "Transform and normalize the data into JSON format",
			Function:    transformDataNode,
		},
		{
			Name:        "analyze_data",
			Description: "Perform statistical analysis on the data",
			Function:    analyzeDataNode,
		},
		{
			Name:        "save_results",
			Description: "Save the processed results to storage",
			Function:    saveResultsNode,
		},
		{
			Name:        "generate_report",
			Description: "Generate a summary report from the analysis",
			Function:    generateReportNode,
		},
	}

	// Create Planning Agent with verbose output
	agent, err := prebuilt.CreatePlanningAgentMap(
		model,
		nodes,
		[]tools.Tool{},
		prebuilt.WithVerbose(true),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Example 1: Data processing workflow
	fmt.Println("=== Example 1: Data Processing Workflow ===")
	query1 := "Fetch user data, validate it, transform it to JSON, and save the results"
	runAgent(agent, query1)

	fmt.Println("\n=== Example 2: Data Analysis Workflow ===")
	query2 := "Fetch data, analyze it, and generate a report"
	runAgent(agent, query2)

	fmt.Println("\n=== Example 3: Complete Pipeline ===")
	query3 := "Fetch data, validate and transform it, analyze the results, and generate a comprehensive report"
	runAgent(agent, query3)
}
// runAgent sends a single user query through the planning agent and prints
// every AI-generated text part from the resulting conversation.
func runAgent(agent *graph.StateRunnable[map[string]any], query string) {
	fmt.Printf("\nUser Query: %s\n\n", query)

	state := map[string]any{
		"messages": []llms.MessageContent{
			llms.TextParts(llms.ChatMessageTypeHuman, query),
		},
	}

	out, err := agent.Invoke(context.Background(), state)
	if err != nil {
		log.Printf("Error: %v\n", err)
		return
	}

	// Echo every non-human message part in conversation order.
	conversation := out["messages"].([]llms.MessageContent)
	fmt.Println("\n--- Execution Result ---")
	for idx, message := range conversation {
		if message.Role == llms.ChatMessageTypeHuman {
			continue // Skip user message
		}
		for _, piece := range message.Parts {
			text, ok := piece.(llms.TextContent)
			if !ok {
				continue
			}
			fmt.Printf("Step %d: %s\n", idx, text.Text)
		}
	}
	fmt.Println("------------------------")
}
// Node implementations: each simulates one pipeline step by appending a
// canned AI message to the "messages" slice in the shared state.

// fetchDataNode simulates retrieving user records from a database.
func fetchDataNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	messages := state["messages"].([]llms.MessageContent)
	fmt.Println("๐ฅ Fetching data from database...")

	// Simulate data fetching
	msg := llms.MessageContent{
		Role:  llms.ChatMessageTypeAI,
		Parts: []llms.ContentPart{llms.TextPart("Data fetched: 1000 user records retrieved")},
	}

	return map[string]any{
		"messages": append(messages, msg),
	}, nil
}
// validateDataNode simulates a validation pass over the fetched data and
// records the outcome as an AI message.
func validateDataNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	messages := state["messages"].([]llms.MessageContent)
	fmt.Println("โ
Validating data...")
	msg := llms.MessageContent{
		Role:  llms.ChatMessageTypeAI,
		Parts: []llms.ContentPart{llms.TextPart("Data validation passed: all records valid")},
	}
	return map[string]any{
		"messages": append(messages, msg),
	}, nil
}
// transformDataNode simulates normalizing the data into JSON form and
// records the outcome as an AI message.
func transformDataNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	messages := state["messages"].([]llms.MessageContent)
	fmt.Println("๐ Transforming data...")
	msg := llms.MessageContent{
		Role:  llms.ChatMessageTypeAI,
		Parts: []llms.ContentPart{llms.TextPart("Data transformed to JSON format successfully")},
	}
	return map[string]any{
		"messages": append(messages, msg),
	}, nil
}
// analyzeDataNode simulates a statistical analysis step and records the
// mock findings as an AI message.
func analyzeDataNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	messages := state["messages"].([]llms.MessageContent)
	fmt.Println("๐ Analyzing data...")
	msg := llms.MessageContent{
		Role:  llms.ChatMessageTypeAI,
		Parts: []llms.ContentPart{llms.TextPart("Analysis complete: avg_age=32.5, total_users=1000, active_rate=78%")},
	}
	return map[string]any{
		"messages": append(messages, msg),
	}, nil
}
// saveResultsNode simulates persisting the processed results and records
// the outcome as an AI message.
func saveResultsNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	messages := state["messages"].([]llms.MessageContent)
	fmt.Println("๐พ Saving results...")
	msg := llms.MessageContent{
		Role:  llms.ChatMessageTypeAI,
		Parts: []llms.ContentPart{llms.TextPart("Results saved to database successfully")},
	}
	return map[string]any{
		"messages": append(messages, msg),
	}, nil
}
// generateReportNode simulates producing a summary report and records the
// outcome as an AI message.
func generateReportNode(ctx context.Context, state map[string]any) (map[string]any, error) {
	messages := state["messages"].([]llms.MessageContent)
	fmt.Println("๐ Generating report...")
	msg := llms.MessageContent{
		Role:  llms.ChatMessageTypeAI,
		Parts: []llms.ContentPart{llms.TextPart("Report generated: summary.pdf created with all analysis results")},
	}
	return map[string]any{
		"messages": append(messages, msg),
	}, nil
}
| go | MIT | 600df7fe3e6254f2329f606732feaecfbd52d9f2 | 2026-01-07T10:38:05.929544Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.