repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/callbacks.go
graph/callbacks.go
package graph

import (
	"context"
	"time"
)

// CallbackHandler defines the interface for handling graph execution callbacks
// This matches Python's LangChain callback pattern
type CallbackHandler interface {
	// Chain callbacks (for graph/workflow execution)
	OnChainStart(ctx context.Context, serialized map[string]any, inputs map[string]any, runID string, parentRunID *string, tags []string, metadata map[string]any)
	OnChainEnd(ctx context.Context, outputs map[string]any, runID string)
	OnChainError(ctx context.Context, err error, runID string)

	// LLM callbacks (for AI model calls)
	OnLLMStart(ctx context.Context, serialized map[string]any, prompts []string, runID string, parentRunID *string, tags []string, metadata map[string]any)
	OnLLMEnd(ctx context.Context, response any, runID string)
	OnLLMError(ctx context.Context, err error, runID string)

	// Tool callbacks (for tool/function calls)
	OnToolStart(ctx context.Context, serialized map[string]any, inputStr string, runID string, parentRunID *string, tags []string, metadata map[string]any)
	OnToolEnd(ctx context.Context, output string, runID string)
	OnToolError(ctx context.Context, err error, runID string)

	// Retriever callbacks (for data retrieval operations)
	OnRetrieverStart(ctx context.Context, serialized map[string]any, query string, runID string, parentRunID *string, tags []string, metadata map[string]any)
	OnRetrieverEnd(ctx context.Context, documents []any, runID string)
	OnRetrieverError(ctx context.Context, err error, runID string)
}

// GraphCallbackHandler extends CallbackHandler with graph-specific events
type GraphCallbackHandler interface {
	CallbackHandler

	// OnGraphStep is called after a step (node execution + state update) is completed
	OnGraphStep(ctx context.Context, stepNode string, state any)
}

// Config represents configuration for graph invocation
// This matches Python's config dict pattern
type Config struct {
	// Callbacks to be invoked during execution
	Callbacks []CallbackHandler `json:"callbacks"`

	// Metadata to attach to the execution
	Metadata map[string]any `json:"metadata"`

	// Tags to categorize the execution
	Tags []string `json:"tags"`

	// Configurable parameters for the execution
	Configurable map[string]any `json:"configurable"`

	// RunName for this execution
	RunName string `json:"run_name"`

	// Timeout for the execution
	Timeout *time.Duration `json:"timeout"`

	// InterruptBefore nodes to stop before execution
	InterruptBefore []string `json:"interrupt_before"`

	// InterruptAfter nodes to stop after execution
	InterruptAfter []string `json:"interrupt_after"`

	// ResumeFrom nodes to start execution from (bypassing entry point)
	ResumeFrom []string `json:"resume_from"`

	// ResumeValue provides the value to return from an Interrupt() call when resuming
	ResumeValue any `json:"resume_value"`
}

// NoOpCallbackHandler provides a no-op implementation of CallbackHandler.
// Embed it to satisfy the interface while overriding only the callbacks you need.
func (n *NoOpCallbackHandler) OnChainStart(ctx context.Context, serialized map[string]any, inputs map[string]any, runID string, parentRunID *string, tags []string, metadata map[string]any) {
}

func (n *NoOpCallbackHandler) OnChainEnd(ctx context.Context, outputs map[string]any, runID string) {
}

func (n *NoOpCallbackHandler) OnChainError(ctx context.Context, err error, runID string) {}

func (n *NoOpCallbackHandler) OnLLMStart(ctx context.Context, serialized map[string]any, prompts []string, runID string, parentRunID *string, tags []string, metadata map[string]any) {
}

func (n *NoOpCallbackHandler) OnLLMEnd(ctx context.Context, response any, runID string) {}

func (n *NoOpCallbackHandler) OnLLMError(ctx context.Context, err error, runID string) {}

func (n *NoOpCallbackHandler) OnToolStart(ctx context.Context, serialized map[string]any, inputStr string, runID string, parentRunID *string, tags []string, metadata map[string]any) {
}

func (n *NoOpCallbackHandler) OnToolEnd(ctx context.Context, output string, runID string) {}

func (n *NoOpCallbackHandler) OnToolError(ctx context.Context, err error, runID string) {}

func (n *NoOpCallbackHandler) OnRetrieverStart(ctx context.Context, serialized map[string]any, query string, runID string, parentRunID *string, tags []string, metadata map[string]any) {
}

func (n *NoOpCallbackHandler) OnRetrieverEnd(ctx context.Context, documents []any, runID string) {
}

func (n *NoOpCallbackHandler) OnRetrieverError(ctx context.Context, err error, runID string) {}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/test_constants_test.go
graph/test_constants_test.go
package graph_test

// Shared string fixtures reused across this package's external tests.
const (
	testNode   = "test_node"
	testState  = "test_state"
	testResult = "test_result"
)
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/state_graph_tracer_test.go
graph/state_graph_tracer_test.go
package graph

import (
	"context"
	"testing"
)

// TestStateGraph_WithTracer verifies SetTracer/WithTracer wiring and that a
// two-node graph execution produces graph-end and node-end spans.
func TestStateGraph_WithTracer(t *testing.T) {
	// Create a StateGraph
	g := NewStateGraph[map[string]any]()

	// Add nodes
	g.AddNode("node1", "First node", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		return map[string]any{"result": "result1"}, nil
	})

	g.AddNode("node2", "Second node", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		return map[string]any{"result": "result2"}, nil
	})

	g.AddEdge("node1", "node2")
	g.AddEdge("node2", END)
	g.SetEntryPoint("node1")

	runnable, err := g.Compile()
	if err != nil {
		t.Fatalf("Failed to compile: %v", err)
	}

	// Create a tracer
	tracer := NewTracer()

	// Test SetTracer method
	runnable.SetTracer(tracer)
	if runnable.tracer != tracer {
		t.Fatal("SetTracer should set the tracer")
	}

	// Test WithTracer method
	runnableWithTracer := runnable.WithTracer(tracer)
	if runnableWithTracer.tracer != tracer {
		t.Fatal("WithTracer should return a new runnable with tracer")
	}

	// Execute the graph with tracer
	_, err = runnableWithTracer.Invoke(context.Background(), map[string]any{"initial": true})
	if err != nil {
		t.Fatalf("Failed to invoke: %v", err)
	}

	// Verify spans were collected
	spans := tracer.GetSpans()
	if len(spans) == 0 {
		t.Fatal("Tracer should have collected spans")
	}

	// Verify we have graph end and node end spans (events are updated when EndSpan is called)
	var hasGraphEnd, hasNode1End, hasNode2End bool
	for _, span := range spans {
		if span.Event == TraceEventGraphEnd && span.NodeName == "graph" {
			hasGraphEnd = true
		}
		if span.Event == TraceEventNodeEnd && span.NodeName == "node1" {
			hasNode1End = true
		}
		if span.Event == TraceEventNodeEnd && span.NodeName == "node2" {
			hasNode2End = true
		}
	}

	if !hasGraphEnd {
		t.Error("Should have GraphEnd event for graph")
	}
	if !hasNode1End {
		t.Error("Should have NodeEnd event for node1")
	}
	if !hasNode2End {
		t.Error("Should have NodeEnd event for node2")
	}

	t.Logf("StateGraph tracer test passed! Collected %d spans", len(spans))
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/tracing.go
graph/tracing.go
package graph

import (
	"context"
	"strconv"
	"sync/atomic"
	"time"
)

// TraceEvent represents different types of events in graph execution
type TraceEvent string

const (
	// TraceEventGraphStart indicates the start of graph execution
	TraceEventGraphStart TraceEvent = "graph_start"
	// TraceEventGraphEnd indicates the end of graph execution
	TraceEventGraphEnd TraceEvent = "graph_end"
	// TraceEventNodeStart indicates the start of node execution
	TraceEventNodeStart TraceEvent = "node_start"
	// TraceEventNodeEnd indicates the end of node execution
	TraceEventNodeEnd TraceEvent = "node_end"
	// TraceEventNodeError indicates an error occurred in node execution
	TraceEventNodeError TraceEvent = "node_error"
	// TraceEventEdgeTraversal indicates traversal from one node to another
	TraceEventEdgeTraversal TraceEvent = "edge_traversal"
)

// TraceSpan represents a span of execution with timing and metadata
type TraceSpan struct {
	// ID is a unique identifier for this span
	ID string
	// ParentID is the ID of the parent span (empty for root spans)
	ParentID string
	// Event indicates the type of event this span represents
	Event TraceEvent
	// NodeName is the name of the node being executed (if applicable)
	NodeName string
	// FromNode is the source node for edge traversals
	FromNode string
	// ToNode is the destination node for edge traversals
	ToNode string
	// StartTime is when this span began
	StartTime time.Time
	// EndTime is when this span completed (zero for ongoing spans)
	EndTime time.Time
	// Duration is the total time taken (calculated when span ends)
	Duration time.Duration
	// State is a snapshot of the state at this point (optional)
	State any
	// Error contains any error that occurred during execution
	Error error
	// Metadata contains additional key-value pairs for observability
	Metadata map[string]any
}

// TraceHook defines the interface for trace event handlers
type TraceHook interface {
	// OnEvent is called when a trace event occurs
	OnEvent(ctx context.Context, span *TraceSpan)
}

// TraceHookFunc is a function adapter for TraceHook
type TraceHookFunc func(ctx context.Context, span *TraceSpan)

// OnEvent implements the TraceHook interface
func (f TraceHookFunc) OnEvent(ctx context.Context, span *TraceSpan) {
	f(ctx, span)
}

// Tracer manages trace collection and hooks.
// NOTE(review): the spans map is not guarded by a mutex; callers appear to use
// one tracer per invocation — confirm before sharing a Tracer across goroutines.
type Tracer struct {
	hooks []TraceHook
	spans map[string]*TraceSpan
}

// NewTracer creates a new tracer instance
func NewTracer() *Tracer {
	return &Tracer{
		hooks: make([]TraceHook, 0),
		spans: make(map[string]*TraceSpan),
	}
}

// AddHook registers a new trace hook
func (t *Tracer) AddHook(hook TraceHook) {
	t.hooks = append(t.hooks, hook)
}

// StartSpan creates a new trace span, records it, and notifies all hooks.
// The parent span, if any, is taken from ctx (see ContextWithSpan).
func (t *Tracer) StartSpan(ctx context.Context, event TraceEvent, nodeName string) *TraceSpan {
	span := &TraceSpan{
		ID:        generateSpanID(),
		Event:     event,
		NodeName:  nodeName,
		StartTime: time.Now(),
		Metadata:  make(map[string]any),
	}

	// Extract parent ID from context if available
	if parentSpan := SpanFromContext(ctx); parentSpan != nil {
		span.ParentID = parentSpan.ID
	}

	t.spans[span.ID] = span

	// Notify hooks
	for _, hook := range t.hooks {
		hook.OnEvent(ctx, span)
	}

	return span
}

// EndSpan completes a trace span: it stamps end time/duration, attaches the
// final state and error, promotes the *_start event to the matching end/error
// event, and notifies all hooks a second time.
func (t *Tracer) EndSpan(ctx context.Context, span *TraceSpan, state any, err error) {
	span.EndTime = time.Now()
	span.Duration = span.EndTime.Sub(span.StartTime)
	span.State = state
	span.Error = err

	// Update event type if there was an error
	if err != nil && span.Event == TraceEventNodeStart {
		span.Event = TraceEventNodeError
	} else if span.Event == TraceEventNodeStart {
		span.Event = TraceEventNodeEnd
	} else if span.Event == TraceEventGraphStart {
		span.Event = TraceEventGraphEnd
	}

	// Notify hooks
	for _, hook := range t.hooks {
		hook.OnEvent(ctx, span)
	}
}

// TraceEdgeTraversal records an edge traversal event. Traversals are
// instantaneous, so the span starts and ends at the same moment.
func (t *Tracer) TraceEdgeTraversal(ctx context.Context, fromNode, toNode string) {
	span := &TraceSpan{
		ID:        generateSpanID(),
		Event:     TraceEventEdgeTraversal,
		FromNode:  fromNode,
		ToNode:    toNode,
		StartTime: time.Now(),
		EndTime:   time.Now(),
		Duration:  0,
		Metadata:  make(map[string]any),
	}

	// Extract parent ID from context if available
	if parentSpan := SpanFromContext(ctx); parentSpan != nil {
		span.ParentID = parentSpan.ID
	}

	t.spans[span.ID] = span

	// Notify hooks
	for _, hook := range t.hooks {
		hook.OnEvent(ctx, span)
	}
}

// GetSpans returns all collected spans.
// The returned map is the tracer's internal storage, not a copy.
func (t *Tracer) GetSpans() map[string]*TraceSpan {
	return t.spans
}

// Clear removes all collected spans
func (t *Tracer) Clear() {
	t.spans = make(map[string]*TraceSpan)
}

// Context keys for span storage
type contextKey string

const spanContextKey contextKey = "langgraph_span"

// ContextWithSpan returns a new context with the span stored
func ContextWithSpan(ctx context.Context, span *TraceSpan) context.Context {
	return context.WithValue(ctx, spanContextKey, span)
}

// SpanFromContext extracts a span from context
func SpanFromContext(ctx context.Context) *TraceSpan {
	if span, ok := ctx.Value(spanContextKey).(*TraceSpan); ok {
		return span
	}
	return nil
}

// spanCounter provides a process-wide monotonic suffix for span IDs.
var spanCounter atomic.Uint64

// generateSpanID creates a unique span identifier.
//
// The timestamp prefix keeps IDs roughly sortable by creation time; the atomic
// counter suffix guarantees uniqueness. A bare timestamp (the previous
// implementation) has only microsecond resolution, so spans created
// back-to-back — e.g. an edge traversal followed immediately by a node start —
// could receive identical IDs and silently overwrite each other in the
// Tracer's span map.
func generateSpanID() string {
	return time.Now().Format("20060102150405.000000") + "-" + strconv.FormatUint(spanCounter.Add(1), 10)
}

// TracedRunnable wraps a Runnable with tracing capabilities
// Deprecated: Use StateTracedRunnable[S] for type-safe tracing
type TracedRunnable struct {
	*Runnable
	tracer *Tracer
}

// NewTracedRunnable creates a new traced runnable
// Deprecated: Use NewStateTracedRunnable[S] for type-safe tracing
func NewTracedRunnable(runnable *Runnable, tracer *Tracer) *TracedRunnable {
	return &TracedRunnable{
		Runnable: runnable,
		tracer:   tracer,
	}
}

// Invoke executes the graph with tracing enabled.
// It walks the graph from the entry point, emitting a span per node plus an
// edge-traversal span per hop, until END is reached or an error occurs.
func (tr *TracedRunnable) Invoke(ctx context.Context, initialState any) (any, error) {
	// Start graph execution span
	graphSpan := tr.tracer.StartSpan(ctx, TraceEventGraphStart, "")
	ctx = ContextWithSpan(ctx, graphSpan)

	// Convert initialState to map[string]any if needed
	var stateMap map[string]any
	if sm, ok := initialState.(map[string]any); ok {
		stateMap = sm
	} else {
		// Non-map states are boxed under the "state" key.
		stateMap = map[string]any{"state": initialState}
	}
	state := any(stateMap)

	currentNode := tr.graph.entryPoint
	var finalError error

	for {
		if currentNode == END {
			break
		}

		// Get typed node from the graph
		node, ok := tr.graph.nodes[currentNode]
		if !ok {
			finalError = ErrNodeNotFound
			tr.tracer.EndSpan(ctx, graphSpan, state, finalError)
			return nil, finalError
		}

		// Start node execution span
		nodeSpan := tr.tracer.StartSpan(ctx, TraceEventNodeStart, currentNode)
		nodeCtx := ContextWithSpan(ctx, nodeSpan)

		var err error
		// Call the typed function with map[string]any
		var currentState map[string]any
		if s, ok := state.(map[string]any); ok {
			currentState = s
		} else {
			currentState = map[string]any{"state": state}
		}
		state, err = node.Function(nodeCtx, currentState)

		// End node execution span
		tr.tracer.EndSpan(nodeCtx, nodeSpan, state, err)

		if err != nil {
			finalError = err
			tr.tracer.EndSpan(ctx, graphSpan, state, finalError)
			return nil, finalError
		}

		// Find next node (first matching outgoing edge wins)
		foundNext := false
		for _, edge := range tr.graph.edges {
			if edge.From == currentNode {
				tr.tracer.TraceEdgeTraversal(ctx, currentNode, edge.To)
				currentNode = edge.To
				foundNext = true
				break
			}
		}

		if !foundNext {
			finalError = ErrNoOutgoingEdge
			tr.tracer.EndSpan(ctx, graphSpan, state, finalError)
			return nil, finalError
		}
	}

	tr.tracer.EndSpan(ctx, graphSpan, state, nil)
	return state, nil
}

// GetTracer returns the tracer instance
func (tr *TracedRunnable) GetTracer() *Tracer {
	return tr.tracer
}

// StateTracedRunnable[S] wraps a StateRunnable[S] with tracing capabilities
type StateTracedRunnable[S any] struct {
	runnable *StateRunnable[S]
	tracer   *Tracer
}

// NewStateTracedRunnable creates a new generic traced runnable
func NewStateTracedRunnable[S any](runnable *StateRunnable[S], tracer *Tracer) *StateTracedRunnable[S] {
	return &StateTracedRunnable[S]{
		runnable: runnable,
		tracer:   tracer,
	}
}

// Invoke executes the graph with tracing enabled.
// Only a single graph-level span is emitted here; per-node spans are the
// responsibility of the wrapped runnable.
func (tr *StateTracedRunnable[S]) Invoke(ctx context.Context, initialState S) (S, error) {
	// Start graph execution span
	graphSpan := tr.tracer.StartSpan(ctx, TraceEventGraphStart, "")
	ctx = ContextWithSpan(ctx, graphSpan)

	// Execute the graph
	result, err := tr.runnable.Invoke(ctx, initialState)

	// End graph execution span
	tr.tracer.EndSpan(ctx, graphSpan, result, err)

	return result, err
}

// GetTracer returns the tracer instance
func (tr *StateTracedRunnable[S]) GetTracer() *Tracer {
	return tr.tracer
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/parallel.go
graph/parallel.go
package graph

import (
	"context"
	"fmt"
	"sync"
)

// ParallelNode represents a set of nodes that can execute in parallel
type ParallelNode[S any] struct {
	nodes []TypedNode[S]
	name  string
}

// NewParallelNode creates a new parallel node
func NewParallelNode[S any](name string, nodes ...TypedNode[S]) *ParallelNode[S] {
	return &ParallelNode[S]{
		name:  name,
		nodes: nodes,
	}
}

// Execute runs all nodes in parallel and collects results.
// Every node receives the same input state; the returned slice is ordered by
// node index. If any node errors (or panics), the first error observed is
// returned and the collected results are discarded.
func (pn *ParallelNode[S]) Execute(ctx context.Context, state S) ([]S, error) {
	// Create channels for results and errors
	type result struct {
		index int
		value S
		err   error
	}

	// Buffered to len(nodes) so no worker goroutine ever blocks on send.
	results := make(chan result, len(pn.nodes))
	var wg sync.WaitGroup

	// Execute all nodes in parallel
	for i, node := range pn.nodes {
		wg.Add(1)
		go func(idx int, n TypedNode[S]) {
			defer wg.Done()

			// Execute with panic recovery: a panicking node is reported as an
			// error result instead of crashing the whole process.
			defer func() {
				if r := recover(); r != nil {
					results <- result{
						index: idx,
						err:   fmt.Errorf("panic in parallel node %s[%d]: %v", pn.name, idx, r),
					}
				}
			}()

			value, err := n.Function(ctx, state)
			results <- result{
				index: idx,
				value: value,
				err:   err,
			}
		}(i, node)
	}

	// Wait for all nodes to complete, then close so the range below terminates.
	go func() {
		wg.Wait()
		close(results)
	}()

	// Collect results
	outputs := make([]S, len(pn.nodes))
	var firstError error

	for res := range results {
		if res.err != nil && firstError == nil {
			firstError = res.err
		}
		outputs[res.index] = res.value
	}

	if firstError != nil {
		return nil, fmt.Errorf("parallel execution failed: %w", firstError)
	}

	// Return collected results
	return outputs, nil
}

// AddParallelNodes adds a set of nodes that execute in parallel.
// merger is used to combine the results from parallel execution into a single state S.
func (g *StateGraph[S]) AddParallelNodes(
	groupName string,
	nodes map[string]func(context.Context, S) (S, error),
	merger func([]S) S,
) {
	// Create parallel node group
	parallelNodes := make([]TypedNode[S], 0, len(nodes))
	for name, fn := range nodes {
		parallelNodes = append(parallelNodes, TypedNode[S]{
			Name:     name,
			Function: fn,
		})
	}

	// Add as a single parallel node
	parallelNode := NewParallelNode(groupName, parallelNodes...)

	// Wrap with merger
	g.AddNode(groupName, "Parallel execution group: "+groupName, func(ctx context.Context, state S) (S, error) {
		results, err := parallelNode.Execute(ctx, state)
		if err != nil {
			var zero S
			return zero, err
		}
		return merger(results), nil
	})
}

// MapReduceNode executes nodes in parallel and reduces results
type MapReduceNode[S any] struct {
	name     string
	mapNodes []TypedNode[S]
	reducer  func([]S) (S, error)
}

// NewMapReduceNode creates a new map-reduce node
func NewMapReduceNode[S any](name string, reducer func([]S) (S, error), mapNodes ...TypedNode[S]) *MapReduceNode[S] {
	return &MapReduceNode[S]{
		name:     name,
		mapNodes: mapNodes,
		reducer:  reducer,
	}
}

// Execute runs map nodes in parallel and reduces results
func (mr *MapReduceNode[S]) Execute(ctx context.Context, state S) (S, error) {
	// Execute map phase in parallel
	pn := NewParallelNode(mr.name+"_map", mr.mapNodes...)
	results, err := pn.Execute(ctx, state)
	if err != nil {
		var zero S
		return zero, fmt.Errorf("map phase failed: %w", err)
	}

	// Execute reduce phase
	if mr.reducer != nil {
		return mr.reducer(results)
	}

	// If no reducer, return zero state (or we should enforce reducer?)
	// In the generic version, we can't return []S as S unless S is []S.
	// So we assume reducer is provided or S can hold the results.
	// But without reducer, we don't know how to combine.
	// For now, return zero if no reducer.
	var zero S
	return zero, nil
}

// AddMapReduceNode adds a map-reduce pattern node
func (g *StateGraph[S]) AddMapReduceNode(
	name string,
	mapFunctions map[string]func(context.Context, S) (S, error),
	reducer func([]S) (S, error),
) {
	// Create map nodes
	mapNodes := make([]TypedNode[S], 0, len(mapFunctions))
	for nodeName, fn := range mapFunctions {
		mapNodes = append(mapNodes, TypedNode[S]{
			Name:     nodeName,
			Function: fn,
		})
	}

	// Create and add map-reduce node
	mrNode := NewMapReduceNode(name, reducer, mapNodes...)
	g.AddNode(name, "Map-reduce node: "+name, mrNode.Execute)
}

// FanOutFanIn creates a fan-out/fan-in pattern.
// aggregator merges worker results into a state S that is passed to the collector.
func (g *StateGraph[S]) FanOutFanIn(
	source string,
	_ []string, // workers parameter kept for API compatibility
	collector string,
	workerFuncs map[string]func(context.Context, S) (S, error),
	aggregator func([]S) S,
	collectFunc func(S) (S, error),
) {
	// Add parallel worker nodes
	g.AddParallelNodes(source+"_workers", workerFuncs, aggregator)

	// Add collector node
	g.AddNode(collector, "Collector node: "+collector, func(ctx context.Context, state S) (S, error) {
		return collectFunc(state)
	})

	// Connect source to workers and workers to collector
	g.AddEdge(source, source+"_workers")
	g.AddEdge(source+"_workers", collector)
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/config_test.go
graph/config_test.go
package graph

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestRuntimeConfiguration verifies that a Config passed via InvokeWithConfig
// is visible to nodes through GetConfig(ctx), and that plain Invoke sees none.
func TestRuntimeConfiguration(t *testing.T) {
	g := NewStateGraph[map[string]any]()

	// Define a node that reads config from context
	g.AddNode("reader", "reader", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		config := GetConfig(ctx)
		if config == nil {
			return map[string]any{"result": "no config"}, nil
		}
		if val, ok := config.Configurable["model"]; ok {
			return map[string]any{"result": val}, nil
		}
		return map[string]any{"result": "key not found"}, nil
	})

	g.SetEntryPoint("reader")
	g.AddEdge("reader", END)

	runnable, err := g.Compile()
	assert.NoError(t, err)

	// Test with config
	config := &Config{
		Configurable: map[string]any{
			"model": "gpt-4",
		},
	}

	result, err := runnable.InvokeWithConfig(context.Background(), nil, config)
	assert.NoError(t, err)
	assert.Equal(t, "gpt-4", result["result"])

	// Test without config
	result, err = runnable.Invoke(context.Background(), nil)
	assert.NoError(t, err)
	assert.Equal(t, "no config", result["result"])
}

// TestStateGraph_RuntimeConfiguration is the same check against a different
// Configurable key, exercising the StateGraph path.
func TestStateGraph_RuntimeConfiguration(t *testing.T) {
	g := NewStateGraph[map[string]any]()

	g.AddNode("reader", "reader", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		config := GetConfig(ctx)
		if config == nil {
			return map[string]any{"result": "no config"}, nil
		}
		if val, ok := config.Configurable["api_key"]; ok {
			return map[string]any{"result": val}, nil
		}
		return map[string]any{"result": "key not found"}, nil
	})

	g.SetEntryPoint("reader")
	g.AddEdge("reader", END)

	runnable, err := g.Compile()
	assert.NoError(t, err)

	config := &Config{
		Configurable: map[string]any{
			"api_key": "secret-123",
		},
	}

	result, err := runnable.InvokeWithConfig(context.Background(), nil, config)
	assert.NoError(t, err)
	assert.Equal(t, "secret-123", result["result"])
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/state_graph_generic_test.go
graph/state_graph_generic_test.go
package graph import ( "context" "errors" "testing" "time" ) // TestState is a simple test state type TestState struct { Count int `json:"count"` Name string `json:"name"` } // mapSchemaAdapter adapts MapSchema to StateSchema[any] type mapSchemaAdapter struct { *MapSchema } func (m *mapSchemaAdapter) Init() any { return m.MapSchema.Init() } func (m *mapSchemaAdapter) Update(current, new any) (any, error) { currentMap, ok1 := current.(map[string]any) newMap, ok2 := new.(map[string]any) if !ok1 || !ok2 { return current, nil } return m.MapSchema.Update(currentMap, newMap) } func TestStateGraph_BasicFunctionality(t *testing.T) { // Create a new typed state graph g := NewStateGraph[TestState]() // Add nodes g.AddNode("increment", "Increment counter", func(ctx context.Context, state TestState) (TestState, error) { state.Count++ return state, nil }) g.AddNode("check", "Check count", func(ctx context.Context, state TestState) (TestState, error) { if state.Name == "" { state.Name = "test" } return state, nil }) // Set up graph structure g.SetEntryPoint("increment") g.AddEdge("increment", "check") g.AddEdge("check", END) // Compile runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile graph: %v", err) } // Test invocation initialState := TestState{Count: 0} finalState, err := runnable.Invoke(context.Background(), initialState) if err != nil { t.Fatalf("Failed to invoke graph: %v", err) } // Verify results if finalState.Count != 1 { t.Errorf("Expected count to be 1, got %d", finalState.Count) } if finalState.Name != "test" { t.Errorf("Expected name to be 'test', got '%s'", finalState.Name) } } func TestStateGraph_ConditionalEdges(t *testing.T) { g := NewStateGraph[TestState]() g.AddNode("process", "Process", func(ctx context.Context, state TestState) (TestState, error) { state.Count++ return state, nil }) g.AddNode("high", "High count", func(ctx context.Context, state TestState) (TestState, error) { state.Name = "high" return state, nil }) g.AddNode("low", 
"Low count", func(ctx context.Context, state TestState) (TestState, error) { state.Name = "low" return state, nil }) g.SetEntryPoint("process") g.AddConditionalEdge("process", func(ctx context.Context, state TestState) string { if state.Count > 5 { return "high" } return "low" }) g.AddEdge("high", END) g.AddEdge("low", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile graph: %v", err) } // Test with initial count of 4 (after increment becomes 5) state, err := runnable.Invoke(context.Background(), TestState{Count: 4}) if err != nil { t.Fatalf("Failed to invoke graph: %v", err) } if state.Name != "low" { t.Errorf("Expected name to be 'low', got '%s'", state.Name) } // Test with initial count of 5 (after increment becomes 6) state, err = runnable.Invoke(context.Background(), TestState{Count: 5}) if err != nil { t.Fatalf("Failed to invoke graph: %v", err) } if state.Name != "high" { t.Errorf("Expected name to be 'high', got '%s'", state.Name) } } func TestStateGraph_WithSchema(t *testing.T) { g := NewStateGraph[TestState]() // Define schema with merge function schema := NewStructSchema( TestState{Name: "default"}, func(current, new TestState) (TestState, error) { // Preserve name from current, take count from new if new.Name != "" { current.Name = new.Name } if new.Count != 0 { current.Count = new.Count } return current, nil }, ) g.SetSchema(schema) g.AddNode("update", "Update", func(ctx context.Context, state TestState) (TestState, error) { return TestState{Count: state.Count + 1}, nil }) g.SetEntryPoint("update") g.AddEdge("update", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile graph: %v", err) } // Test with schema state, err := runnable.Invoke(context.Background(), TestState{Count: 5}) if err != nil { t.Fatalf("Failed to invoke graph: %v", err) } // Schema should preserve the default name if state.Name != "default" { t.Errorf("Expected name to be 'default', got '%s'", state.Name) } if state.Count != 6 { 
t.Errorf("Expected count to be 6, got %d", state.Count) } } func TestListenableStateGraph_BasicFunctionality(t *testing.T) { g := NewListenableStateGraph[TestState]() // Add a listener to track events var events []string listener := NodeListenerFunc[TestState]( func(ctx context.Context, event NodeEvent, nodeName string, state TestState, err error) { events = append(events, string(event)+":"+nodeName) }, ) // Add node with listener node := g.AddNode("test", "Test node", func(ctx context.Context, state TestState) (TestState, error) { state.Count++ return state, nil }) node.AddListener(listener) g.SetEntryPoint("test") g.AddEdge("test", END) // Compile and run runnable, err := g.CompileListenable() if err != nil { t.Fatalf("Failed to compile listenable graph: %v", err) } state, err := runnable.Invoke(context.Background(), TestState{}) if err != nil { t.Fatalf("Failed to invoke listenable graph: %v", err) } // Check that events were captured if len(events) == 0 { t.Error("No events were captured") } // Verify state if state.Count != 1 { t.Errorf("Expected count to be 1, got %d", state.Count) } } func TestStateGraph_ParallelExecution(t *testing.T) { g := NewStateGraph[TestState]() // Add multiple nodes that can run in parallel g.AddNode("node1", "Node 1", func(ctx context.Context, state TestState) (TestState, error) { time.Sleep(10 * time.Millisecond) // Simulate work state.Count += 1 return state, nil }) g.AddNode("node2", "Node 2", func(ctx context.Context, state TestState) (TestState, error) { time.Sleep(10 * time.Millisecond) // Simulate work state.Count += 2 return state, nil }) g.SetEntryPoint("node1") g.AddEdge("node1", "node2") g.AddEdge("node2", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile graph: %v", err) } start := time.Now() state, err := runnable.Invoke(context.Background(), TestState{}) duration := time.Since(start) if err != nil { t.Fatalf("Failed to invoke graph: %v", err) } // Verify the nodes were executed if 
state.Count != 3 { t.Errorf("Expected count to be 3, got %d", state.Count) } // Verify execution time (should be less than if executed serially) if duration > 50*time.Millisecond { t.Errorf("Execution took too long: %v", duration) } } func BenchmarkStateGraph_Invoke(b *testing.B) { g := NewStateGraph[TestState]() g.AddNode("increment", "Increment", func(ctx context.Context, state TestState) (TestState, error) { state.Count++ return state, nil }) g.SetEntryPoint("increment") g.AddEdge("increment", END) runnable, err := g.Compile() if err != nil { b.Fatalf("Failed to compile graph: %v", err) } ctx := context.Background() initialState := TestState{Count: 0} for b.Loop() { _, err := runnable.Invoke(ctx, initialState) if err != nil { b.Fatalf("Failed to invoke graph: %v", err) } } } func BenchmarkListenableStateGraph_Invoke(b *testing.B) { g := NewListenableStateGraph[TestState]() g.AddNode("increment", "Increment", func(ctx context.Context, state TestState) (TestState, error) { state.Count++ return state, nil }) g.SetEntryPoint("increment") g.AddEdge("increment", END) runnable, err := g.CompileListenable() if err != nil { b.Fatalf("Failed to compile listenable graph: %v", err) } ctx := context.Background() initialState := TestState{Count: 0} for b.Loop() { _, err := runnable.Invoke(ctx, initialState) if err != nil { b.Fatalf("Failed to invoke listenable graph: %v", err) } } } // Test StateGraph methods directly func TestStateGraph_AdditionalMethods(t *testing.T) { g := NewStateGraph[TestState]() // Test SetRetryPolicy policy := &RetryPolicy{ MaxRetries: 3, } g.SetRetryPolicy(policy) if g.retryPolicy != policy { t.Error("SetRetryPolicy should set the retryPolicy field") } // Test SetStateMerger merger := func(ctx context.Context, current TestState, newStates []TestState) (TestState, error) { for _, ns := range newStates { current.Count += ns.Count } return current, nil } g.SetStateMerger(merger) if g.stateMerger == nil { t.Error("SetStateMerger should set the 
stateMerger field") } } // Test StateRunnable methods func TestStateRunnable_SetTracer(t *testing.T) { g := NewStateGraph[TestState]() g.AddNode("test", "Test node", func(ctx context.Context, state TestState) (TestState, error) { return state, nil }) g.SetEntryPoint("test") g.AddEdge("test", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } tracer := &Tracer{} runnable.SetTracer(tracer) if runnable.tracer != tracer { t.Error("SetTracer should set the tracer field") } } func TestStateRunnable_WithTracer(t *testing.T) { g := NewStateGraph[TestState]() g.AddNode("test", "Test node", func(ctx context.Context, state TestState) (TestState, error) { return state, nil }) g.SetEntryPoint("test") g.AddEdge("test", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } tracer := &Tracer{} newRunnable := runnable.WithTracer(tracer) if newRunnable == runnable { t.Error("WithTracer should return a new instance") } if newRunnable.graph != runnable.graph { t.Error("New runnable should have the same graph") } } // Test edge cases func TestStateGraph_MultipleEdgesFromNode(t *testing.T) { g := NewStateGraph[TestState]() g.AddNode("source", "Source node", func(ctx context.Context, state TestState) (TestState, error) { return state, nil }) g.AddNode("target1", "Target 1", func(ctx context.Context, state TestState) (TestState, error) { state.Count = 1 return state, nil }) g.AddNode("target2", "Target 2", func(ctx context.Context, state TestState) (TestState, error) { state.Count = 2 return state, nil }) // Add multiple edges from source (fan-out) g.AddEdge("source", "target1") g.AddEdge("source", "target2") // Add edges from targets to END g.AddEdge("target1", END) g.AddEdge("target2", END) g.SetEntryPoint("source") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // This should work - multiple targets should be executed in parallel ctx := context.Background() result, err := 
runnable.Invoke(ctx, TestState{}) if err != nil { t.Errorf("Should not error with fan-out: %v", err) } // Result should be from one of the targets (parallel execution may return either) if result.Count != 1 && result.Count != 2 { t.Errorf("Expected count to be 1 or 2, got %d", result.Count) } } func TestStateGraph_ComplexStateType(t *testing.T) { // Test with complex nested state type ComplexState struct { Info struct { Name string Version int } Data map[string]any Items []struct { ID int Tags []string } Processed bool } g := NewStateGraph[ComplexState]() g.AddNode("process", "Process complex state", func(ctx context.Context, state ComplexState) (ComplexState, error) { state.Info.Name = "processed" state.Processed = true return state, nil }) g.SetEntryPoint("process") g.AddEdge("process", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } initialState := ComplexState{ Info: struct { Name string Version int }{ Name: "initial", Version: 1, }, } result, err := runnable.Invoke(context.Background(), initialState) if err != nil { t.Fatalf("Failed to invoke: %v", err) } if !result.Processed { t.Error("State should be marked as processed") } if result.Info.Name != "processed" { t.Errorf("Expected name to be 'processed', got '%s'", result.Info.Name) } } func TestStateGraph_MapState(t *testing.T) { // Test with map state g := NewStateGraph[map[string]any]() g.AddNode("process", "Process map", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["count"] = state["count"].(int) + 1 state["processed"] = true return state, nil }) g.SetEntryPoint("process") g.AddEdge("process", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } initialState := map[string]any{ "count": 0, "name": "test", } result, err := runnable.Invoke(context.Background(), initialState) if err != nil { t.Fatalf("Failed to invoke: %v", err) } if result["count"].(int) != 1 { t.Errorf("Expected count to be 1, 
got %v", result["count"]) } if !result["processed"].(bool) { t.Error("Should be marked as processed") } if result["name"].(string) != "test" { t.Errorf("Expected name to be 'test', got %v", result["name"]) } } func TestStateGraph_StringState(t *testing.T) { // Test with simple string state g := NewStateGraph[string]() g.AddNode("process", "Process string", func(ctx context.Context, state string) (string, error) { return state + "_processed", nil }) g.SetEntryPoint("process") g.AddEdge("process", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } result, err := runnable.Invoke(context.Background(), "initial") if err != nil { t.Fatalf("Failed to invoke: %v", err) } if result != "initial_processed" { t.Errorf("Expected 'initial_processed', got '%s'", result) } } // Test helper functions func TestStateRunnable_HelperFunctions(t *testing.T) { g := NewStateGraph[TestState]() g.AddNode("test", "Test", func(ctx context.Context, state TestState) (TestState, error) { return state, nil }) g.SetEntryPoint("test") g.AddEdge("test", END) // Set retry policy to enable retry logic g.SetRetryPolicy(&RetryPolicy{ MaxRetries: 3, BackoffStrategy: ExponentialBackoff, RetryableErrors: []string{"test error", "context canceled", "deadline exceeded"}, }) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // Note: isRetryableError doesn't handle nil errors properly, so we skip testing that case // This is a known issue in the implementation // Test isRetryableError with actual errors errTests := []struct { name string err error expected bool }{ {"context canceled", context.Canceled, true}, {"context deadline exceeded", context.DeadlineExceeded, true}, {"retryable error", errors.New("test error"), true}, {"non-retryable error", errors.New("different error"), false}, } for _, tt := range errTests { t.Run(tt.name, func(t *testing.T) { result := runnable.isRetryableError(tt.err) if result != tt.expected { 
t.Errorf("Expected %v, got %v", tt.expected, result) } }) } // Test calculateBackoffDelay - uses exponential backoff delayTests := []struct { name string attempt int minDelay int // Minimum expected in ms maxDelay int // Maximum expected in ms }{ {"first attempt", 0, 1000, 1000}, {"second attempt", 1, 2000, 2000}, {"third attempt", 2, 4000, 4000}, {"fourth attempt", 3, 8000, 8000}, {"attempt 5", 5, 32000, 32000}, // 1<<5 = 32 seconds } for _, tt := range delayTests { t.Run(tt.name, func(t *testing.T) { delay := runnable.calculateBackoffDelay(tt.attempt) expectedMin := time.Duration(tt.minDelay) * time.Millisecond expectedMax := time.Duration(tt.maxDelay) * time.Millisecond if delay < expectedMin || delay > expectedMax { t.Errorf("Expected delay between %v and %v, got %v", expectedMin, expectedMax, delay) } }) } } func TestInvokeWithConfig_WithTags(t *testing.T) { g := NewStateGraph[TestState]() g.AddNode("process", "Process", func(ctx context.Context, state TestState) (TestState, error) { state.Count++ return state, nil }) g.SetEntryPoint("process") g.AddEdge("process", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } ctx := context.Background() config := &Config{ Tags: []string{"test", "parallel"}, Configurable: map[string]any{"limit": 10}, } result, err := runnable.InvokeWithConfig(ctx, TestState{}, config) if err != nil { t.Fatalf("Failed to invoke with config: %v", err) } if result.Count != 1 { t.Errorf("Expected count to be 1, got %d", result.Count) } } func TestExecuteNodesParallel_ErrorHandling(t *testing.T) { g := NewStateGraph[TestState]() // Add nodes g.AddNode("error", "Error node", func(ctx context.Context, state TestState) (TestState, error) { return state, errors.New("test error") }) g.AddNode("success", "Success node", func(ctx context.Context, state TestState) (TestState, error) { state.Count = 1 return state, nil }) g.SetEntryPoint("error") g.AddEdge("error", "success") g.AddEdge("success", END) // This 
tests the parallel execution path through compilation runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // The error should be propagated ctx := context.Background() _, err = runnable.Invoke(ctx, TestState{}) if err == nil { t.Error("Expected error from execution") } } func TestExecuteNodeWithRetry_RetryPolicy(t *testing.T) { g := NewStateGraph[TestState]() attempt := 0 g.AddNode("retry", "Retry node", func(ctx context.Context, state TestState) (TestState, error) { attempt++ if attempt < 3 { return state, errors.New("temporary error") } state.Count = attempt return state, nil }) // Set entry point g.SetEntryPoint("retry") g.AddEdge("retry", END) // Set retry policy g.SetRetryPolicy(&RetryPolicy{ MaxRetries: 3, BackoffStrategy: ExponentialBackoff, RetryableErrors: []string{"temporary error"}, }) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } ctx := context.Background() result, err := runnable.Invoke(ctx, TestState{}) if err != nil { t.Errorf("Should not error after retries: %v", err) } if result.Count != 3 { t.Errorf("Expected 3 attempts, got %d", result.Count) } } func TestStateGraph_CommandGoto(t *testing.T) { // Use any type to allow returning Command g := NewStateGraph[any]() // Create a typed schema adapter for map[string]any mapSchema := NewMapSchema() mapSchema.RegisterReducer("count", func(curr, new any) (any, error) { if curr == nil { return new, nil } return curr.(int) + new.(int), nil }) // Wrap in a typed schema schema := &mapSchemaAdapter{mapSchema} g.SetSchema(schema) // Node A: Returns Command to update count and go to C (skipping B) g.AddNode("A", "Node A", func(ctx context.Context, state any) (any, error) { return &Command{ Update: map[string]any{"count": 1}, Goto: "C", }, nil }) // Node B: Should be skipped g.AddNode("B", "Node B", func(ctx context.Context, state any) (any, error) { return map[string]any{"count": 10}, nil }) // Node C: Final node g.AddNode("C", "Node C", 
func(ctx context.Context, state any) (any, error) { return map[string]any{"count": 100}, nil }) g.SetEntryPoint("A") g.AddEdge("A", "B") // Static edge A -> B (should be overridden by Command.Goto) g.AddEdge("B", "C") g.AddEdge("C", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } res, err := runnable.Invoke(context.Background(), map[string]any{"count": 0}) if err != nil { t.Fatalf("Failed to invoke: %v", err) } // Extract the final state mRes, ok := res.(map[string]any) if !ok { t.Fatalf("Expected result to be map[string]any, got %T", res) } // Expected: 0 + 1 (A) + 100 (C) = 101. B is skipped due to Command.Goto. if mRes["count"].(int) != 101 { t.Errorf("Expected count to be 101, got %v", mRes["count"]) } } func TestStateGraph_CommandGotoMultiple(t *testing.T) { // Use any type to allow returning Command g := NewStateGraph[any]() // Create a typed schema adapter for map[string]any mapSchema := NewMapSchema() mapSchema.RegisterReducer("count", func(curr, new any) (any, error) { if curr == nil { return new, nil } return curr.(int) + new.(int), nil }) // Wrap in a typed schema schema := &mapSchemaAdapter{mapSchema} g.SetSchema(schema) // Node A: Returns Command with multiple Goto targets g.AddNode("A", "Node A", func(ctx context.Context, state any) (any, error) { return &Command{ Update: map[string]any{"count": 1}, Goto: []string{"B", "C"}, }, nil }) // Node B: Adds 10 g.AddNode("B", "Node B", func(ctx context.Context, state any) (any, error) { return map[string]any{"count": 10}, nil }) // Node C: Adds 100 g.AddNode("C", "Node C", func(ctx context.Context, state any) (any, error) { return map[string]any{"count": 100}, nil }) g.SetEntryPoint("A") g.AddEdge("B", END) g.AddEdge("C", END) runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } res, err := runnable.Invoke(context.Background(), map[string]any{"count": 0}) if err != nil { t.Fatalf("Failed to invoke: %v", err) } // Extract the final 
state mRes, ok := res.(map[string]any) if !ok { t.Fatalf("Expected result to be map[string]any, got %T", res) } // Expected: 0 + 1 (A) + 10 (B) + 100 (C) = 111 // Both B and C should execute in parallel if mRes["count"].(int) != 111 { t.Errorf("Expected count to be 111, got %v", mRes["count"]) } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/conditional_edges_test.go
graph/conditional_edges_test.go
package graph_test import ( "context" "strings" "testing" "github.com/smallnest/langgraphgo/graph" "github.com/tmc/langchaingo/llms" ) //nolint:gocognit,dupl,cyclop // This is a comprehensive test that needs to check multiple scenarios with similar setup func TestConditionalEdges(t *testing.T) { t.Parallel() tests := []struct { name string buildGraph func() *graph.StateGraph[map[string]any] initialState any expectedResult any expectError bool }{ { name: "Simple conditional routing based on content", buildGraph: func() *graph.StateGraph[map[string]any] { g := graph.NewStateGraph[map[string]any]() // Add nodes g.AddNode("start", "start", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddNode("calculator", "calculator", func(ctx context.Context, state map[string]any) (map[string]any, error) { messages := state["messages"].([]llms.MessageContent) state["messages"] = append(messages, llms.TextParts("ai", "Calculating: 2+2=4")) return state, nil }) g.AddNode("general", "general", func(ctx context.Context, state map[string]any) (map[string]any, error) { messages := state["messages"].([]llms.MessageContent) state["messages"] = append(messages, llms.TextParts("ai", "General response")) return state, nil }) // Add conditional edge from start g.AddConditionalEdge("start", func(ctx context.Context, state map[string]any) string { messages := state["messages"].([]llms.MessageContent) if len(messages) > 0 { lastMessage := messages[len(messages)-1] if content, ok := lastMessage.Parts[0].(llms.TextContent); ok { if strings.Contains(content.Text, "calculate") || strings.Contains(content.Text, "math") { return "calculator" } } } return "general" }) // Add regular edges to END g.AddEdge("calculator", graph.END) g.AddEdge("general", graph.END) g.SetEntryPoint("start") return g }, initialState: map[string]any{"messages": []llms.MessageContent{ llms.TextParts("human", "I need to calculate something"), }}, expectedResult: 
map[string]any{"messages": []llms.MessageContent{ llms.TextParts("human", "I need to calculate something"), llms.TextParts("ai", "Calculating: 2+2=4"), }}, expectError: false, }, { name: "Conditional routing to general path", buildGraph: func() *graph.StateGraph[map[string]any] { g := graph.NewStateGraph[map[string]any]() g.AddNode("start", "start", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddNode("calculator", "calculator", func(ctx context.Context, state map[string]any) (map[string]any, error) { messages := state["messages"].([]llms.MessageContent) state["messages"] = append(messages, llms.TextParts("ai", "Calculating: 2+2=4")) return state, nil }) g.AddNode("general", "general", func(ctx context.Context, state map[string]any) (map[string]any, error) { messages := state["messages"].([]llms.MessageContent) state["messages"] = append(messages, llms.TextParts("ai", "General response")) return state, nil }) g.AddConditionalEdge("start", func(ctx context.Context, state map[string]any) string { messages := state["messages"].([]llms.MessageContent) if len(messages) > 0 { lastMessage := messages[len(messages)-1] if content, ok := lastMessage.Parts[0].(llms.TextContent); ok { if strings.Contains(content.Text, "calculate") || strings.Contains(content.Text, "math") { return "calculator" } } } return "general" }) g.AddEdge("calculator", graph.END) g.AddEdge("general", graph.END) g.SetEntryPoint("start") return g }, initialState: map[string]any{"messages": []llms.MessageContent{ llms.TextParts("human", "Tell me a story"), }}, expectedResult: map[string]any{"messages": []llms.MessageContent{ llms.TextParts("human", "Tell me a story"), llms.TextParts("ai", "General response"), }}, expectError: false, }, { name: "Multi-level conditional routing", buildGraph: func() *graph.StateGraph[map[string]any] { g := graph.NewStateGraph[map[string]any]() g.AddNode("router", "router", func(ctx context.Context, state map[string]any) 
(map[string]any, error) { return state, nil }) g.AddNode("urgent", "urgent", func(ctx context.Context, state map[string]any) (map[string]any, error) { s := state["message"].(string) state["message"] = s + " -> handled urgently" return state, nil }) g.AddNode("normal", "normal", func(ctx context.Context, state map[string]any) (map[string]any, error) { s := state["message"].(string) state["message"] = s + " -> handled normally" return state, nil }) g.AddNode("low", "low", func(ctx context.Context, state map[string]any) (map[string]any, error) { s := state["message"].(string) state["message"] = s + " -> handled with low priority" return state, nil }) // Conditional routing based on priority keywords g.AddConditionalEdge("router", func(ctx context.Context, state map[string]any) string { s := state["message"].(string) if strings.Contains(s, "URGENT") || strings.Contains(s, "ASAP") { return "urgent" } if strings.Contains(s, "NORMAL") || strings.Contains(s, "REGULAR") { return "normal" } return "low" }) g.AddEdge("urgent", graph.END) g.AddEdge("normal", graph.END) g.AddEdge("low", graph.END) g.SetEntryPoint("router") return g }, initialState: map[string]any{"message": "URGENT: Fix the bug"}, expectedResult: map[string]any{"message": "URGENT: Fix the bug -> handled urgently"}, expectError: false, }, { name: "Conditional edge to END", buildGraph: func() *graph.StateGraph[map[string]any] { g := graph.NewStateGraph[map[string]any]() g.AddNode("check", "check", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddNode("process", "process", func(ctx context.Context, state map[string]any) (map[string]any, error) { n := state["value"].(int) state["value"] = n * 2 return state, nil }) // Conditional edge that can go directly to END g.AddConditionalEdge("check", func(ctx context.Context, state map[string]any) string { n := state["value"].(int) if n < 0 { return graph.END } return "process" }) g.AddEdge("process", graph.END) 
g.SetEntryPoint("check") return g }, initialState: map[string]any{"value": -5}, expectedResult: map[string]any{"value": -5}, // Should go directly to END without processing expectError: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() g := tt.buildGraph() runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile graph: %v", err) } ctx := context.Background() result, err := runnable.Invoke(ctx, tt.initialState.(map[string]any)) if tt.expectError && err == nil { t.Error("Expected error but got none") } if !tt.expectError && err != nil { t.Errorf("Unexpected error: %v", err) } if !tt.expectError { // Check if the result has "messages" field for message-based tests if _, hasMessages := result["messages"]; hasMessages { resultMessages := result["messages"].([]llms.MessageContent) expectedMessages := tt.expectedResult.(map[string]any)["messages"].([]llms.MessageContent) if len(resultMessages) != len(expectedMessages) { t.Errorf("Expected %d messages, got %d", len(expectedMessages), len(resultMessages)) } else { for i := range resultMessages { if resultMessages[i].Role != expectedMessages[i].Role { t.Errorf("Message %d: expected role %s, got %s", i, expectedMessages[i].Role, resultMessages[i].Role) } expectedText := expectedMessages[i].Parts[0].(llms.TextContent).Text actualText := resultMessages[i].Parts[0].(llms.TextContent).Text if actualText != expectedText { t.Errorf("Message %d: expected text %q, got %q", i, expectedText, actualText) } } } } else { // For non-message based tests, just compare the entire result expected := tt.expectedResult.(map[string]any) for k, expectedVal := range expected { if result[k] != expectedVal { t.Errorf("Expected %v for key %s, got %v", expectedVal, k, result[k]) } } } } }) } } func TestConditionalEdges_ChainedConditions(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() // Create a chain of conditional decisions g.AddNode("start", "start", func(ctx 
context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddNode("step1", "step1", func(ctx context.Context, state map[string]any) (map[string]any, error) { n := state["value"].(int) state["value"] = n + 10 return state, nil }) g.AddNode("step2", "step2", func(ctx context.Context, state map[string]any) (map[string]any, error) { n := state["value"].(int) state["value"] = n * 2 return state, nil }) g.AddNode("step3", "step3", func(ctx context.Context, state map[string]any) (map[string]any, error) { n := state["value"].(int) state["value"] = n - 5 return state, nil }) // First conditional g.AddConditionalEdge("start", func(ctx context.Context, state map[string]any) string { n := state["value"].(int) if n > 0 { return "step1" } return "step2" }) // Second conditional g.AddConditionalEdge("step1", func(ctx context.Context, state map[string]any) string { n := state["value"].(int) if n > 15 { return "step3" } return graph.END }) g.AddEdge("step2", graph.END) g.AddEdge("step3", graph.END) g.SetEntryPoint("start") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile graph: %v", err) } // Test with positive number (should go: start -> step1 -> step3 -> END) ctx := context.Background() result, err := runnable.Invoke(ctx, map[string]any{"value": 10}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // 10 + 10 = 20 (step1), then 20 > 15 so go to step3, 20 - 5 = 15 if result["value"] != 15 { t.Errorf("Expected result 15, got %v", result) } // Test with negative number (should go: start -> step2 -> END) result, err = runnable.Invoke(ctx, map[string]any{"value": -5}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // -5 * 2 = -10 (step2) if result["value"] != -10 { t.Errorf("Expected result -10, got %v", result) } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/command.go
graph/command.go
package graph // Command allows a node to dynamically update the state and control the flow. // It can be returned by a node function instead of a direct state update. type Command struct { // Update is the value to update the state with. // It will be processed by the schema's reducers. Update any // Goto specifies the next node(s) to execute. // If set, it overrides the graph's edges. // Can be a single string (node name) or []string. Goto any }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/command_test.go
graph/command_test.go
package graph import ( "context" "testing" "github.com/stretchr/testify/assert" ) // mapSchemaAdapterForAny adapts MapSchema for use with StateGraph[any] type mapSchemaAdapterForAny struct { *MapSchema } func (m *mapSchemaAdapterForAny) Init() any { return m.MapSchema.Init() } func (m *mapSchemaAdapterForAny) Update(current, new any) (any, error) { currentMap, ok1 := current.(map[string]any) newMap, ok2 := new.(map[string]any) if !ok1 || !ok2 { return current, nil // Or error? } return m.MapSchema.Update(currentMap, newMap) } func TestCommandGoto(t *testing.T) { // Use any type to allow returning Command g := NewStateGraph[any]() // Define schema schema := NewMapSchema() schema.RegisterReducer("count", func(curr, new any) (any, error) { if curr == nil { return new, nil } return curr.(int) + new.(int), nil }) g.SetSchema(&mapSchemaAdapterForAny{MapSchema: schema}) // Node A: Returns Command to update count and go to C (skipping B) g.AddNode("A", "A", func(ctx context.Context, state any) (any, error) { return &Command{ Update: map[string]any{"count": 1}, Goto: "C", }, nil }) // Node B: Should be skipped g.AddNode("B", "B", func(ctx context.Context, state any) (any, error) { return map[string]any{"count": 10}, nil }) // Node C: Final node g.AddNode("C", "C", func(ctx context.Context, state any) (any, error) { return map[string]any{"count": 100}, nil }) g.SetEntryPoint("A") g.AddEdge("A", "B") // Static edge A -> B g.AddEdge("B", "C") g.AddEdge("C", END) runnable, err := g.Compile() assert.NoError(t, err) res, err := runnable.Invoke(context.Background(), map[string]any{"count": 0}) assert.NoError(t, err) mRes, ok := res.(map[string]any) assert.True(t, ok) // Expected: 0 + 1 (A) + 100 (C) = 101. B is skipped. assert.Equal(t, 101, mRes["count"]) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/retry.go
graph/retry.go
package graph import ( "context" "fmt" "math" "math/rand" "time" ) // RetryConfig configures retry behavior for nodes type RetryConfig struct { MaxAttempts int InitialDelay time.Duration MaxDelay time.Duration BackoffFactor float64 RetryableErrors func(error) bool // Determines if an error should trigger retry } // DefaultRetryConfig returns a default retry configuration func DefaultRetryConfig() *RetryConfig { return &RetryConfig{ MaxAttempts: 3, InitialDelay: 100 * time.Millisecond, MaxDelay: 5 * time.Second, BackoffFactor: 2.0, RetryableErrors: func(_ error) bool { // By default, retry all errors return true }, } } // RetryNode wraps a node with retry logic type RetryNode[S any] struct { node TypedNode[S] config *RetryConfig } // NewRetryNode creates a new retry node func NewRetryNode[S any](node TypedNode[S], config *RetryConfig) *RetryNode[S] { if config == nil { config = DefaultRetryConfig() } return &RetryNode[S]{ node: node, config: config, } } // Execute runs the node with retry logic func (rn *RetryNode[S]) Execute(ctx context.Context, state S) (S, error) { var lastErr error var zero S delay := rn.config.InitialDelay for attempt := 1; attempt <= rn.config.MaxAttempts; attempt++ { // Check context cancellation select { case <-ctx.Done(): return zero, fmt.Errorf("retry cancelled: %w", ctx.Err()) default: } // Execute the node result, err := rn.node.Function(ctx, state) if err == nil { return result, nil } lastErr = err // Check if error is retryable if rn.config.RetryableErrors != nil && !rn.config.RetryableErrors(err) { return zero, fmt.Errorf("non-retryable error in %s: %w", rn.node.Name, err) } // Don't sleep after the last attempt if attempt < rn.config.MaxAttempts { // Sleep with exponential backoff select { case <-time.After(delay): // Calculate next delay with backoff delay = min(time.Duration(float64(delay)*rn.config.BackoffFactor), rn.config.MaxDelay) case <-ctx.Done(): return zero, fmt.Errorf("retry cancelled during backoff: %w", ctx.Err()) } } } 
return zero, fmt.Errorf("max retries (%d) exceeded for %s: %w", rn.config.MaxAttempts, rn.node.Name, lastErr) } // AddNodeWithRetry adds a node with retry logic func (g *StateGraph[S]) AddNodeWithRetry( name string, description string, fn func(context.Context, S) (S, error), config *RetryConfig, ) { node := TypedNode[S]{ Name: name, Description: description, Function: fn, } retryNode := NewRetryNode(node, config) g.AddNode(name, description, retryNode.Execute) } // TimeoutNode wraps a node with timeout logic type TimeoutNode[S any] struct { node TypedNode[S] timeout time.Duration } // NewTimeoutNode creates a new timeout node func NewTimeoutNode[S any](node TypedNode[S], timeout time.Duration) *TimeoutNode[S] { return &TimeoutNode[S]{ node: node, timeout: timeout, } } // Execute runs the node with timeout func (tn *TimeoutNode[S]) Execute(ctx context.Context, state S) (S, error) { // Create a timeout context timeoutCtx, cancel := context.WithTimeout(ctx, tn.timeout) defer cancel() // Channel for result type result struct { value S err error } resultChan := make(chan result, 1) // Execute in goroutine go func() { value, err := tn.node.Function(timeoutCtx, state) resultChan <- result{value: value, err: err} }() // Wait for result or timeout select { case res := <-resultChan: return res.value, res.err case <-timeoutCtx.Done(): var zero S return zero, fmt.Errorf("node %s timed out after %v", tn.node.Name, tn.timeout) } } // AddNodeWithTimeout adds a node with timeout func (g *StateGraph[S]) AddNodeWithTimeout( name string, description string, fn func(context.Context, S) (S, error), timeout time.Duration, ) { node := TypedNode[S]{ Name: name, Description: description, Function: fn, } timeoutNode := NewTimeoutNode(node, timeout) g.AddNode(name, description, timeoutNode.Execute) } // CircuitBreakerConfig configures circuit breaker behavior type CircuitBreakerConfig struct { FailureThreshold int // Number of failures before opening SuccessThreshold int // Number of 
successes before closing Timeout time.Duration // Time before attempting to close HalfOpenMaxCalls int // Max calls in half-open state } // CircuitBreakerState represents the state of a circuit breaker type CircuitBreakerState int const ( CircuitClosed CircuitBreakerState = iota CircuitOpen CircuitHalfOpen ) // CircuitBreaker implements the circuit breaker pattern type CircuitBreaker[S any] struct { node TypedNode[S] config CircuitBreakerConfig state CircuitBreakerState failures int successes int lastFailureTime time.Time halfOpenCalls int } // NewCircuitBreaker creates a new circuit breaker func NewCircuitBreaker[S any](node TypedNode[S], config CircuitBreakerConfig) *CircuitBreaker[S] { return &CircuitBreaker[S]{ node: node, config: config, state: CircuitClosed, } } // Execute runs the node with circuit breaker logic func (cb *CircuitBreaker[S]) Execute(ctx context.Context, state S) (S, error) { var zero S // Check circuit state switch cb.state { case CircuitClosed: // Circuit is closed, proceed normally case CircuitOpen: // Check if enough time has passed to try again if time.Since(cb.lastFailureTime) > cb.config.Timeout { cb.state = CircuitHalfOpen cb.halfOpenCalls = 0 } else { return zero, fmt.Errorf("circuit breaker open for %s", cb.node.Name) } case CircuitHalfOpen: // Check if we've made too many calls in half-open state if cb.halfOpenCalls >= cb.config.HalfOpenMaxCalls { cb.state = CircuitOpen return zero, fmt.Errorf("circuit breaker half-open limit reached for %s", cb.node.Name) } cb.halfOpenCalls++ } // Execute the node result, err := cb.node.Function(ctx, state) // Update circuit breaker state based on result if err != nil { cb.failures++ cb.successes = 0 cb.lastFailureTime = time.Now() if cb.failures >= cb.config.FailureThreshold { cb.state = CircuitOpen } return zero, fmt.Errorf("circuit breaker error in %s: %w", cb.node.Name, err) } // Success cb.successes++ cb.failures = 0 if cb.state == CircuitHalfOpen && cb.successes >= cb.config.SuccessThreshold 
{ cb.state = CircuitClosed } return result, nil } // AddNodeWithCircuitBreaker adds a node with circuit breaker func (g *StateGraph[S]) AddNodeWithCircuitBreaker( name string, description string, fn func(context.Context, S) (S, error), config CircuitBreakerConfig, ) { node := TypedNode[S]{ Name: name, Description: description, Function: fn, } cb := NewCircuitBreaker(node, config) g.AddNode(name, description, cb.Execute) } // RateLimiter implements rate limiting for nodes type RateLimiter[S any] struct { node TypedNode[S] maxCalls int window time.Duration calls []time.Time } // NewRateLimiter creates a new rate limiter func NewRateLimiter[S any](node TypedNode[S], maxCalls int, window time.Duration) *RateLimiter[S] { return &RateLimiter[S]{ node: node, maxCalls: maxCalls, window: window, calls: make([]time.Time, 0, maxCalls), } } // Execute runs the node with rate limiting func (rl *RateLimiter[S]) Execute(ctx context.Context, state S) (S, error) { now := time.Now() // Remove old calls outside the window validCalls := make([]time.Time, 0, rl.maxCalls) for _, callTime := range rl.calls { if now.Sub(callTime) < rl.window { validCalls = append(validCalls, callTime) } } rl.calls = validCalls // Check if we're at the limit if len(rl.calls) >= rl.maxCalls { // Calculate when we can make the next call oldestCall := rl.calls[0] waitTime := rl.window - now.Sub(oldestCall) var zero S return zero, fmt.Errorf("rate limit exceeded for %s, retry after %v", rl.node.Name, waitTime) } // Record this call rl.calls = append(rl.calls, now) // Execute the node return rl.node.Function(ctx, state) } // AddNodeWithRateLimit adds a node with rate limiting func (g *StateGraph[S]) AddNodeWithRateLimit( name string, description string, fn func(context.Context, S) (S, error), maxCalls int, window time.Duration, ) { node := TypedNode[S]{ Name: name, Description: description, Function: fn, } rl := NewRateLimiter(node, maxCalls, window) g.AddNode(name, description, rl.Execute) } // 
ExponentialBackoffRetry implements exponential backoff with jitter func ExponentialBackoffRetry( ctx context.Context, fn func() (any, error), maxAttempts int, baseDelay time.Duration, ) (any, error) { for attempt := range maxAttempts { result, err := fn() if err == nil { return result, nil } if attempt == maxAttempts-1 { return nil, err } // Calculate delay with exponential backoff and jitter delay := baseDelay * time.Duration(math.Pow(2, float64(attempt))) // Add jitter (±25%) //nolint:gosec // Using weak RNG for jitter is acceptable, not security-critical jitter := time.Duration(float64(delay) * 0.25 * (2*rand.Float64() - 1)) delay += jitter select { case <-time.After(delay): // Continue to next attempt case <-ctx.Done(): return nil, ctx.Err() } } return nil, fmt.Errorf("max attempts reached") }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/state_graph_with_schema_test.go
graph/state_graph_with_schema_test.go
package graph import ( "context" "testing" ) func TestNewMessageGraph(t *testing.T) { g := NewMessageGraph() // Verify schema is initialized if g.Schema == nil { t.Fatal("Schema should be initialized") } // Verify messages reducer is registered // Schema is now MapSchema directly mapSchema, ok := g.Schema.(*MapSchema) if !ok { t.Fatal("Schema should be a MapSchema") } if mapSchema.Reducers == nil { t.Fatal("Reducers map should be initialized") } if _, exists := mapSchema.Reducers["messages"]; !exists { t.Fatal("messages reducer should be registered") } // Test that the schema works with AddMessages g.AddNode("node1", "Test node", func(ctx context.Context, state map[string]any) (map[string]any, error) { return map[string]any{ "messages": []map[string]any{ {"role": "assistant", "content": "Hello"}, }, }, nil }) g.AddEdge("node1", END) g.SetEntryPoint("node1") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // Execute with initial state initialState := map[string]any{ "messages": []map[string]any{ {"role": "user", "content": "Hi"}, }, } result, err := runnable.Invoke(context.Background(), initialState) if err != nil { t.Fatalf("Failed to invoke: %v", err) } // Verify messages were merged messages, ok := result["messages"].([]map[string]any) if !ok { t.Fatal("messages should be a slice") } if len(messages) != 2 { t.Fatalf("Expected 2 messages, got %d", len(messages)) } t.Log("NewStateGraphWithSchema test passed!") }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/parallel_execution_test.go
graph/parallel_execution_test.go
package graph import ( "context" "maps" "testing" "time" "github.com/stretchr/testify/assert" ) // appendVisitors is a helper function that appends a node name to the visited list in the state. func appendVisitors(state map[string]any, node string) []string { visited, ok := state["visited"].([]string) if !ok { visited = []string{} } return append(visited, node) } func TestParallelExecution_FanOut(t *testing.T) { g := NewStateGraph[map[string]any]() // Node A: Entry point g.AddNode("A", "A", func(ctx context.Context, state map[string]any) (map[string]any, error) { newState := make(map[string]any) maps.Copy(newState, state) visited := appendVisitors(newState, "A") newState["visited"] = visited return newState, nil }) // Node B: Branch 1 g.AddNode("B", "B", func(ctx context.Context, state map[string]any) (map[string]any, error) { newState := make(map[string]any) maps.Copy(newState, state) visited := appendVisitors(newState, "B") newState["visited"] = visited time.Sleep(10 * time.Millisecond) // Simulate work return newState, nil }) // Node C: Branch 2 g.AddNode("C", "C", func(ctx context.Context, state map[string]any) (map[string]any, error) { newState := make(map[string]any) maps.Copy(newState, state) visited := appendVisitors(newState, "C") newState["visited"] = visited time.Sleep(10 * time.Millisecond) // Simulate work return newState, nil }) // Node D: Join point g.AddNode("D", "D", func(ctx context.Context, state map[string]any) (map[string]any, error) { newState := make(map[string]any) maps.Copy(newState, state) visited := appendVisitors(newState, "D") newState["visited"] = visited return newState, nil }) g.SetEntryPoint("A") // Define Fan-Out: A -> B, A -> C g.AddEdge("A", "B") g.AddEdge("A", "C") // Define Fan-In: B -> D, C -> D g.AddEdge("B", "D") g.AddEdge("C", "D") g.AddEdge("D", END) // Set state merger for parallel execution g.SetStateMerger(func(ctx context.Context, current map[string]any, newStates []map[string]any) (map[string]any, error) { // Collect 
all visited nodes from all states visitedSet := make(map[string]bool) for _, s := range newStates { if v, ok := s["visited"].([]string); ok { for _, node := range v { visitedSet[node] = true } } } // Convert to sorted slice for deterministic output visited := make([]string, 0, len(visitedSet)) for node := range visitedSet { visited = append(visited, node) } result := make(map[string]any) maps.Copy(result, current) result["visited"] = visited return result, nil }) // Compile runnable, err := g.Compile() assert.NoError(t, err) // Execute initialState := map[string]any{ "visited": []string{}, } result, err := runnable.Invoke(context.Background(), initialState) assert.NoError(t, err) // Check results visited := result["visited"].([]string) assert.Contains(t, visited, "A") assert.Contains(t, visited, "D") // Both B and C should be visited hasB := false hasC := false for _, v := range visited { if v == "B" { hasB = true } if v == "C" { hasC = true } } assert.True(t, hasB, "Node B should be visited") assert.True(t, hasC, "Node C should be visited") }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/streaming.go
graph/streaming.go
package graph import ( "context" "sync" "time" ) // StreamMode defines the mode of streaming type StreamMode string const ( // StreamModeValues emits the full state after each step StreamModeValues StreamMode = "values" // StreamModeUpdates emits the updates (deltas) from each node StreamModeUpdates StreamMode = "updates" // StreamModeMessages emits LLM messages/tokens (if available) StreamModeMessages StreamMode = "messages" // StreamModeDebug emits all events (default) StreamModeDebug StreamMode = "debug" ) // StreamConfig configures streaming behavior type StreamConfig struct { // BufferSize is the size of the event channel buffer BufferSize int // EnableBackpressure determines if backpressure handling is enabled EnableBackpressure bool // MaxDroppedEvents is the maximum number of events to drop before logging MaxDroppedEvents int // Mode specifies what kind of events to stream Mode StreamMode } // DefaultStreamConfig returns the default streaming configuration func DefaultStreamConfig() StreamConfig { return StreamConfig{ BufferSize: 1000, EnableBackpressure: true, MaxDroppedEvents: 100, Mode: StreamModeDebug, } } // StreamResult contains the channels returned by streaming execution type StreamResult[S any] struct { // Events channel receives StreamEvent objects in real-time Events <-chan StreamEvent[S] // Result channel receives the final result when execution completes Result <-chan S // Errors channel receives any errors that occur during execution Errors <-chan error // Done channel is closed when streaming is complete Done <-chan struct{} // Cancel function can be called to stop streaming Cancel context.CancelFunc } // StreamingListener implements NodeListener for streaming events type StreamingListener[S any] struct { eventChan chan<- StreamEvent[S] config StreamConfig mutex sync.RWMutex droppedEvents int closed bool } // NewStreamingListener creates a new streaming listener func NewStreamingListener[S any](eventChan chan<- StreamEvent[S], config 
StreamConfig) *StreamingListener[S] { return &StreamingListener[S]{ eventChan: eventChan, config: config, } } // emitEvent sends an event to the channel handling backpressure func (sl *StreamingListener[S]) emitEvent(event StreamEvent[S]) { // Check if listener is closed sl.mutex.RLock() if sl.closed { sl.mutex.RUnlock() return } sl.mutex.RUnlock() // Filter based on Mode if !sl.shouldEmit(event) { return } // Try to send event without blocking select { case sl.eventChan <- event: // Event sent successfully default: // Channel is full if sl.config.EnableBackpressure { sl.handleBackpressure() } // Drop the event if backpressure handling is disabled or channel is still full } } func (sl *StreamingListener[S]) shouldEmit(event StreamEvent[S]) bool { switch sl.config.Mode { case StreamModeDebug: return true case StreamModeValues: // Only emit OnGraphStep events (which contain full state) // We expect a custom event type or we rely on node complete if it returns full state? // For now, emit everything that looks like a state update return event.Event == NodeEventComplete || event.Event == EventChainEnd case StreamModeUpdates: // Emit node outputs return event.Event == NodeEventComplete || event.Event == EventChainEnd case StreamModeMessages: // Emit LLM events - this is tricky because generic S doesn't imply LLM events // But if the event metadata says it's LLM... 
return event.Event == EventLLMEnd || event.Event == EventLLMStart default: return true } } // OnNodeEvent implements the NodeListener interface func (sl *StreamingListener[S]) OnNodeEvent(ctx context.Context, event NodeEvent, nodeName string, state S, err error) { streamEvent := StreamEvent[S]{ Timestamp: time.Now(), NodeName: nodeName, Event: event, State: state, Error: err, Metadata: make(map[string]any), } sl.emitEvent(streamEvent) } // Close marks the listener as closed to prevent sending to closed channels func (sl *StreamingListener[S]) Close() { sl.mutex.Lock() defer sl.mutex.Unlock() sl.closed = true } // handleBackpressure manages channel backpressure func (sl *StreamingListener[S]) handleBackpressure() { sl.mutex.Lock() defer sl.mutex.Unlock() sl.droppedEvents++ } // GetDroppedEventsCount returns the number of dropped events func (sl *StreamingListener[S]) GetDroppedEventsCount() int { sl.mutex.RLock() defer sl.mutex.RUnlock() return sl.droppedEvents } // StreamingRunnable wraps a ListenableRunnable with streaming capabilities type StreamingRunnable[S any] struct { runnable *ListenableRunnable[S] config StreamConfig } // NewStreamingRunnable creates a new streaming runnable func NewStreamingRunnable[S any](runnable *ListenableRunnable[S], config StreamConfig) *StreamingRunnable[S] { return &StreamingRunnable[S]{ runnable: runnable, config: config, } } // NewStreamingRunnableWithDefaults creates a streaming runnable with default config func NewStreamingRunnableWithDefaults[S any](runnable *ListenableRunnable[S]) *StreamingRunnable[S] { return NewStreamingRunnable(runnable, DefaultStreamConfig()) } // Stream executes the graph with real-time event streaming func (sr *StreamingRunnable[S]) Stream(ctx context.Context, initialState S) *StreamResult[S] { // Create channels eventChan := make(chan StreamEvent[S], sr.config.BufferSize) resultChan := make(chan S, 1) errorChan := make(chan error, 1) doneChan := make(chan struct{}) // Create cancellable context 
streamCtx, cancel := context.WithCancel(ctx) // Create streaming listener streamingListener := NewStreamingListener(eventChan, sr.config) // Add the streaming listener to all nodes // We add it globally using the graph sr.runnable.GetListenableGraph().AddGlobalListener(streamingListener) // Execute in goroutine go func() { defer func() { // First, close the streaming listener to prevent new events streamingListener.Close() // Clean up: remove listener sr.runnable.GetListenableGraph().RemoveGlobalListener(streamingListener) // Give a small delay for any in-flight listener calls to complete time.Sleep(10 * time.Millisecond) // Now safe to close channels close(eventChan) close(resultChan) close(errorChan) close(doneChan) }() // Execute the runnable result, err := sr.runnable.Invoke(streamCtx, initialState) // Send result or error if err != nil { select { case errorChan <- err: case <-streamCtx.Done(): } } else { select { case resultChan <- result: case <-streamCtx.Done(): } } }() return &StreamResult[S]{ Events: eventChan, Result: resultChan, Errors: errorChan, Done: doneChan, Cancel: cancel, } } // StreamingStateGraph[S any] extends ListenableStateGraph[S] with streaming capabilities type StreamingStateGraph[S any] struct { *ListenableStateGraph[S] config StreamConfig } // NewStreamingStateGraph creates a new streaming state graph with type parameter func NewStreamingStateGraph[S any]() *StreamingStateGraph[S] { baseGraph := NewListenableStateGraph[S]() return &StreamingStateGraph[S]{ ListenableStateGraph: baseGraph, config: DefaultStreamConfig(), } } // NewStreamingStateGraphWithConfig creates a streaming graph with custom config func NewStreamingStateGraphWithConfig[S any](config StreamConfig) *StreamingStateGraph[S] { baseGraph := NewListenableStateGraph[S]() return &StreamingStateGraph[S]{ ListenableStateGraph: baseGraph, config: config, } } // CompileStreaming compiles the graph into a streaming runnable func (g *StreamingStateGraph[S]) CompileStreaming() 
(*StreamingRunnable[S], error) { listenableRunnable, err := g.CompileListenable() if err != nil { return nil, err } return NewStreamingRunnable(listenableRunnable, g.config), nil } // SetStreamConfig updates the streaming configuration func (g *StreamingStateGraph[S]) SetStreamConfig(config StreamConfig) { g.config = config } // GetStreamConfig returns the current streaming configuration func (g *StreamingStateGraph[S]) GetStreamConfig() StreamConfig { return g.config } // StreamingExecutor[S] provides a high-level interface for streaming execution type StreamingExecutor[S any] struct { runnable *StreamingRunnable[S] } // NewStreamingExecutor creates a new streaming executor func NewStreamingExecutor[S any](runnable *StreamingRunnable[S]) *StreamingExecutor[S] { return &StreamingExecutor[S]{ runnable: runnable, } } // ExecuteWithCallback executes the graph and calls the callback for each event func (se *StreamingExecutor[S]) ExecuteWithCallback( ctx context.Context, initialState S, eventCallback func(event StreamEvent[S]), resultCallback func(result S, err error), ) error { streamResult := se.runnable.Stream(ctx, initialState) defer streamResult.Cancel() var finalResult S var finalError error resultReceived := false for { select { case event, ok := <-streamResult.Events: if !ok { // Events channel closed if resultReceived && resultCallback != nil { resultCallback(finalResult, finalError) } return finalError } if eventCallback != nil { eventCallback(event) } case result := <-streamResult.Result: finalResult = result resultReceived = true // Don't return immediately, wait for events channel to close case err := <-streamResult.Errors: finalError = err resultReceived = true // Don't return immediately, wait for events channel to close case <-streamResult.Done: if resultReceived && resultCallback != nil { resultCallback(finalResult, finalError) } return finalError case <-ctx.Done(): return ctx.Err() } } } // ExecuteAsync executes the graph asynchronously and returns 
immediately func (se *StreamingExecutor[S]) ExecuteAsync(ctx context.Context, initialState S) *StreamResult[S] { return se.runnable.Stream(ctx, initialState) } // GetGraph returns a Exporter for the streaming runnable func (sr *StreamingRunnable[S]) GetGraph() *Exporter[S] { return sr.runnable.GetGraph() } // GetTracer returns the tracer from the underlying runnable func (sr *StreamingRunnable[S]) GetTracer() *Tracer { return sr.runnable.GetTracer() } // SetTracer sets the tracer on the underlying runnable func (sr *StreamingRunnable[S]) SetTracer(tracer *Tracer) { sr.runnable.SetTracer(tracer) } // WithTracer returns a new StreamingRunnable with the given tracer func (sr *StreamingRunnable[S]) WithTracer(tracer *Tracer) *StreamingRunnable[S] { newRunnable := sr.runnable.WithTracer(tracer) return &StreamingRunnable[S]{ runnable: newRunnable, config: sr.config, } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/builtin_listeners_test.go
graph/builtin_listeners_test.go
package graph_test import ( "bytes" "context" "fmt" "log" "strings" "testing" "time" "github.com/smallnest/langgraphgo/graph" ) const ( step2Result = "step2_result" ) func TestProgressListener_OnNodeEvent(t *testing.T) { t.Parallel() var buf bytes.Buffer listener := graph.NewProgressListenerWithWriter(&buf). WithTiming(false). // Disable timing for predictable output WithPrefix("🔄") ctx := context.Background() // Test start event listener.OnNodeEvent(ctx, graph.NodeEventStart, "test_node", nil, nil) output := buf.String() if !strings.Contains(output, "🔄 Starting test_node") { t.Errorf("Expected start message, got: %s", output) } // Test complete event buf.Reset() listener.OnNodeEvent(ctx, graph.NodeEventComplete, "test_node", nil, nil) output = buf.String() if !strings.Contains(output, "✅ test_node completed") { t.Errorf("Expected complete message, got: %s", output) } // Test error event buf.Reset() listener.OnNodeEvent(ctx, graph.NodeEventError, "test_node", nil, fmt.Errorf("test error")) output = buf.String() if !strings.Contains(output, "❌ test_node failed: test error") { t.Errorf("Expected error message, got: %s", output) } } func TestProgressListener_CustomSteps(t *testing.T) { t.Parallel() var buf bytes.Buffer listener := graph.NewProgressListenerWithWriter(&buf). WithTiming(false) // Set custom step message listener.SetNodeStep("process", "Analyzing data") ctx := context.Background() listener.OnNodeEvent(ctx, graph.NodeEventStart, "process", nil, nil) output := buf.String() if !strings.Contains(output, "🔄 Analyzing data") { t.Errorf("Expected custom message, got: %s", output) } } func TestProgressListener_WithDetails(t *testing.T) { t.Parallel() var buf bytes.Buffer listener := graph.NewProgressListenerWithWriter(&buf). WithTiming(false). 
WithDetails(true) ctx := context.Background() state := map[string]any{"key": "value"} listener.OnNodeEvent(ctx, graph.NodeEventComplete, "test_node", state, nil) output := buf.String() if !strings.Contains(output, "State: map[key:value]") { t.Errorf("Expected state details, got: %s", output) } } func TestLoggingListener_OnNodeEvent(t *testing.T) { t.Parallel() var buf bytes.Buffer logger := log.New(&buf, "[TEST] ", 0) // No timestamp for predictable output listener := graph.NewLoggingListenerWithLogger(logger). WithLogLevel(graph.LogLevelDebug) ctx := context.Background() // Test different event types listener.OnNodeEvent(ctx, graph.NodeEventStart, "test_node", nil, nil) listener.OnNodeEvent(ctx, graph.NodeEventComplete, "test_node", nil, nil) listener.OnNodeEvent(ctx, graph.NodeEventError, "test_node", nil, fmt.Errorf("test error")) output := buf.String() if !strings.Contains(output, "[TEST] START test_node") { t.Errorf("Expected start log, got: %s", output) } if !strings.Contains(output, "[TEST] COMPLETE test_node") { t.Errorf("Expected complete log, got: %s", output) } if !strings.Contains(output, "[TEST] ERROR test_node: test error") { t.Errorf("Expected error log, got: %s", output) } } func TestLoggingListener_LogLevel(t *testing.T) { t.Parallel() var buf bytes.Buffer logger := log.New(&buf, "[TEST] ", 0) listener := graph.NewLoggingListenerWithLogger(logger). 
WithLogLevel(graph.LogLevelError) // Only error level and above ctx := context.Background() // These should be filtered out listener.OnNodeEvent(ctx, graph.NodeEventStart, "test_node", nil, nil) listener.OnNodeEvent(ctx, graph.NodeEventProgress, "test_node", nil, nil) // This should be logged listener.OnNodeEvent(ctx, graph.NodeEventError, "test_node", nil, fmt.Errorf("test error")) output := buf.String() if strings.Contains(output, "START") || strings.Contains(output, "PROGRESS") { t.Errorf("Expected debug/info messages to be filtered, got: %s", output) } if !strings.Contains(output, "ERROR test_node") { t.Errorf("Expected error message, got: %s", output) } } func TestLoggingListener_WithState(t *testing.T) { t.Parallel() var buf bytes.Buffer logger := log.New(&buf, "[TEST] ", 0) listener := graph.NewLoggingListenerWithLogger(logger). WithState(true) ctx := context.Background() state := map[string]any{"state": testState} listener.OnNodeEvent(ctx, graph.NodeEventComplete, "test_node", state, nil) output := buf.String() // State is now map[string]any{"state": "test_state"} if !strings.Contains(output, "State: map[state:test_state]") { t.Errorf("Expected state in log, got: %s", output) } } func TestMetricsListener_OnNodeEvent(t *testing.T) { t.Parallel() listener := graph.NewMetricsListener() ctx := context.Background() // Simulate node execution listener.OnNodeEvent(ctx, graph.NodeEventStart, "test_node", nil, nil) time.Sleep(1 * time.Millisecond) // Small delay to measure listener.OnNodeEvent(ctx, graph.NodeEventComplete, "test_node", nil, nil) // Check metrics executions := listener.GetNodeExecutions() if executions["test_node"] != 1 { t.Errorf("Expected 1 execution, got %d", executions["test_node"]) } avgDurations := listener.GetNodeAverageDuration() if _, exists := avgDurations["test_node"]; !exists { t.Error("Expected duration to be recorded") } if listener.GetTotalExecutions() != 1 { t.Errorf("Expected 1 total execution, got %d", listener.GetTotalExecutions()) 
} } func TestMetricsListener_ErrorTracking(t *testing.T) { t.Parallel() listener := graph.NewMetricsListener() ctx := context.Background() // Simulate node execution with error listener.OnNodeEvent(ctx, graph.NodeEventStart, "error_node", nil, nil) listener.OnNodeEvent(ctx, graph.NodeEventError, "error_node", nil, fmt.Errorf("test error")) // Check error metrics errors := listener.GetNodeErrors() if errors["error_node"] != 1 { t.Errorf("Expected 1 error, got %d", errors["error_node"]) } } func TestMetricsListener_PrintSummary(t *testing.T) { t.Parallel() listener := graph.NewMetricsListener() ctx := context.Background() // Generate some metrics listener.OnNodeEvent(ctx, graph.NodeEventStart, "node1", nil, nil) listener.OnNodeEvent(ctx, graph.NodeEventComplete, "node1", nil, nil) listener.OnNodeEvent(ctx, graph.NodeEventStart, "node2", nil, nil) listener.OnNodeEvent(ctx, graph.NodeEventError, "node2", nil, fmt.Errorf("error")) var buf bytes.Buffer listener.PrintSummary(&buf) output := buf.String() if !strings.Contains(output, "Node Execution Metrics") { t.Error("Expected metrics header") } if !strings.Contains(output, "Total Executions: 2") { t.Error("Expected total executions count") } if !strings.Contains(output, "node1: 1") { t.Error("Expected node1 execution count") } if !strings.Contains(output, "node2: 1 errors") { t.Error("Expected node2 error count") } } func TestMetricsListener_Reset(t *testing.T) { t.Parallel() listener := graph.NewMetricsListener() ctx := context.Background() // Generate some metrics listener.OnNodeEvent(ctx, graph.NodeEventStart, "test_node", nil, nil) listener.OnNodeEvent(ctx, graph.NodeEventComplete, "test_node", nil, nil) // Verify metrics exist if listener.GetTotalExecutions() != 1 { t.Error("Expected metrics to be recorded") } // Reset and verify metrics are cleared listener.Reset() if listener.GetTotalExecutions() != 0 { t.Error("Expected metrics to be reset") } executions := listener.GetNodeExecutions() if len(executions) != 0 { 
t.Error("Expected executions to be cleared") } } func TestChatListener_OnNodeEvent(t *testing.T) { t.Parallel() var buf bytes.Buffer listener := graph.NewChatListenerWithWriter(&buf). WithTime(false) // Disable time for predictable output ctx := context.Background() // Test start event listener.OnNodeEvent(ctx, graph.NodeEventStart, "process", nil, nil) output := buf.String() if !strings.Contains(output, "🤖 Starting process...") { t.Errorf("Expected start message, got: %s", output) } // Test complete event buf.Reset() listener.OnNodeEvent(ctx, graph.NodeEventComplete, "process", nil, nil) output = buf.String() if !strings.Contains(output, "✅ process finished") { t.Errorf("Expected complete message, got: %s", output) } } func TestChatListener_CustomMessages(t *testing.T) { t.Parallel() var buf bytes.Buffer listener := graph.NewChatListenerWithWriter(&buf). WithTime(false) // Set custom message listener.SetNodeMessage("analyze", "Analyzing your document") ctx := context.Background() listener.OnNodeEvent(ctx, graph.NodeEventStart, "analyze", nil, nil) output := buf.String() if !strings.Contains(output, "Analyzing your document") { t.Errorf("Expected custom message, got: %s", output) } } func TestChatListener_WithTime(t *testing.T) { t.Parallel() var buf bytes.Buffer listener := graph.NewChatListenerWithWriter(&buf). 
WithTime(true) ctx := context.Background() listener.OnNodeEvent(ctx, graph.NodeEventStart, "test", nil, nil) output := buf.String() // Should contain timestamp in format [HH:MM:SS] if !strings.Contains(output, "[") || !strings.Contains(output, "]") { t.Errorf("Expected timestamp in output, got: %s", output) } } // Integration test with actual graph execution func TestBuiltinListeners_Integration(t *testing.T) { t.Parallel() // Create graph g := graph.NewListenableStateGraph[map[string]any]() node1 := g.AddNode("step1", "step1", func(_ context.Context, state map[string]any) (map[string]any, error) { // Return updated state map return map[string]any{"state": "step1_result"}, nil }) node2 := g.AddNode("step2", "step2", func(_ context.Context, state map[string]any) (map[string]any, error) { // Return updated state map return map[string]any{"state": step2Result}, nil }) g.AddEdge("step1", "step2") g.AddEdge("step2", graph.END) g.SetEntryPoint("step1") // Add listeners var progressBuf, logBuf, chatBuf bytes.Buffer progressListener := graph.NewProgressListenerWithWriter(&progressBuf).WithTiming(false) logListener := graph.NewLoggingListenerWithLogger(log.New(&logBuf, "[GRAPH] ", 0)) chatListener := graph.NewChatListenerWithWriter(&chatBuf).WithTime(false) metricsListener := graph.NewMetricsListener() // Builtin listeners might be implementing NodeListener[any] (untyped)? // If Builtin listeners are not generic, we might need adapters. // But let's check builtin_listeners.go. // They implement NodeListener (which was untyped interface). // Now NodeListener is NodeListener[S]. // If builtin listeners implement OnNodeEvent(..., state any, ...), they only satisfy NodeListener[any]. // But our graph is NodeListener[map[string]any]. // So we can't directly add them if they don't implement the generic interface with map[string]any. // Unless I make builtin listeners generic or use an adapter. // // However, NodeListener[S] interface has OnNodeEvent(..., state S, ...). 
// If builtin listener has OnNodeEvent(..., state any, ...), it DOES NOT match NodeListener[S] in Go because methods must match exactly. // // So I probably broke builtin listeners. // I should check builtin_listeners.go. // If they use `any`, they are compatible with `NodeListener[any]`. // But I am using `ListenableStateGraph[map[string]any]`. // Its `AddListener` expects `NodeListener[map[string]any]`. // // I need to genericize builtin listeners OR create an adapter. // Given the scope, creating an adapter is easier. // But "unify to generics" suggests making them generic. // // Let's assume for now I will fix builtin_listeners.go later or they are already fixed (I didn't touch them). // I didn't touch `builtin_listeners.go`. // So they are broken. // // I will update the test to assume I will fix them. // OR I can use an adapter in the test. // // Actually, `NewProgressListenerWithWriter` returns `*ProgressListener`. // `ProgressListener` has `OnNodeEvent(..., state any, ...)`. // I need `OnNodeEvent(..., state map[string]any, ...)`. // // I will make `BuiltinListeners` generic-friendly by adding a method or type alias? // Or just make them implement `OnNodeEvent` with `any`. // But `ListenableNode[S]` calls `OnNodeEvent(..., state S, ...)`. // If `S` is `map[string]any`, it passes a map. // `ProgressListener` expects `any`. // `map` satisfies `any`. // But the INTERFACE `NodeListener[map[string]any]` requires method taking `map[string]any`. // `ProgressListener` method takes `any`. // Does `func(any)` satisfy `interface { func(map[string]any) }`? NO. // // So I MUST genericize builtin listeners or use adapters. // `ProgressListener` should probably be `ProgressListener[S any]`. // // I will check `builtin_listeners.go` in next step. // For now, I will comment out the listener addition in test or wrap them. // // Better: I will create a simple adapter in the test for now. 
// type anyAdapter struct { L graph.NodeListener[any] } // func (a anyAdapter) OnNodeEvent(..., state map[string]any, ...) { a.L.OnNodeEvent(..., state, ...) } // // Wait, `NodeListener` is generic now. `graph.NodeListener` refers to `graph.NodeListener[S]`. // `builtin_listeners.go` defines `ProgressListener`. It implements `OnNodeEvent`. // But `NodeListener` definition in `graph` package CHANGED. // So `builtin_listeners.go` might fail to compile if it refers to `NodeListener`. // `builtin_listeners.go` imports `graph`. // It likely uses `NodeListener` interface? // // Let's check `builtin_listeners.go`. // `type ProgressListener struct ...` // `func (l *ProgressListener) OnNodeEvent(...)` // It probably doesn't explicitly say "implements NodeListener". // But to be used as one, it must satisfy the interface. // // I will verify this in next steps. // For the test, I will update it to use the new graph API. // Adapter for builtin listeners. // Builtin listeners seems to have OnNodeEvent(..., map[string]any, ...). mapAdapter := func(l interface { OnNodeEvent(context.Context, graph.NodeEvent, string, map[string]any, error) }) graph.NodeListener[map[string]any] { return graph.NodeListenerFunc[map[string]any](func(ctx context.Context, e graph.NodeEvent, n string, s map[string]any, err error) { l.OnNodeEvent(ctx, e, n, s, err) }) } // BUT, if ProgressListener takes map[string]any, it ALREADY implements NodeListener[map[string]any]! // So I shouldn't need an adapter? // // Why did I think I needed an adapter? // Because originally it was untyped (any). // If it was untyped, it implemented NodeListener (untyped). // Now NodeListener is generic. // NodeListener[map[string]any] requires OnNodeEvent(..., map[string]any, ...). // // If ProgressListener has OnNodeEvent(..., any, ...), it does NOT implement NodeListener[map[string]any]. // The error message: // cannot use progressListener ... as interface{... 
any ...} value in argument to adapter: // *graph.ProgressListener does not implement interface{... any ...} (wrong type for method OnNodeEvent) // have OnNodeEvent(..., map[string]any, ...) // want OnNodeEvent(..., any, ...) // // This CONFIRMS ProgressListener has `map[string]any` in its signature. // How? I must have changed builtin_listeners.go? // Or maybe it was always map[string]any? // If so, I can just use it directly! // // Let's try adding directly first. If that fails, I'll know why. // But wait, if it has `map[string]any`, why did it work with `StateGraphUntyped` (which dealt with `any`)? // `StateGraphUntyped` listeners took `any`. // If ProgressListener took `map[string]any`, it wouldn't satisfy `NodeListener` (untyped). // // Unless... I updated `builtin_listeners.go` in a previous step without realizing? // I don't recall editing it. // // Maybe `NodeEvent` definition change affected it? No. // // Maybe I should just check `builtin_listeners.go` content. // But I am in the middle of `replace`. // // I will update the test to use an adapter that matches what the compiler says ProgressListener has. // If compiler says it has `map[string]any`, then my adapter should accept `map[string]any`. // And if it has `map[string]any`, it implements `NodeListener[map[string]any]`. // So I can cast/use directly? // // Let's try using `mapAdapter` defined above. 
node1.AddListener(mapAdapter(progressListener)) node1.AddListener(mapAdapter(logListener)) node1.AddListener(mapAdapter(chatListener)) node1.AddListener(mapAdapter(metricsListener)) node2.AddListener(mapAdapter(progressListener)) node2.AddListener(mapAdapter(logListener)) node2.AddListener(mapAdapter(chatListener)) node2.AddListener(mapAdapter(metricsListener)) // Execute graph - pass input as map to avoid wrapping runnable, err := g.CompileListenable() if err != nil { t.Fatalf("Failed to compile: %v", err) } ctx := context.Background() result, err := runnable.Invoke(ctx, map[string]any{"state": "input"}) if err != nil { t.Fatalf("Execution failed: %v", err) } // result is already map[string]any actualResult := result["state"] if actualResult != step2Result { t.Errorf("Expected 'step2_result', got %v", actualResult) } // Wait for async listeners time.Sleep(50 * time.Millisecond) // Check outputs progressOutput := progressBuf.String() if !strings.Contains(progressOutput, "Starting step1") { t.Errorf("Progress listener should show step1, got: %s", progressOutput) } logOutput := logBuf.String() if !strings.Contains(logOutput, "START step1") { t.Errorf("Log listener should show START step1, got: %s", logOutput) } chatOutput := chatBuf.String() if !strings.Contains(chatOutput, "🤖 Starting step1") { t.Errorf("Chat listener should show start message, got: %s", chatOutput) } // Check metrics executions := metricsListener.GetNodeExecutions() if executions["step1"] != 1 || executions["step2"] != 1 { t.Errorf("Expected 1 execution each, got: %v", executions) } if metricsListener.GetTotalExecutions() != 2 { t.Errorf("Expected 2 total executions, got %d", metricsListener.GetTotalExecutions()) } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/state_graph_interrupt_test.go
graph/state_graph_interrupt_test.go
package graph

import (
	"context"
	"errors"
	"testing"
)

// TestStateGraph_Interrupt verifies that calling Interrupt inside a node
// aborts the first Invoke with either a NodeInterrupt or a GraphInterrupt
// that carries the interrupt value handed to Interrupt.
func TestStateGraph_Interrupt(t *testing.T) {
	// Create a StateGraph
	g := NewStateGraph[map[string]any]()

	// Add node that uses Interrupt
	g.AddNode("node1", "Node with interrupt", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		// Use the Interrupt function
		resumeValue, err := Interrupt(ctx, "waiting for input")
		if err != nil {
			return nil, err
		}
		// If we resumed, return the resume value
		if resumeValue != nil {
			return map[string]any{"value": resumeValue}, nil
		}
		return map[string]any{"value": "default"}, nil
	})

	g.AddEdge("node1", END)
	g.SetEntryPoint("node1")

	runnable, err := g.Compile()
	if err != nil {
		t.Fatalf("Failed to compile: %v", err)
	}

	// First execution should interrupt
	_, err = runnable.Invoke(context.Background(), map[string]any{"initial": true})

	// Verify we got an interrupt error
	var graphInterrupt *GraphInterrupt
	if err == nil {
		t.Fatal("Expected interrupt error, got nil")
	}

	// Check if it's a NodeInterrupt wrapped in error or GraphInterrupt
	var nodeInterrupt *NodeInterrupt
	if !errors.As(err, &nodeInterrupt) {
		// Try GraphInterrupt
		if !errors.As(err, &graphInterrupt) {
			t.Fatalf("Expected NodeInterrupt or GraphInterrupt error, got: %v", err)
		}
	}

	// Either interrupt type must carry the exact value passed to Interrupt.
	if graphInterrupt != nil {
		if graphInterrupt.InterruptValue != "waiting for input" {
			t.Errorf("Expected interrupt value 'waiting for input', got: %v", graphInterrupt.InterruptValue)
		}
		t.Logf("Successfully interrupted with GraphInterrupt, value: %v", graphInterrupt.InterruptValue)
	} else {
		if nodeInterrupt.Value != "waiting for input" {
			t.Errorf("Expected interrupt value 'waiting for input', got: %v", nodeInterrupt.Value)
		}
		t.Logf("Successfully interrupted with NodeInterrupt, value: %v", nodeInterrupt.Value)
	}

	t.Log("StateGraph Interrupt test passed!")
}

// TestStateGraph_InterruptWithStateUpdate checks that mutations a node makes
// to a map-based state BEFORE calling Interrupt show up both in
// GraphInterrupt.State and in the value returned by Invoke.
func TestStateGraph_InterruptWithStateUpdate(t *testing.T) {
	// This test verifies that state modifications made before calling Interrupt
	// are preserved in the GraphInterrupt.State
	g := NewStateGraph[map[string]any]()

	g.AddNode("payment_node", "Payment processing node", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		// Simulate updating state before interrupting
		// e.g., setting status to "pending_payment"
		state["payment_status"] = "pending_payment"
		state["amount"] = 100

		// Then interrupt to ask for user confirmation
		_, err := Interrupt(ctx, "Please confirm payment of $100")
		if err != nil {
			// When interrupting, return the updated state
			return state, err
		}

		// If resumed, mark as paid
		state["payment_status"] = "paid"
		return state, nil
	})

	g.AddEdge("payment_node", END)
	g.SetEntryPoint("payment_node")

	runnable, err := g.Compile()
	if err != nil {
		t.Fatalf("Failed to compile: %v", err)
	}

	// First execution should interrupt
	initialState := map[string]any{"user_id": "123"}
	result, err := runnable.Invoke(context.Background(), initialState)

	// Verify we got an interrupt error
	var graphInterrupt *GraphInterrupt
	if err == nil {
		t.Fatal("Expected interrupt error, got nil")
	}
	if !errors.As(err, &graphInterrupt) {
		t.Fatalf("Expected GraphInterrupt error, got: %v", err)
	}

	// Verify the interrupt value
	if graphInterrupt.InterruptValue != "Please confirm payment of $100" {
		t.Errorf("Expected interrupt value 'Please confirm payment of $100', got: %v", graphInterrupt.InterruptValue)
	}

	// CRITICAL: Verify that state modifications are preserved
	interruptState, ok := graphInterrupt.State.(map[string]any)
	if !ok {
		t.Fatalf("Expected state to be map[string]any, got: %T", graphInterrupt.State)
	}

	// Check that the state updates made before Interrupt() are present
	if interruptState["payment_status"] != "pending_payment" {
		t.Errorf("Expected payment_status to be 'pending_payment', got: %v", interruptState["payment_status"])
	}
	if interruptState["amount"] != 100 {
		t.Errorf("Expected amount to be 100, got: %v", interruptState["amount"])
	}

	// Also check that result has the updated state
	if result["payment_status"] != "pending_payment" {
		t.Errorf("Result: Expected payment_status to be 'pending_payment', got: %v", result["payment_status"])
	}
	if result["amount"] != 100 {
		t.Errorf("Result: Expected amount to be 100, got: %v", result["amount"])
	}

	t.Log("StateGraph Interrupt with state update test passed!")
}

// PaymentState is a value type for testing
type PaymentState struct {
	UserID        string
	PaymentStatus string
	Amount        int
}

// TestStateGraph_InterruptWithValueTypeState repeats the state-preservation
// check with a struct (value) state, where updates cannot leak through
// shared map references — so lost updates would actually be detected here.
func TestStateGraph_InterruptWithValueTypeState(t *testing.T) {
	// This test uses a value type (struct) instead of map (reference type)
	// to properly test if state updates are preserved during interrupt
	g := NewStateGraph[PaymentState]()

	g.AddNode("payment_node", "Payment processing node", func(ctx context.Context, state PaymentState) (PaymentState, error) {
		// Modify the state (creates a new copy since it's a value type)
		state.PaymentStatus = "pending_payment"
		state.Amount = 100

		// Then interrupt
		_, err := Interrupt(ctx, "Please confirm payment of $100")
		if err != nil {
			// Return the updated state
			return state, err
		}

		// If resumed, mark as paid
		state.PaymentStatus = "paid"
		return state, nil
	})

	g.AddEdge("payment_node", END)
	g.SetEntryPoint("payment_node")

	runnable, err := g.Compile()
	if err != nil {
		t.Fatalf("Failed to compile: %v", err)
	}

	// First execution should interrupt
	initialState := PaymentState{UserID: "user123"}
	result, err := runnable.Invoke(context.Background(), initialState)

	// Verify we got an interrupt error
	var graphInterrupt *GraphInterrupt
	if err == nil {
		t.Fatal("Expected interrupt error, got nil")
	}
	if !errors.As(err, &graphInterrupt) {
		t.Fatalf("Expected GraphInterrupt error, got: %v", err)
	}

	t.Logf("GraphInterrupt.State: %+v", graphInterrupt.State)
	t.Logf("Result: %+v", result)

	// CRITICAL: Verify that state modifications are preserved
	interruptState, ok := graphInterrupt.State.(PaymentState)
	if !ok {
		t.Fatalf("Expected state to be PaymentState, got: %T", graphInterrupt.State)
	}

	// This should FAIL with the current bug - state updates are lost
	if interruptState.PaymentStatus != "pending_payment" {
		t.Errorf("BUG CONFIRMED: Expected payment_status to be 'pending_payment', got: %v", interruptState.PaymentStatus)
	}
	if interruptState.Amount != 100 {
		t.Errorf("BUG CONFIRMED: Expected amount to be 100, got: %v", interruptState.Amount)
	}

	// Also check result
	if result.PaymentStatus != "pending_payment" {
		t.Errorf("Result BUG: Expected payment_status to be 'pending_payment', got: %v", result.PaymentStatus)
	}
	if result.Amount != 100 {
		t.Errorf("Result BUG: Expected amount to be 100, got: %v", result.Amount)
	}
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/streaming_test.go
graph/streaming_test.go
package graph

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestStreamingModes runs a simple two-node graph (A -> B -> END) through
// the streaming runnable in both StreamModeValues and StreamModeUpdates
// and checks the events delivered on the stream channel.
func TestStreamingModes(t *testing.T) {
	g := NewStreamingStateGraph[map[string]any]()

	// Setup simple graph using map-based state
	g.AddNode("A", "A", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		return map[string]any{"state": "A"}, nil
	})
	g.AddNode("B", "B", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		return map[string]any{"state": "B"}, nil
	})
	g.SetEntryPoint("A")
	g.AddEdge("A", "B")
	g.AddEdge("B", END)

	// Test StreamModeValues
	t.Run("Values", func(t *testing.T) {
		g.SetStreamConfig(StreamConfig{
			BufferSize: 100,
			Mode:       StreamModeValues,
		})

		runnable, err := g.CompileStreaming()
		assert.NoError(t, err)

		res := runnable.Stream(context.Background(), map[string]any{"state": "Start"})

		// Drain the event channel.
		var events []StreamEvent[map[string]any]
		for event := range res.Events {
			events = append(events, event)
		}

		// Expect "graph_step" events:
		// A runs -> state map{"state": "A"}
		// B runs -> state map{"state": "B"}
		assert.NotEmpty(t, events)

		// The last event must carry the final state produced by B.
		lastEvent := events[len(events)-1]
		// Extract state from map
		lastStateMap := lastEvent.State
		assert.Equal(t, "B", lastStateMap["state"])
	})

	// Test StreamModeUpdates
	t.Run("Updates", func(t *testing.T) {
		g.SetStreamConfig(StreamConfig{
			BufferSize: 100,
			Mode:       StreamModeUpdates,
		})

		runnable, err := g.CompileStreaming()
		assert.NoError(t, err)

		res := runnable.Stream(context.Background(), map[string]any{"state": "Start"})

		// Drain the event channel.
		var events []StreamEvent[map[string]any]
		for event := range res.Events {
			events = append(events, event)
		}

		// Expect a completion event per node with its update:
		// A -> map{"state": "A"}
		// B -> map{"state": "B"}
		foundA := false
		foundB := false
		for _, e := range events {
			if e.Event == NodeEventComplete {
				stateMap := e.State
				if stateMap["state"] == "A" {
					foundA = true
				}
				if stateMap["state"] == "B" {
					foundB = true
				}
			}
		}
		assert.True(t, foundA)
		assert.True(t, foundB)
	})
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/visualization.go
graph/visualization.go
package graph

import (
	"fmt"
	"sort"
	"strings"
)

// Exporter provides methods to export graphs in different formats
// (Mermaid flowcharts, Graphviz DOT, and ASCII trees).
type Exporter[S any] struct {
	graph *StateGraph[S]
}

// NewExporter creates a new graph exporter for the given graph
func NewExporter[S any](graph *StateGraph[S]) *Exporter[S] {
	return &Exporter[S]{graph: graph}
}

// MermaidOptions defines configuration for Mermaid diagram generation
type MermaidOptions struct {
	// Direction of the flowchart (e.g., "TD", "LR")
	Direction string
}

// DrawMermaid generates a Mermaid diagram representation of the graph
// using the default top-down ("TD") direction.
func (ge *Exporter[S]) DrawMermaid() string {
	return ge.DrawMermaidWithOptions(MermaidOptions{
		Direction: "TD",
	})
}

// sortedConditionalEdgeSources returns the source-node names of all
// conditional edges in sorted order. Go map iteration order is random,
// so sorting keeps generated diagrams deterministic (the node list is
// already sorted for the same reason).
func (ge *Exporter[S]) sortedConditionalEdgeSources() []string {
	sources := make([]string, 0, len(ge.graph.conditionalEdges))
	for from := range ge.graph.conditionalEdges {
		sources = append(sources, from)
	}
	sort.Strings(sources)
	return sources
}

// DrawMermaidWithOptions generates a Mermaid diagram with custom options
func (ge *Exporter[S]) DrawMermaidWithOptions(opts MermaidOptions) string {
	var sb strings.Builder

	// Start Mermaid flowchart
	direction := opts.Direction
	if direction == "" {
		direction = "TD"
	}
	sb.WriteString(fmt.Sprintf("flowchart %s\n", direction))

	// Add entry point styling
	if ge.graph.entryPoint != "" {
		sb.WriteString(fmt.Sprintf(" %s[[\"%s\"]]\n", ge.graph.entryPoint, ge.graph.entryPoint))
		sb.WriteString(fmt.Sprintf(" %s --> %s\n", "START", ge.graph.entryPoint))
		sb.WriteString(" START([\"START\"])\n")
		sb.WriteString(" style START fill:#90EE90\n")
	}

	// Get sorted node names for consistent output
	nodeNames := make([]string, 0, len(ge.graph.nodes))
	for name := range ge.graph.nodes {
		if name != ge.graph.entryPoint && name != END {
			nodeNames = append(nodeNames, name)
		}
	}
	sort.Strings(nodeNames)

	// Add regular nodes
	for _, name := range nodeNames {
		sb.WriteString(fmt.Sprintf(" %s[\"%s\"]\n", name, name))
	}

	// Add END node if referenced
	hasEnd := false
	for _, edge := range ge.graph.edges {
		if edge.To == END {
			hasEnd = true
			break
		}
	}
	if hasEnd {
		sb.WriteString(" END([\"END\"])\n")
		sb.WriteString(" style END fill:#FFB6C1\n")
	}

	// Add edges
	for _, edge := range ge.graph.edges {
		sb.WriteString(fmt.Sprintf(" %s --> %s\n", edge.From, edge.To))
	}

	// Add conditional edges in sorted order (deterministic output)
	for _, from := range ge.sortedConditionalEdgeSources() {
		sb.WriteString(fmt.Sprintf(" %s -.-> %s_condition((?))\n", from, from))
		sb.WriteString(fmt.Sprintf(" style %s_condition fill:#FFFFE0,stroke:#333,stroke-dasharray: 5 5\n", from))
	}

	// Style entry point
	if ge.graph.entryPoint != "" {
		sb.WriteString(fmt.Sprintf(" style %s fill:#87CEEB\n", ge.graph.entryPoint))
	}

	return sb.String()
}

// DrawDOT generates a DOT (Graphviz) representation of the graph
func (ge *Exporter[S]) DrawDOT() string {
	var sb strings.Builder

	sb.WriteString("digraph G {\n")
	sb.WriteString(" rankdir=TD;\n")
	sb.WriteString(" node [shape=box];\n")

	// Add START node and entry point styling if there's an entry point
	if ge.graph.entryPoint != "" {
		sb.WriteString(" START [label=\"START\", shape=ellipse, style=filled, fillcolor=lightgreen];\n")
		sb.WriteString(fmt.Sprintf(" START -> %s;\n", ge.graph.entryPoint))
		sb.WriteString(fmt.Sprintf(" %s [style=filled, fillcolor=lightblue];\n", ge.graph.entryPoint))
	}

	// Add END node styling if referenced
	hasEnd := false
	for _, edge := range ge.graph.edges {
		if edge.To == END {
			hasEnd = true
			break
		}
	}
	if hasEnd {
		sb.WriteString(" END [label=\"END\", shape=ellipse, style=filled, fillcolor=lightpink];\n")
	}

	// Add edges
	for _, edge := range ge.graph.edges {
		sb.WriteString(fmt.Sprintf(" %s -> %s;\n", edge.From, edge.To))
	}

	// Add conditional edges in sorted order (deterministic output)
	for _, from := range ge.sortedConditionalEdgeSources() {
		sb.WriteString(fmt.Sprintf(" %s -> %s_condition [style=dashed, label=\"?\"];\n", from, from))
		sb.WriteString(fmt.Sprintf(" %s_condition [label=\"?\", shape=diamond, style=filled, fillcolor=lightyellow];\n", from))
	}

	sb.WriteString("}\n")
	return sb.String()
}

// DrawASCII generates an ASCII tree representation of the graph
func (ge *Exporter[S]) DrawASCII() string {
	if ge.graph.entryPoint == "" {
		return "No entry point set\n"
	}

	var sb strings.Builder
	visited := make(map[string]bool)

	sb.WriteString("Graph Execution Flow:\n")
	sb.WriteString("├── START\n")
	ge.drawASCIINode(ge.graph.entryPoint, "│ ", true, visited, &sb)

	return sb.String()
}

// drawASCIINode recursively draws ASCII representation of nodes
func (ge *Exporter[S]) drawASCIINode(nodeName string, prefix string, isLast bool, visited map[string]bool, sb *strings.Builder) {
	if visited[nodeName] {
		// Handle cycles: print a marker instead of recursing forever
		connector := "├──"
		if isLast {
			connector = "└──"
		}
		sb.WriteString(fmt.Sprintf("%s%s %s (cycle)\n", prefix, connector, nodeName))
		return
	}
	visited[nodeName] = true

	connector := "├──"
	nextPrefix := prefix + "│ "
	if isLast {
		connector = "└──"
		nextPrefix = prefix + " "
	}
	sb.WriteString(fmt.Sprintf("%s%s %s\n", prefix, connector, nodeName))

	if nodeName == END {
		return
	}

	// Find outgoing edges
	outgoingEdges := make([]string, 0)
	for _, edge := range ge.graph.edges {
		if edge.From == nodeName {
			outgoingEdges = append(outgoingEdges, edge.To)
		}
	}

	// Check for conditional edge
	if _, ok := ge.graph.conditionalEdges[nodeName]; ok {
		outgoingEdges = append(outgoingEdges, "(Conditional)")
	}

	// Sort for consistent output
	sort.Strings(outgoingEdges)

	// Draw child nodes
	for i, target := range outgoingEdges {
		isLastChild := i == len(outgoingEdges)-1
		if target == "(Conditional)" {
			// Draw conditional indicator
			condConnector := "├──"
			if isLastChild {
				condConnector = "└──"
			}
			sb.WriteString(fmt.Sprintf("%s%s (?)\n", nextPrefix, condConnector))
		} else {
			ge.drawASCIINode(target, nextPrefix, isLastChild, visited, sb)
		}
	}
}

// GetGraphForRunnable returns a Exporter for the compiled graph's visualization
func GetGraphForRunnable(r *Runnable) *Exporter[map[string]any] {
	return NewExporter[map[string]any](r.graph)
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/interrupt_test.go
graph/interrupt_test.go
package graph

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestGraphInterrupt verifies static interrupt configuration on a three-node
// A -> B -> C chain: InterruptBefore pauses execution before the named node
// runs, InterruptAfter pauses after it completes, and both surface a
// *GraphInterrupt error carrying the node name and the state at the point
// of interruption.
func TestGraphInterrupt(t *testing.T) {
	g := NewStateGraph[map[string]any]()

	// Each node appends its own letter to state["value"], so the string
	// records exactly which nodes have run.
	g.AddNode("A", "A", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		state["value"] = state["value"].(string) + "A"
		return state, nil
	})
	g.AddNode("B", "B", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		state["value"] = state["value"].(string) + "B"
		return state, nil
	})
	g.AddNode("C", "C", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		state["value"] = state["value"].(string) + "C"
		return state, nil
	})

	g.SetEntryPoint("A")
	g.AddEdge("A", "B")
	g.AddEdge("B", "C")
	g.AddEdge("C", END)

	runnable, err := g.Compile()
	assert.NoError(t, err)

	// Test InterruptBefore
	t.Run("InterruptBefore", func(t *testing.T) {
		config := &Config{
			InterruptBefore: []string{"B"},
		}

		res, err := runnable.InvokeWithConfig(context.Background(), map[string]any{"value": "Start"}, config)
		assert.Error(t, err)

		var interrupt *GraphInterrupt
		assert.ErrorAs(t, err, &interrupt)
		assert.Equal(t, "B", interrupt.Node)

		// State is stored as map[string]any in the interrupt;
		// only A has run, so value is "StartA".
		interruptState, ok := interrupt.State.(map[string]any)
		assert.True(t, ok)
		assert.Equal(t, "StartA", interruptState["value"])

		// Result should be the state at interruption
		assert.Equal(t, "StartA", res["value"])
	})

	// Test InterruptAfter
	t.Run("InterruptAfter", func(t *testing.T) {
		config := &Config{
			InterruptAfter: []string{"B"},
		}

		res, err := runnable.InvokeWithConfig(context.Background(), map[string]any{"value": "Start"}, config)
		assert.Error(t, err)

		var interrupt *GraphInterrupt
		assert.ErrorAs(t, err, &interrupt)
		assert.Equal(t, "B", interrupt.Node)

		// State is stored as map[string]any in the interrupt;
		// A and B have run, so value is "StartAB".
		interruptState, ok := interrupt.State.(map[string]any)
		assert.True(t, ok)
		assert.Equal(t, "StartAB", interruptState["value"])

		// Result should be the state at interruption
		assert.Equal(t, "StartAB", res["value"])
	})
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/graph.go
graph/graph.go
package graph

import (
	"context"
	"errors"
	"fmt"
)

// END is a special constant used to represent the end node in the graph.
const END = "END"

var (
	// ErrEntryPointNotSet is returned when the entry point of the graph is not set.
	ErrEntryPointNotSet = errors.New("entry point not set")

	// ErrNodeNotFound is returned when a node is not found in the graph.
	ErrNodeNotFound = errors.New("node not found")

	// ErrNoOutgoingEdge is returned when no outgoing edge is found for a node.
	ErrNoOutgoingEdge = errors.New("no outgoing edge found for node")
)

// GraphInterrupt is returned when execution is interrupted by configuration or dynamic interrupt
type GraphInterrupt struct {
	// Node that caused the interruption
	Node string

	// State at the time of interruption
	State any

	// NextNodes that would have been executed if not interrupted
	NextNodes []string

	// InterruptValue is the value provided by the dynamic interrupt (if any)
	InterruptValue any
}

// Error implements the error interface, including the dynamic interrupt
// value in the message when one was provided.
func (e *GraphInterrupt) Error() string {
	if e.InterruptValue != nil {
		return fmt.Sprintf("graph interrupted at node %s with value: %v", e.Node, e.InterruptValue)
	}
	return fmt.Sprintf("graph interrupted at node %s", e.Node)
}

// Interrupt pauses execution and waits for input.
// If resuming, it returns the value provided in the resume command.
// Otherwise it returns a *NodeInterrupt error wrapping value, which the
// executor surfaces to the caller to signal the pause.
func Interrupt(ctx context.Context, value any) (any, error) {
	if resumeVal := GetResumeValue(ctx); resumeVal != nil {
		return resumeVal, nil
	}
	return nil, &NodeInterrupt{Value: value}
}

// Edge represents an edge in the graph.
type Edge struct {
	// From is the name of the node from which the edge originates.
	From string

	// To is the name of the node to which the edge points.
	To string
}

// RetryPolicy defines how to handle node failures
type RetryPolicy struct {
	// MaxRetries is the maximum number of retry attempts.
	MaxRetries int
	// BackoffStrategy selects how the delay between attempts grows.
	BackoffStrategy BackoffStrategy
	// RetryableErrors lists error strings that are eligible for retry.
	RetryableErrors []string
}

// BackoffStrategy defines different backoff strategies
type BackoffStrategy int

const (
	// FixedBackoff waits a constant interval between attempts.
	FixedBackoff BackoffStrategy = iota
	// ExponentialBackoff doubles the interval after each attempt.
	ExponentialBackoff
	// LinearBackoff grows the interval linearly with the attempt count.
	LinearBackoff
)

// Runnable is an alias for StateRunnable[map[string]any] for convenience.
type Runnable = StateRunnable[map[string]any]

// StateGraphMap is an alias for StateGraph[map[string]any] for convenience.
// Use NewStateGraph[map[string]any]() or NewStateGraph[S]() for other types.
type StateGraphMap = StateGraph[map[string]any]

// ListenableStateGraphMap is an alias for ListenableStateGraph[map[string]any].
type ListenableStateGraphMap = ListenableStateGraph[map[string]any]

// ListenableRunnableMap is an alias for ListenableRunnable[map[string]any].
type ListenableRunnableMap = ListenableRunnable[map[string]any]

// NewMessageGraph creates a new instance of StateGraph[map[string]any] with a default schema
// that handles "messages" using the AddMessages reducer.
// This is the recommended constructor for chat-based agents that use
// map[string]any as state with a "messages" key.
//
// Deprecated: Use NewStateGraph[MessageState]() for type-safe state management.
func NewMessageGraph() *StateGraph[map[string]any] {
	g := NewStateGraph[map[string]any]()

	// Initialize default schema for message handling
	schema := NewMapSchema()
	schema.RegisterReducer("messages", AddMessages)
	g.SetSchema(schema)

	return g
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/schema_test.go
graph/schema_test.go
package graph import ( "context" "reflect" "testing" "github.com/stretchr/testify/assert" ) // SchemaTestState is used for schema tests type SchemaTestState struct { Count int Name string Numbers []int Logs []string } // StructSchema Tests func TestNewStructSchema(t *testing.T) { t.Run("Create schema with merge function", func(t *testing.T) { mergeFunc := func(current, new SchemaTestState) (SchemaTestState, error) { current.Count += new.Count return current, nil } schema := NewStructSchema(SchemaTestState{Count: 0}, mergeFunc) assert.NotNil(t, schema) assert.Equal(t, 0, schema.InitialValue.Count) assert.NotNil(t, schema.MergeFunc) }) t.Run("Create schema with default merge", func(t *testing.T) { schema := NewStructSchema(SchemaTestState{Count: 5}, nil) assert.NotNil(t, schema) assert.Equal(t, 5, schema.InitialValue.Count) assert.NotNil(t, schema.MergeFunc) }) } func TestStructSchema_Init(t *testing.T) { schema := NewStructSchema(SchemaTestState{Count: 10, Name: "test"}, nil) initial := schema.Init() assert.Equal(t, 10, initial.Count) assert.Equal(t, "test", initial.Name) } func TestStructSchema_Update(t *testing.T) { t.Run("Update with custom merge function", func(t *testing.T) { mergeFunc := func(current, new SchemaTestState) (SchemaTestState, error) { current.Count += new.Count if new.Name != "" { current.Name = new.Name } return current, nil } schema := NewStructSchema(SchemaTestState{}, mergeFunc) current := SchemaTestState{Count: 5, Name: "old"} new := SchemaTestState{Count: 3, Name: "new"} result, err := schema.Update(current, new) assert.NoError(t, err) assert.Equal(t, 8, result.Count) assert.Equal(t, "new", result.Name) }) t.Run("Update with nil merge function uses default", func(t *testing.T) { schema := &StructSchema[SchemaTestState]{ InitialValue: SchemaTestState{}, MergeFunc: nil, } current := SchemaTestState{Count: 5, Name: "old"} new := SchemaTestState{Count: 0, Name: "new"} // Count is zero, won't update result, err := schema.Update(current, new) 
assert.NoError(t, err) // When MergeFunc is nil, Update returns the new value directly assert.Equal(t, 0, result.Count) // New value (0) assert.Equal(t, "new", result.Name) // New value }) } func TestDefaultStructMerge(t *testing.T) { t.Run("Merge non-zero fields", func(t *testing.T) { current := SchemaTestState{Count: 5, Name: "old"} new := SchemaTestState{Count: 10, Name: ""} // Name is zero result, err := DefaultStructMerge(current, new) assert.NoError(t, err) assert.Equal(t, 10, result.Count) assert.Equal(t, "old", result.Name) // Keep old name }) t.Run("Merge with slices", func(t *testing.T) { current := SchemaTestState{Numbers: []int{1, 2}} new := SchemaTestState{Numbers: []int{3, 4}} result, err := DefaultStructMerge(current, new) assert.NoError(t, err) assert.Equal(t, []int{3, 4}, result.Numbers) // Overwrites }) t.Run("Merge with zero values", func(t *testing.T) { current := SchemaTestState{Count: 5, Name: "test"} new := SchemaTestState{Count: 0, Name: ""} result, err := DefaultStructMerge(current, new) assert.NoError(t, err) assert.Equal(t, 5, result.Count) assert.Equal(t, "test", result.Name) }) t.Run("Non-struct type returns new", func(t *testing.T) { current := 5 new := 10 result, err := DefaultStructMerge(current, new) assert.NoError(t, err) assert.Equal(t, 10, result) }) } func TestOverwriteStructMerge(t *testing.T) { current := SchemaTestState{Count: 5, Name: "old", Numbers: []int{1}} new := SchemaTestState{Count: 10, Name: "new", Numbers: []int{2, 3}} result, err := OverwriteStructMerge(current, new) assert.NoError(t, err) assert.Equal(t, new, result) assert.Equal(t, 10, result.Count) assert.Equal(t, "new", result.Name) assert.Equal(t, []int{2, 3}, result.Numbers) } // FieldMerger Tests func TestNewFieldMerger(t *testing.T) { fm := NewFieldMerger(SchemaTestState{Count: 0}) assert.NotNil(t, fm) assert.Equal(t, 0, fm.InitialValue.Count) assert.NotNil(t, fm.FieldMergeFns) } func TestFieldMerger_Init(t *testing.T) { fm := 
NewFieldMerger(SchemaTestState{Count: 10, Name: "test"}) initial := fm.Init() assert.Equal(t, 10, initial.Count) assert.Equal(t, "test", initial.Name) } func TestFieldMerger_RegisterFieldMerge(t *testing.T) { fm := NewFieldMerger(SchemaTestState{}) fm.RegisterFieldMerge("Count", SumIntMerge) fm.RegisterFieldMerge("Name", OverwriteMerge) assert.Contains(t, fm.FieldMergeFns, "Count") assert.Contains(t, fm.FieldMergeFns, "Name") } func TestFieldMerger_Update(t *testing.T) { t.Run("Update with custom field mergers", func(t *testing.T) { fm := NewFieldMerger(SchemaTestState{}) fm.RegisterFieldMerge("Count", SumIntMerge) fm.RegisterFieldMerge("Name", KeepCurrentMerge) // Keep current name current := SchemaTestState{Count: 5, Name: "original"} new := SchemaTestState{Count: 3, Name: "new"} result, err := fm.Update(current, new) assert.NoError(t, err) assert.Equal(t, 8, result.Count) // Sum assert.Equal(t, "original", result.Name) // Keep current }) t.Run("Update with default behavior for unregistered fields", func(t *testing.T) { fm := NewFieldMerger(SchemaTestState{}) // No field mergers registered current := SchemaTestState{Count: 5, Name: "old"} new := SchemaTestState{Count: 0, Name: "new"} // Count is zero result, err := fm.Update(current, new) assert.NoError(t, err) assert.Equal(t, 5, result.Count) // Keep old (zero doesn't overwrite) assert.Equal(t, "new", result.Name) }) t.Run("Update with slice merge", func(t *testing.T) { fm := NewFieldMerger(SchemaTestState{}) fm.RegisterFieldMerge("Numbers", AppendSliceMerge) fm.RegisterFieldMerge("Logs", AppendSliceMerge) current := SchemaTestState{ Numbers: []int{1, 2}, Logs: []string{"a"}, } new := SchemaTestState{ Numbers: []int{3, 4}, Logs: []string{"b"}, } result, err := fm.Update(current, new) assert.NoError(t, err) assert.Equal(t, []int{1, 2, 3, 4}, result.Numbers) assert.Equal(t, []string{"a", "b"}, result.Logs) }) t.Run("Update non-struct returns error", func(t *testing.T) { fm := NewFieldMerger(0) // Not a struct 
current := 5 newVal := 10 _, err := fm.Update(current, newVal) assert.Error(t, err) assert.Contains(t, err.Error(), "only works with struct") }) } // Merge Helper Tests func TestAppendSliceMerge(t *testing.T) { t.Run("Append slices", func(t *testing.T) { current := []int{1, 2} new := []int{3, 4} result := AppendSliceMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.True(t, result.IsValid()) resultSlice := result.Interface().([]int) assert.Equal(t, []int{1, 2, 3, 4}, resultSlice) }) t.Run("Non-slice values return new", func(t *testing.T) { current := 5 new := 10 result := AppendSliceMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, reflect.ValueOf(new), result) }) } func TestSumIntMerge(t *testing.T) { t.Run("Sum integers", func(t *testing.T) { current := 5 new := 3 result := SumIntMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.True(t, result.IsValid()) assert.Equal(t, int64(8), result.Int()) }) t.Run("Non-int values return new", func(t *testing.T) { current := "hello" new := "world" result := SumIntMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, reflect.ValueOf(new), result) }) } func TestOverwriteMerge(t *testing.T) { current := "old" new := "new" result := OverwriteMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, reflect.ValueOf(new), result) } func TestKeepCurrentMerge(t *testing.T) { current := "old" new := "new" result := KeepCurrentMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, reflect.ValueOf(current), result) } func TestMaxIntMerge(t *testing.T) { t.Run("Max of two integers - current larger", func(t *testing.T) { current := 10 new := 5 result := MaxIntMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, reflect.ValueOf(current), result) }) t.Run("Max of two integers - new larger", func(t *testing.T) { current := 5 new := 10 result := MaxIntMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, 
reflect.ValueOf(new), result) }) t.Run("Non-int values return new", func(t *testing.T) { current := "hello" new := "world" result := MaxIntMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, reflect.ValueOf(new), result) }) } func TestMinIntMerge(t *testing.T) { t.Run("Min of two integers - current smaller", func(t *testing.T) { current := 3 new := 7 result := MinIntMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, reflect.ValueOf(current), result) }) t.Run("Min of two integers - new smaller", func(t *testing.T) { current := 7 new := 3 result := MinIntMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, reflect.ValueOf(new), result) }) t.Run("Non-int values return new", func(t *testing.T) { current := "hello" new := "world" result := MinIntMerge( reflect.ValueOf(current), reflect.ValueOf(new), ) assert.Equal(t, reflect.ValueOf(new), result) }) } // MapSchema Tests func TestNewMapSchema(t *testing.T) { schema := NewMapSchema() assert.NotNil(t, schema) assert.NotNil(t, schema.Reducers) } func TestMapSchema_RegisterReducer(t *testing.T) { schema := NewMapSchema() schema.RegisterReducer("key1", OverwriteReducer) schema.RegisterReducer("key2", AppendReducer) assert.Contains(t, schema.Reducers, "key1") assert.Contains(t, schema.Reducers, "key2") } func TestMapSchema_Init(t *testing.T) { schema := NewMapSchema() initial := schema.Init() assert.NotNil(t, initial) assert.NotNil(t, initial) assert.Empty(t, initial) // Empty map } func TestMapSchema_Update(t *testing.T) { t.Run("Update with nil current creates new map", func(t *testing.T) { schema := NewMapSchema() new := map[string]any{"key": "value"} result, err := schema.Update(nil, new) assert.NoError(t, err) assert.Equal(t, "value", result["key"]) }) t.Run("Update with reducer", func(t *testing.T) { schema := NewMapSchema() schema.RegisterReducer("count", func(current, new any) (any, error) { currInt := current.(int) newInt := new.(int) return currInt + 
newInt, nil }) current := map[string]any{"count": 5} new := map[string]any{"count": 3} result, err := schema.Update(current, new) assert.NoError(t, err) assert.Equal(t, 8, result["count"]) }) t.Run("Update without reducer overwrites", func(t *testing.T) { schema := NewMapSchema() // No reducer for "name" current := map[string]any{"name": "old"} new := map[string]any{"name": "new"} result, err := schema.Update(current, new) assert.NoError(t, err) assert.Equal(t, "new", result["name"]) }) t.Run("Update doesn't mutate original", func(t *testing.T) { schema := NewMapSchema() current := map[string]any{"key": "original"} new := map[string]any{"key": "updated"} result, err := schema.Update(current, new) assert.NoError(t, err) assert.Equal(t, "original", current["key"]) // Original unchanged assert.Equal(t, "updated", result["key"]) }) t.Run("Reducer error propagates", func(t *testing.T) { schema := NewMapSchema() schema.RegisterReducer("key", func(current, new any) (any, error) { return nil, assert.AnError }) _, err := schema.Update(map[string]any{}, map[string]any{"key": "value"}) assert.Error(t, err) }) } // Reducer Tests func TestOverwriteReducer(t *testing.T) { current := "old" new := "new" result, err := OverwriteReducer(current, new) assert.NoError(t, err) assert.Equal(t, "new", result) } func TestAppendReducer(t *testing.T) { t.Run("Append slice to slice", func(t *testing.T) { current := []int{1, 2} new := []int{3, 4} result, err := AppendReducer(current, new) assert.NoError(t, err) resultSlice := result.([]int) assert.Equal(t, []int{1, 2, 3, 4}, resultSlice) }) t.Run("Append single element to slice", func(t *testing.T) { current := []int{1, 2} new := 3 result, err := AppendReducer(current, new) assert.NoError(t, err) resultSlice := result.([]int) assert.Equal(t, []int{1, 2, 3}, resultSlice) }) t.Run("Start new slice from single element", func(t *testing.T) { var current []int = nil new := 42 result, err := AppendReducer(current, new) assert.NoError(t, err) 
resultSlice := result.([]int) assert.Equal(t, []int{42}, resultSlice) }) t.Run("Start new slice from slice", func(t *testing.T) { var current []int = nil new := []int{1, 2, 3} result, err := AppendReducer(current, new) assert.NoError(t, err) resultSlice := result.([]int) assert.Equal(t, []int{1, 2, 3}, resultSlice) }) t.Run("Append slice to slice with different types", func(t *testing.T) { current := []string{"a", "b"} new := []int{1, 2} result, err := AppendReducer(current, new) assert.NoError(t, err) resultSlice := result.([]any) assert.Equal(t, []any{"a", "b", 1, 2}, resultSlice) }) t.Run("Non-slice current returns error", func(t *testing.T) { current := "not a slice" new := []int{1} _, err := AppendReducer(current, new) assert.Error(t, err) assert.Contains(t, err.Error(), "not a slice") }) } // Integration Tests func TestStateGraph_Schema(t *testing.T) { g := NewStateGraph[map[string]any]() schema := NewMapSchema() schema.RegisterReducer("messages", AppendReducer) g.SetSchema(schema) g.AddNode("A", "A", func(ctx context.Context, state map[string]any) (map[string]any, error) { return map[string]any{ "messages": []string{"A"}, }, nil }) g.AddNode("B", "B", func(ctx context.Context, state map[string]any) (map[string]any, error) { return map[string]any{ "messages": []string{"B"}, }, nil }) g.SetEntryPoint("A") g.AddEdge("A", "B") g.AddEdge("B", END) runnable, err := g.Compile() assert.NoError(t, err) initialState := map[string]any{ "messages": []string{"start"}, } result, err := runnable.Invoke(context.Background(), initialState) assert.NoError(t, err) assert.Equal(t, []string{"start", "A", "B"}, result["messages"]) } func TestMapSchema_Update_Integration(t *testing.T) { schema := NewMapSchema() schema.RegisterReducer("messages", AppendReducer) initialState := map[string]any{ "messages": []string{"hello"}, "count": 1, } // Update 1: Append message update1 := map[string]any{ "messages": []string{"world"}, } newState1, err := schema.Update(initialState, update1) 
assert.NoError(t, err) state1 := newState1 assert.Equal(t, []string{"hello", "world"}, state1["messages"]) assert.Equal(t, 1, state1["count"]) // Update 2: Overwrite count update2 := map[string]any{ "count": 2, } newState2, err := schema.Update(state1, update2) assert.NoError(t, err) state2 := newState2 assert.Equal(t, []string{"hello", "world"}, state2["messages"]) assert.Equal(t, 2, state2["count"]) // Update 3: Append single element (if supported by AppendReducer logic, currently it supports slice or element) // Let's test appending a single string update3 := map[string]any{ "messages": "!", } newState3, err := schema.Update(state2, update3) assert.NoError(t, err) state3 := newState3 assert.Equal(t, []string{"hello", "world", "!"}, state3["messages"]) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/listeners_test.go
graph/listeners_test.go
package graph_test import ( "context" "fmt" "sync" "testing" "time" "github.com/smallnest/langgraphgo/graph" ) const ( resultValue = "result" ) func TestListenableNode_AddListener(t *testing.T) { t.Parallel() node := graph.TypedNode[string]{ Name: testNode, Function: func(ctx context.Context, state string) (string, error) { return resultValue, nil }, } listenableNode := graph.NewListenableNode(node) // Test adding listener var eventReceived bool var mu sync.Mutex listener := graph.NodeListenerFunc[string](func(ctx context.Context, event graph.NodeEvent, nodeName string, state string, err error) { mu.Lock() eventReceived = true mu.Unlock() }) listenableNode.AddListener(listener) // Verify listener was added listeners := listenableNode.GetListeners() if len(listeners) != 1 { t.Errorf("Expected 1 listener, got %d", len(listeners)) } // Test listener is called during execution ctx := context.Background() _, err := listenableNode.Execute(ctx, "input") if err != nil { t.Fatalf("Execution failed: %v", err) } // Give some time for async listeners time.Sleep(10 * time.Millisecond) mu.Lock() received := eventReceived mu.Unlock() if !received { t.Error("Listener should have been called") } } func TestListenableNode_Execute(t *testing.T) { t.Parallel() node := graph.TypedNode[string]{ Name: testNode, Function: func(ctx context.Context, state string) (string, error) { return fmt.Sprintf("processed_%v", state), nil }, } listenableNode := graph.NewListenableNode(node) // Track events var events []graph.NodeEvent var mutex sync.Mutex listener := graph.NodeListenerFunc[string](func(ctx context.Context, event graph.NodeEvent, nodeName string, state string, err error) { mutex.Lock() defer mutex.Unlock() events = append(events, event) }) listenableNode.AddListener(listener) // Execute node ctx := context.Background() result, err := listenableNode.Execute(ctx, "test_input") if err != nil { t.Fatalf("Execution failed: %v", err) } if result != "processed_test_input" { t.Errorf("Expected 
'processed_test_input', got %v", result) } // Wait for async listeners time.Sleep(50 * time.Millisecond) mutex.Lock() defer mutex.Unlock() // Should have start and complete events if len(events) != 2 { t.Errorf("Expected 2 events, got %d", len(events)) return } // Events may arrive out of order due to goroutines, so check presence instead hasStart := false hasComplete := false for _, event := range events { switch event { case graph.NodeEventStart: hasStart = true case graph.NodeEventComplete: hasComplete = true case graph.NodeEventProgress, graph.NodeEventError: // These events are not expected in this test but handled for completeness } } if !hasStart { t.Error("Should have received start event") } if !hasComplete { t.Error("Should have received complete event") } } func TestListenableNode_ExecuteWithError(t *testing.T) { t.Parallel() node := graph.TypedNode[string]{ Name: "error_node", Function: func(ctx context.Context, state string) (string, error) { return "", fmt.Errorf("test error") }, } listenableNode := graph.NewListenableNode(node) // Track events var events []graph.NodeEvent var lastError error var mutex sync.Mutex listener := graph.NodeListenerFunc[string](func(ctx context.Context, event graph.NodeEvent, nodeName string, state string, err error) { mutex.Lock() defer mutex.Unlock() events = append(events, event) if err != nil { lastError = err } }) listenableNode.AddListener(listener) // Execute node (should fail) ctx := context.Background() _, err := listenableNode.Execute(ctx, "test_input") if err == nil { t.Fatal("Expected execution to fail") } // Wait for async listeners time.Sleep(50 * time.Millisecond) mutex.Lock() defer mutex.Unlock() // Should have start and error events if len(events) != 2 { t.Errorf("Expected 2 events, got %d", len(events)) return } // Events may arrive out of order due to goroutines, so check presence instead hasStart := false hasError := false for _, event := range events { switch event { case graph.NodeEventStart: hasStart 
= true case graph.NodeEventError: hasError = true case graph.NodeEventProgress, graph.NodeEventComplete: // These events are not expected in this test but handled for completeness } } if !hasStart { t.Error("Should have received start event") } if !hasError { t.Error("Should have received error event") } if lastError == nil { t.Error("Error event should contain the error") } } func TestListenableStateGraph_AddNode(t *testing.T) { t.Parallel() g := graph.NewListenableStateGraph[string]() // Add a node node := g.AddNode(testNode, testNode, func(ctx context.Context, state string) (string, error) { return resultValue, nil }) if node == nil { t.Fatal("AddNode should return a ListenableNode") } // Verify node was added to graph listenableNode := g.GetListenableNode(testNode) if listenableNode == nil { t.Fatal("Node should be retrievable") return } if listenableNode.Name != testNode { t.Errorf("Expected node name 'test_node', got %v", listenableNode.Name) } } func TestListenableStateGraph_GlobalListeners(t *testing.T) { t.Parallel() g := graph.NewListenableStateGraph[map[string]any]() // Add multiple nodes node1 := g.AddNode("node1", "node1", func(ctx context.Context, state map[string]any) (map[string]any, error) { return map[string]any{"res": "result1"}, nil }) node2 := g.AddNode("node2", "node2", func(ctx context.Context, state map[string]any) (map[string]any, error) { return map[string]any{"res": "result2"}, nil }) // Add global listener var eventCount int var mutex sync.Mutex globalListener := graph.NodeListenerFunc[map[string]any](func(ctx context.Context, event graph.NodeEvent, nodeName string, state map[string]any, err error) { mutex.Lock() defer mutex.Unlock() eventCount++ }) g.AddGlobalListener(globalListener) // Verify listeners were added to all nodes if len(node1.GetListeners()) != 1 { t.Error("Global listener should be added to node1") } if len(node2.GetListeners()) != 1 { t.Error("Global listener should be added to node2") } // Execute both nodes ctx := 
context.Background() _, _ = node1.Execute(ctx, map[string]any{"input": "input1"}) _, _ = node2.Execute(ctx, map[string]any{"input": "input2"}) // Wait for async listeners time.Sleep(20 * time.Millisecond) mutex.Lock() defer mutex.Unlock() // Should have 4 events total (2 nodes * 2 events each) if eventCount != 4 { t.Errorf("Expected 4 events, got %d", eventCount) } } func TestListenableRunnable_Invoke(t *testing.T) { t.Parallel() g := graph.NewListenableStateGraph[map[string]any]() // Create a simple pipeline using map-based state node1 := g.AddNode("node1", "node1", func(ctx context.Context, state map[string]any) (map[string]any, error) { // Return updated map state return map[string]any{"state": "step1_input"}, nil }) node2 := g.AddNode("node2", "node2", func(ctx context.Context, state map[string]any) (map[string]any, error) { // Return updated map state return map[string]any{"state": "step2_step1_input"}, nil }) // Add edges g.AddEdge("node1", "node2") g.AddEdge("node2", graph.END) g.SetEntryPoint("node1") // Track execution flow var executionFlow []string var mutex sync.Mutex listener := graph.NodeListenerFunc[map[string]any](func(ctx context.Context, event graph.NodeEvent, nodeName string, state map[string]any, err error) { mutex.Lock() defer mutex.Unlock() executionFlow = append(executionFlow, fmt.Sprintf("%s:%s", nodeName, event)) }) node1.AddListener(listener) node2.AddListener(listener) // Compile and execute runnable, err := g.CompileListenable() if err != nil { t.Fatalf("Failed to compile: %v", err) } ctx := context.Background() result, err := runnable.Invoke(ctx, map[string]any{"state": "input"}) if err != nil { t.Fatalf("Execution failed: %v", err) } // result is already map[string]any if result["state"] != "step2_step1_input" { t.Errorf("Expected 'step2_step1_input', got %v", result["state"]) } // Wait for async listeners time.Sleep(50 * time.Millisecond) mutex.Lock() defer mutex.Unlock() // Should have events for both nodes (4 total events) if 
len(executionFlow) != 4 { t.Errorf("Expected 4 events, got %d: %v", len(executionFlow), executionFlow) return } // Check that we have the right events (order may vary due to goroutines) eventCounts := make(map[string]int) for _, event := range executionFlow { eventCounts[event]++ } expectedEvents := map[string]int{ "node1:start": 1, "node1:complete": 1, "node2:start": 1, "node2:complete": 1, } for expectedEvent, expectedCount := range expectedEvents { if eventCounts[expectedEvent] != expectedCount { t.Errorf("Expected %d occurrences of %s, got %d", expectedCount, expectedEvent, eventCounts[expectedEvent]) } } } func TestListenerPanicRecovery(t *testing.T) { t.Parallel() node := graph.TypedNode[string]{ Name: testNode, Function: func(ctx context.Context, state string) (string, error) { return resultValue, nil }, } listenableNode := graph.NewListenableNode(node) // Add a panicking listener panicListener := graph.NodeListenerFunc[string](func(ctx context.Context, event graph.NodeEvent, nodeName string, state string, err error) { panic("test panic") }) // Add a normal listener var normalListenerCalled bool var mutex sync.Mutex normalListener := graph.NodeListenerFunc[string](func(ctx context.Context, event graph.NodeEvent, nodeName string, state string, err error) { mutex.Lock() defer mutex.Unlock() normalListenerCalled = true }) listenableNode.AddListener(panicListener) listenableNode.AddListener(normalListener) // Execute should not panic even though listener panics ctx := context.Background() result, err := listenableNode.Execute(ctx, "input") if err != nil { t.Fatalf("Execution failed: %v", err) } if result != resultValue { t.Errorf("Expected 'result', got %v", result) } // Wait for async listeners time.Sleep(20 * time.Millisecond) mutex.Lock() defer mutex.Unlock() // Normal listener should still have been called if !normalListenerCalled { t.Error("Normal listener should have been called despite panic in other listener") } } func TestStreamEvent_Creation(t 
*testing.T) { t.Parallel() timestamp := time.Now() event := &graph.StreamEvent[string]{ Timestamp: timestamp, NodeName: testNode, Event: graph.NodeEventComplete, State: testState, Error: nil, Metadata: map[string]any{"key": "value"}, Duration: 100 * time.Millisecond, } if event.Timestamp != timestamp { t.Error("Timestamp should be preserved") } if event.NodeName != testNode { t.Error("NodeName should be preserved") } if event.Event != graph.NodeEventComplete { t.Error("Event should be preserved") } if event.State != testState { t.Error("State should be preserved") } if event.Duration != 100*time.Millisecond { t.Error("Duration should be preserved") } } // Benchmark tests func BenchmarkListenableNode_Execute(b *testing.B) { node := graph.TypedNode[string]{ Name: "benchmark_node", Function: func(ctx context.Context, state string) (string, error) { return state, nil }, } listenableNode := graph.NewListenableNode(node) // Add a listener listener := graph.NodeListenerFunc[string](func(ctx context.Context, event graph.NodeEvent, nodeName string, state string, err error) { // No-op listener for benchmarking }) listenableNode.AddListener(listener) ctx := context.Background() for b.Loop() { _, _ = listenableNode.Execute(ctx, "test") } } func BenchmarkListenableRunnable_Invoke(b *testing.B) { g := graph.NewListenableStateGraph[map[string]any]() node := g.AddNode("node", "node", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddEdge("node", graph.END) g.SetEntryPoint("node") // Add listener listener := graph.NodeListenerFunc[map[string]any](func(ctx context.Context, event graph.NodeEvent, nodeName string, state map[string]any, err error) { // No-op for benchmarking }) node.AddListener(listener) runnable, _ := g.CompileListenable() ctx := context.Background() for b.Loop() { _, _ = runnable.Invoke(ctx, map[string]any{"test": "test"}) } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/errors.go
graph/errors.go
package graph import "fmt" // NodeInterrupt is returned when a node requests an interrupt (e.g. waiting for human input). type NodeInterrupt struct { // Node is the name of the node that triggered the interrupt Node string // Value is the data/query provided by the interrupt Value any } func (e *NodeInterrupt) Error() string { return fmt.Sprintf("interrupt at node %s: %v", e.Node, e.Value) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/state_graph.go
graph/state_graph.go
package graph import ( "context" "errors" "fmt" "slices" "strings" "sync" "time" ) // StateGraph represents a generic state-based graph with compile-time type safety. // The type parameter S represents the state type, which is typically a struct. // // Example usage: // // type MyState struct { // Count int // Name string // } // // g := graph.NewStateGraph[MyState]() // g.AddNode("increment", "Increment counter", func(ctx context.Context, state MyState) (MyState, error) { // state.Count++ // return state, nil // }) type StateGraph[S any] struct { // nodes is a map of node names to their corresponding Node objects nodes map[string]TypedNode[S] // edges is a slice of Edge objects representing the connections between nodes edges []Edge // conditionalEdges contains a map between "From" node, while "To" node is derived based on the condition conditionalEdges map[string]func(ctx context.Context, state S) string // entryPoint is the name of the entry point node in the graph entryPoint string // retryPolicy defines retry behavior for failed nodes retryPolicy *RetryPolicy // stateMerger is an optional function to merge states from parallel execution stateMerger TypedStateMerger[S] // Schema defines the state structure and update logic Schema StateSchema[S] } // TypedNode represents a typed node in the graph. type TypedNode[S any] struct { Name string Description string Function func(ctx context.Context, state S) (S, error) } // StateMerger is a typed function to merge states from parallel execution. type TypedStateMerger[S any] func(ctx context.Context, currentState S, newStates []S) (S, error) // NewStateGraph creates a new instance of StateGraph with type safety. // The type parameter S specifies the state type. 
// // Example: // // g := graph.NewStateGraph[MyState]() func NewStateGraph[S any]() *StateGraph[S] { return &StateGraph[S]{ nodes: make(map[string]TypedNode[S]), conditionalEdges: make(map[string]func(ctx context.Context, state S) string), } } // AddNode adds a new node to the state graph with the given name, description and function. // The node function is fully typed - no type assertions needed! // // Example: // // g.AddNode("process", "Process data", func(ctx context.Context, state MyState) (MyState, error) { // state.Count++ // Type-safe access! // return state, nil // }) func (g *StateGraph[S]) AddNode(name string, description string, fn func(ctx context.Context, state S) (S, error)) { g.nodes[name] = TypedNode[S]{ Name: name, Description: description, Function: fn, } } // AddEdge adds a new edge to the state graph between the "from" and "to" nodes. func (g *StateGraph[S]) AddEdge(from, to string) { g.edges = append(g.edges, Edge{ From: from, To: to, }) } // AddConditionalEdge adds a conditional edge where the target node is determined at runtime. // The condition function is fully typed - no type assertions needed! // // Example: // // g.AddConditionalEdge("check", func(ctx context.Context, state MyState) string { // if state.Count > 10 { // Type-safe access! // return "high" // } // return "low" // }) func (g *StateGraph[S]) AddConditionalEdge(from string, condition func(ctx context.Context, state S) string) { g.conditionalEdges[from] = condition } // SetEntryPoint sets the entry point node name for the state graph. func (g *StateGraph[S]) SetEntryPoint(name string) { g.entryPoint = name } // SetRetryPolicy sets the retry policy for the graph. func (g *StateGraph[S]) SetRetryPolicy(policy *RetryPolicy) { g.retryPolicy = policy } // SetStateMerger sets the state merger function for the state graph. func (g *StateGraph[S]) SetStateMerger(merger TypedStateMerger[S]) { g.stateMerger = merger } // SetSchema sets the state schema for the graph. 
func (g *StateGraph[S]) SetSchema(schema StateSchema[S]) { g.Schema = schema } // StateRunnable represents a compiled state graph that can be invoked with type safety. type StateRunnable[S any] struct { graph *StateGraph[S] tracer *Tracer nodeRunner func(ctx context.Context, nodeName string, state S) (S, error) } // Compile compiles the state graph and returns a StateRunnable instance. func (g *StateGraph[S]) Compile() (*StateRunnable[S], error) { if g.entryPoint == "" { return nil, ErrEntryPointNotSet } return &StateRunnable[S]{ graph: g, tracer: nil, // Initialize with no tracer }, nil } // SetTracer sets a tracer for observability. func (r *StateRunnable[S]) SetTracer(tracer *Tracer) { r.tracer = tracer } // GetTracer returns the current tracer. func (r *StateRunnable[S]) GetTracer() *Tracer { return r.tracer } // WithTracer returns a new StateRunnable with the given tracer. func (r *StateRunnable[S]) WithTracer(tracer *Tracer) *StateRunnable[S] { return &StateRunnable[S]{ graph: r.graph, tracer: tracer, } } // Invoke executes the compiled state graph with the given input state. // Returns the final state with full type safety - no type assertions needed! // // Example: // // initialState := MyState{Count: 0} // finalState, err := app.Invoke(ctx, initialState) // // finalState is MyState type - no casting needed! func (r *StateRunnable[S]) Invoke(ctx context.Context, initialState S) (S, error) { return r.InvokeWithConfig(ctx, initialState, nil) } // InvokeWithConfig executes the compiled state graph with the given input state and config. 
func (r *StateRunnable[S]) InvokeWithConfig(ctx context.Context, initialState S, config *Config) (S, error) {
	state := initialState

	// If schema is defined, merge initialState into schema's initial state
	if r.graph.Schema != nil {
		schemaInit := r.graph.Schema.Init()
		var err error
		state, err = r.graph.Schema.Update(schemaInit, initialState)
		if err != nil {
			var zero S
			return zero, fmt.Errorf("failed to initialize state with schema: %w", err)
		}
	}

	currentNodes := []string{r.graph.entryPoint}

	// Handle ResumeFrom: resuming replaces the entry point with the saved frontier.
	if config != nil && len(config.ResumeFrom) > 0 {
		currentNodes = config.ResumeFrom
	}

	// Generate run ID for callbacks; the same ID ties together all chain-level events.
	runID := generateRunID()

	// Notify callbacks of graph start
	if config != nil {
		// Inject config into context so nodes can read it via GetConfig.
		ctx = WithConfig(ctx, config)

		// Inject ResumeValue (value handed back to an interrupted node on resume).
		if config.ResumeValue != nil {
			ctx = WithResumeValue(ctx, config.ResumeValue)
		}

		if len(config.Callbacks) > 0 {
			serialized := map[string]any{
				"name": "graph",
				"type": "chain",
			}
			inputs := convertStateToMap(initialState)
			for _, cb := range config.Callbacks {
				cb.OnChainStart(ctx, serialized, inputs, runID, nil, config.Tags, config.Metadata)
			}
		}
	}

	// Start graph tracing if tracer is set
	var graphSpan *TraceSpan
	if r.tracer != nil {
		graphSpan = r.tracer.StartSpan(ctx, TraceEventGraphStart, "graph")
		graphSpan.State = initialState
	}

	// Main step loop: each iteration runs the current frontier of nodes in
	// parallel, merges their results, then computes the next frontier.
	for len(currentNodes) > 0 {
		// Filter out END nodes
		activeNodes := make([]string, 0, len(currentNodes))
		for _, node := range currentNodes {
			if node != END {
				activeNodes = append(activeNodes, node)
			}
		}
		currentNodes = activeNodes

		if len(currentNodes) == 0 {
			break
		}

		// Check InterruptBefore: pause before executing a listed node.
		if config != nil && len(config.InterruptBefore) > 0 {
			for _, node := range currentNodes {
				if slices.Contains(config.InterruptBefore, node) {
					return state, &GraphInterrupt{Node: node, State: state}
				}
			}
		}

		// Execute nodes in parallel
		results, errorsList := r.executeNodesParallel(ctx, currentNodes, state, config, runID)

		// Process results (including results from interrupted nodes)
		processedResults, nextNodesFromCommands := r.processNodeResults(results)

		// Merge results into state (this preserves state updates from interrupted nodes)
		var mergeErr error
		state, mergeErr = r.mergeState(ctx, state, processedResults)
		if mergeErr != nil {
			var zero S
			return zero, mergeErr
		}

		// Now check for errors after merging state
		// We check here to determine if we should save checkpoints (for interrupts) or not (for regular errors)
		var hasNodeInterrupt bool
		var nodeInterrupt *NodeInterrupt
		for _, err := range errorsList {
			if err != nil {
				if errors.As(err, &nodeInterrupt) {
					hasNodeInterrupt = true
					break
				}
			}
		}

		// Keep track of nodes that ran for callbacks and interrupts
		nodesRan := make([]string, len(currentNodes))
		copy(nodesRan, currentNodes)

		// Notify callbacks of step completion (and save checkpoints)
		// For NodeInterrupt: we DO want to save the checkpoint (Issue #70)
		// For regular errors: we DON'T want to save checkpoints
		if config != nil && len(config.Callbacks) > 0 {
			if hasNodeInterrupt {
				// Save checkpoint before returning the interrupt
				for _, cb := range config.Callbacks {
					if gcb, ok := cb.(GraphCallbackHandler); ok {
						var nodeName string
						if len(nodesRan) == 1 {
							nodeName = nodesRan[0]
						} else {
							nodeName = fmt.Sprintf("step:%v", nodesRan)
						}
						gcb.OnGraphStep(ctx, nodeName, state)
					}
				}
			}
		}

		// Now handle the errors
		for _, err := range errorsList {
			if err != nil {
				if hasNodeInterrupt && nodeInterrupt != nil {
					// Return GraphInterrupt with the merged state
					// OnGraphStep has already been called, so checkpoint was saved
					return state, &GraphInterrupt{
						Node:           nodeInterrupt.Node,
						State:          state,
						InterruptValue: nodeInterrupt.Value,
						NextNodes:      []string{nodeInterrupt.Node},
					}
				}

				// For regular errors (not interrupts), don't save checkpoint
				// Notify callbacks of error
				if config != nil && len(config.Callbacks) > 0 {
					for _, cb := range config.Callbacks {
						cb.OnChainError(ctx, err, runID)
					}
				}
				var zero S
				return zero, err
			}
		}

		// Determine next nodes (Command.Goto targets take precedence over edges).
		nextNodesList, err := r.determineNextNodes(ctx, currentNodes, state, nextNodesFromCommands)
		if err != nil {
			var zero S
			return zero, err
		}

		// Update currentNodes
		currentNodes = nextNodesList

		// Notify callbacks of step completion for normal execution (no errors)
		if config != nil && len(config.Callbacks) > 0 {
			for _, cb := range config.Callbacks {
				if gcb, ok := cb.(GraphCallbackHandler); ok {
					var nodeName string
					if len(nodesRan) == 1 {
						nodeName = nodesRan[0]
					} else {
						nodeName = fmt.Sprintf("step:%v", nodesRan)
					}
					gcb.OnGraphStep(ctx, nodeName, state)
				}
			}
		}

		// Check InterruptAfter: pause after a listed node ran, reporting the
		// already-computed next frontier so execution can resume from it.
		if config != nil && len(config.InterruptAfter) > 0 {
			for _, node := range nodesRan {
				if slices.Contains(config.InterruptAfter, node) {
					return state, &GraphInterrupt{
						Node:      node,
						State:     state,
						NextNodes: nextNodesList,
					}
				}
			}
		}
	}

	// End graph tracing
	if r.tracer != nil && graphSpan != nil {
		r.tracer.EndSpan(ctx, graphSpan, state, nil)
	}

	// Notify callbacks of graph end
	if config != nil && len(config.Callbacks) > 0 {
		outputs := convertStateToMap(state)
		for _, cb := range config.Callbacks {
			cb.OnChainEnd(ctx, outputs, runID)
		}
	}

	return state, nil
}

// executeNodeWithRetry executes a node with retry logic based on the retry policy.
func (r *StateRunnable[S]) executeNodeWithRetry(ctx context.Context, node TypedNode[S], state S) (S, error) {
	var lastErr error
	var zero S

	maxRetries := 1 // Default: no retries
	if r.graph.retryPolicy != nil {
		maxRetries = r.graph.retryPolicy.MaxRetries + 1 // +1 for initial attempt
	}

	for attempt := 0; attempt < maxRetries; attempt++ {
		var result S
		var err error
		// A custom nodeRunner, when set, replaces the node's own Function.
		if r.nodeRunner != nil {
			result, err = r.nodeRunner(ctx, node.Name, state)
		} else {
			result, err = node.Function(ctx, state)
		}
		if err == nil {
			return result, nil
		}

		// For NodeInterrupt, return the result along with the error
		// so that state updates made before the interrupt are preserved
		var nodeInterrupt *NodeInterrupt
		if errors.As(err, &nodeInterrupt) {
			return result, err
		}

		lastErr = err

		// Check if error is retryable
		if r.graph.retryPolicy != nil && attempt < maxRetries-1 {
			if r.isRetryableError(err) {
				// Apply backoff strategy
				delay := r.calculateBackoffDelay(attempt)
				if delay > 0 {
					select {
					case <-time.After(delay):
						// Continue with retry after delay
					case <-ctx.Done():
						// Context cancelled, return immediately
						return zero, ctx.Err()
					}
				}
				continue
			}
		}

		// If not retryable or max retries reached, return error
		break
	}

	return zero, lastErr
}

// isRetryableError checks if an error is retryable based on the retry policy.
// Matching is a plain substring search of the error text against each
// configured pattern.
func (r *StateRunnable[S]) isRetryableError(err error) bool {
	if r.graph.retryPolicy == nil {
		return false
	}
	errorStr := err.Error()
	for _, retryablePattern := range r.graph.retryPolicy.RetryableErrors {
		if strings.Contains(errorStr, retryablePattern) {
			return true
		}
	}
	return false
}

// calculateBackoffDelay calculates the delay for retry based on the backoff strategy.
// attempt is zero-based (0 for the first retry).
func (r *StateRunnable[S]) calculateBackoffDelay(attempt int) time.Duration {
	if r.graph.retryPolicy == nil {
		return 0
	}

	baseDelay := time.Second // Default 1 second base delay

	switch r.graph.retryPolicy.BackoffStrategy {
	case FixedBackoff:
		return baseDelay
	case ExponentialBackoff:
		// Exponential backoff: 1s, 2s, 4s, 8s, ...
		return baseDelay * time.Duration(1<<attempt)
	case LinearBackoff:
		// Linear backoff: 1s, 2s, 3s, 4s, ...
		return baseDelay * time.Duration(attempt+1)
	default:
		return baseDelay
	}
}

// executeNodesParallel executes valid nodes in parallel and returns their results or errors.
// Each goroutine writes only its own index of results/errorsList, so no mutex is needed.
func (r *StateRunnable[S]) executeNodesParallel(ctx context.Context, nodes []string, state S, config *Config, runID string) ([]S, []error) {
	var wg sync.WaitGroup
	results := make([]S, len(nodes))
	errorsList := make([]error, len(nodes))

	for i, nodeName := range nodes {
		node, ok := r.graph.nodes[nodeName]
		if !ok {
			errorsList[i] = fmt.Errorf("%w: %s", ErrNodeNotFound, nodeName)
			continue
		}

		// Prepare variables for closure
		idx := i
		n := node
		name := nodeName

		SafeGo(&wg, func() {
			// Start node tracing
			var nodeSpan *TraceSpan
			if r.tracer != nil {
				nodeSpan = r.tracer.StartSpan(ctx, TraceEventNodeStart, name)
				nodeSpan.State = state
			}

			var err error
			var res S

			// Execute node with retry logic
			res, err = r.executeNodeWithRetry(ctx, n, state)

			// End node tracing
			if r.tracer != nil && nodeSpan != nil {
				if err != nil {
					r.tracer.EndSpan(ctx, nodeSpan, res, err)
					// Also emit error event
					errorSpan := r.tracer.StartSpan(ctx, TraceEventNodeError, name)
					errorSpan.Error = err
					errorSpan.State = res
					r.tracer.EndSpan(ctx, errorSpan, res, err)
				} else {
					r.tracer.EndSpan(ctx, nodeSpan, res, nil)
				}
			}

			if err != nil {
				var nodeInterrupt *NodeInterrupt
				if errors.As(err, &nodeInterrupt) {
					nodeInterrupt.Node = name
					// For NodeInterrupt, save the result so state updates are preserved
					results[idx] = res
				}
				errorsList[idx] = fmt.Errorf("error in node %s: %w", name, err)
				return
			}
			results[idx] = res

			// Notify callbacks of node execution (as tool)
			// NOTE(review): OnToolStart is invoked after the node ran and is
			// given the node's output (res), not its input state — confirm
			// whether callers expect the pre-execution state here.
			if config != nil && len(config.Callbacks) > 0 {
				nodeRunID := generateRunID()
				serialized := map[string]any{
					"name": name,
					"type": "tool",
				}
				for _, cb := range config.Callbacks {
					cb.OnToolStart(ctx, serialized, convertStateToString(res), nodeRunID, &runID, config.Tags, config.Metadata)
					cb.OnToolEnd(ctx, convertStateToString(res), nodeRunID)
				}
			}
		}, func(panicVal any) {
			errorsList[idx] = fmt.Errorf("panic in node %s: %v", name, panicVal)
		})
	}

	wg.Wait()
	return results, errorsList
}

// processNodeResults processes the raw results from nodes, handling Commands.
// It returns the per-node state updates plus any Goto targets extracted from
// Commands (which later override static/conditional edges).
func (r *StateRunnable[S]) processNodeResults(results []S) ([]S, []string) {
	var nextNodesFromCommands []string
	processedResults := make([]S, len(results))

	for i, res := range results {
		// Try to type assert to *Command
		if cmd, ok := any(res).(*Command); ok {
			// It's a Command - extract Update and Goto
			if cmd.Update != nil {
				// Try to convert Update to S type
				if updateS, ok := cmd.Update.(S); ok {
					processedResults[i] = updateS
				} else {
					// If Update cannot be converted to S, use zero value
					// This maintains type safety while handling the conversion failure
					var zero S
					processedResults[i] = zero
				}
			} else {
				// If Update is nil, use zero value
				var zero S
				processedResults[i] = zero
			}

			// Extract Goto to determine next nodes
			if cmd.Goto != nil {
				switch g := cmd.Goto.(type) {
				case string:
					nextNodesFromCommands = append(nextNodesFromCommands, g)
				case []string:
					nextNodesFromCommands = append(nextNodesFromCommands, g...)
				}
			}
		} else {
			// Regular result - not a Command
			processedResults[i] = res
		}
	}

	return processedResults, nextNodesFromCommands
}

// mergeState merges the processed results into the current state.
func (r *StateRunnable[S]) mergeState(ctx context.Context, currentState S, results []S) (S, error) { state := currentState if r.graph.Schema != nil { // If Schema is defined, use it to update state with results for _, res := range results { var err error state, err = r.graph.Schema.Update(state, res) if err != nil { var zero S return zero, fmt.Errorf("schema update failed: %w", err) } } } else if r.graph.stateMerger != nil { var err error state, err = r.graph.stateMerger(ctx, state, results) if err != nil { var zero S return zero, fmt.Errorf("state merge failed: %w", err) } } else { if len(results) > 0 { state = results[len(results)-1] } } return state, nil } // determineNextNodes determines the next nodes to execute based on static edges, conditional edges, or commands. func (r *StateRunnable[S]) determineNextNodes(ctx context.Context, currentNodes []string, state S, nextNodesFromCommands []string) ([]string, error) { var nextNodesList []string if len(nextNodesFromCommands) > 0 { // Command.Goto overrides static edges // We deduplicate seen := make(map[string]bool) for _, n := range nextNodesFromCommands { if !seen[n] && n != END { seen[n] = true nextNodesList = append(nextNodesList, n) } } } else { // Use static edges nextNodesSet := make(map[string]bool) for _, nodeName := range currentNodes { // First check for conditional edges nextNodeFn, hasConditional := r.graph.conditionalEdges[nodeName] if hasConditional { nextNode := nextNodeFn(ctx, state) if nextNode == "" { var zero S _ = zero return nil, fmt.Errorf("conditional edge returned empty next node from %s", nodeName) } nextNodesSet[nextNode] = true } else { // Then check regular edges foundNext := false for _, edge := range r.graph.edges { if edge.From == nodeName { nextNodesSet[edge.To] = true foundNext = true // Do NOT break here, to allow fan-out (multiple edges from same node) } } if !foundNext { return nil, fmt.Errorf("%w: %s", ErrNoOutgoingEdge, nodeName) } } } // Update nextNodesList from set for node 
:= range nextNodesSet { nextNodesList = append(nextNodesList, node) } } return nextNodesList, nil }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/doc.go
graph/doc.go
// Package graph provides the core graph construction and execution engine for LangGraph Go. // // This package implements the fundamental building blocks for creating stateful, multi-agent applications // using directed graphs. It offers both untyped and typed interfaces for building workflows, // with support for parallel execution, checkpointing, streaming, and comprehensive event handling. // // # Core Concepts // // StateGraph // The primary component for building graphs is StateGraph, which maintains state as it flows // through nodes. Each node can process and transform the state before passing it to the next node // based on defined edges. // // Nodes and Edges // Nodes represent processing units (functions, agents, tools) that transform state. // Edges define the flow between nodes, supporting conditional routing based on state content. // // Typed Support // For type safety, the package provides StateGraph[S] which uses Go generics to enforce // state types at compile time, reducing runtime errors and improving code maintainability. 
// // # Key Features // // - Parallel node execution with coordination // - Checkpointing for durable execution with resume capability // - Streaming for real-time event monitoring // - Comprehensive listener system for observability // - Built-in retry mechanisms with configurable policies // - Subgraph composition for modular design // - Graph visualization (Mermaid, ASCII, DOT) // - Interrupt support for human-in-the-loop workflows // // # Example Usage // // Basic State Graph // // g := graph.NewStateGraph() // // // Add nodes // g.AddNode("process", "Process node", func(ctx context.Context, state any) (any, error) { // // Process the state // s := state.(map[string]any) // s["processed"] = true // return s, nil // }) // // g.AddNode("validate", "Validate node", func(ctx context.Context, state any) (any, error) { // // Validate the processed state // s := state.(map[string]any) // if s["processed"].(bool) { // s["valid"] = true // } // return s, nil // }) // // // Set entry point and edges // g.SetEntryPoint("process") // g.AddEdge("process", "validate") // g.AddEdge("validate", graph.END) // // // Compile and run // runnable := g.Compile() // result, err := runnable.Invoke(context.Background(), map[string]any{ // "data": "example", // }) // // Typed State Graph // // type WorkflowState struct { // Input string `json:"input"` // Output string `json:"output"` // Complete bool `json:"complete"` // } // // g := graph.NewStateGraph[WorkflowState]() // // g.AddNode("process", "Process the input", func(ctx context.Context, state WorkflowState) (WorkflowState, error) { // state.Output = strings.ToUpper(state.Input) // state.Complete = true // return state, nil // }) // // // Add validate node // g.AddNode("validate", "Validate the output", func(ctx context.Context, state WorkflowState) (WorkflowState, error) { // return state, nil // }) // g.AddNode("retry", "Retry processing", func(ctx context.Context, state WorkflowState) (WorkflowState, error) { // return state, 
nil // }) // // // Conditional routing // g.AddConditionalEdge("process", func(ctx context.Context, state WorkflowState) string { // if state.Complete { // return "validate" // } // return "retry" // }) // g.AddEdge("validate", graph.END) // g.AddEdge("retry", "process") // // Parallel Execution // // // Add parallel nodes // g.AddParallelNodes("parallel_tasks", map[string]func(context.Context, any) (any, error){ // "task1": func(ctx context.Context, state any) (any, error) { // // First task logic // return state, nil // }, // "task2": func(ctx context.Context, state any) (any, error) { // // Second task logic // return state, nil // }, // }) // // Checkpointing // // // Note: Checkpointing is handled at the runnable level // // See store package examples for checkpointing implementation // // runnable := g.Compile() // // // Execute with context // result, err := runnable.Invoke(context.Background(), initialState) // // Streaming // // // Create listenable graph for streaming // g := graph.NewListenableStateGraph() // g.AddNode("process", "Process node", func(ctx context.Context, state map[string]any) (map[string]any, error) { // state["processed"] = true // return state, nil // }) // g.SetEntryPoint("process") // g.AddEdge("process", graph.END) // // // Compile to listenable runnable // runnable, _ := g.CompileListenable() // // // Create streaming runnable // streaming := graph.NewStreamingRunnableWithDefaults(runnable) // // // Stream execution // result := streaming.Stream(context.Background(), initialState) // // // Process events // for event := range result.Events { // fmt.Printf("Event: %v\n", event) // } // // # Listener System // // The package provides a powerful listener system for monitoring and reacting to graph events: // // - ProgressListener: Track execution progress // - LoggingListener: Structured logging of events // - MetricsListener: Collect performance metrics // - ChatListener: Chat-style output formatting // - Custom listeners: Implement 
NodeListener interface // // Error Handling // // - Built-in retry policies with exponential backoff // - Custom error filtering for selective retries // - Interrupt handling for pausing execution // - Comprehensive error context in events // // # Visualization // // Export graphs for documentation and debugging: // // exporter := graph.NewExporter(g) // // // Mermaid diagram // mermaid := exporter.DrawMermaid() // // // Mermaid with options // mermaidWithOptions := exporter.DrawMermaidWithOptions(graph.MermaidOptions{ // Direction: "LR", // Left to right // }) // // # Thread Safety // // All graph structures are thread-safe for read operations. Write operations (adding nodes, // edges, or listeners) should be performed before compilation or protected by external synchronization. // // Best Practices // // 1. Use typed graphs when possible for better type safety // 2. Set appropriate buffer sizes for streaming to balance memory and performance // 3. Implement proper error handling in node functions // 4. Use checkpoints for long-running or critical workflows // 5. Add listeners for debugging and monitoring // 6. Keep node functions pure and stateless when possible // 7. Use conditional edges for complex routing logic // 8. Leverage parallel execution for independent tasks package graph
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/utils.go
graph/utils.go
package graph

import (
	"context"
	"encoding/json"
	"fmt"
	"sync"

	"github.com/google/uuid"
)

// generateRunID returns a fresh unique identifier used to correlate callback events.
func generateRunID() string {
	return uuid.New().String()
}

// convertStateToMap renders an arbitrary state value as a map for callback payloads.
// A map[string]any state is returned unchanged; any other value is round-tripped
// through JSON, and values that cannot be represented that way are stringified
// under a single "state" key.
func convertStateToMap(state any) map[string]any {
	if asMap, ok := state.(map[string]any); ok {
		return asMap
	}

	raw, marshalErr := json.Marshal(state)
	if marshalErr != nil {
		return map[string]any{
			"state": fmt.Sprintf("%v", state),
		}
	}

	var decoded map[string]any
	if json.Unmarshal(raw, &decoded) != nil {
		return map[string]any{
			"state": string(raw),
		}
	}
	return decoded
}

// convertStateToString renders a state value as a string for callback payloads,
// preferring its JSON encoding and falling back to fmt formatting.
func convertStateToString(state any) string {
	if raw, err := json.Marshal(state); err == nil {
		return string(raw)
	}
	return fmt.Sprintf("%v", state)
}

// configKey is the private context key under which the invocation Config is stored.
type configKey struct{}

// WithConfig adds the config to the context.
func WithConfig(ctx context.Context, config *Config) context.Context {
	return context.WithValue(ctx, configKey{}, config)
}

// GetConfig retrieves the config from the context, or nil if none was attached.
func GetConfig(ctx context.Context) *Config {
	cfg, _ := ctx.Value(configKey{}).(*Config)
	return cfg
}

// SafeGo runs a function in a goroutine with panic recovery.
// It uses a WaitGroup (if provided) and supports a custom panic handler.
func SafeGo(wg *sync.WaitGroup, fn func(), onPanic func(any)) {
	if wg != nil {
		wg.Add(1)
	}
	go func() {
		defer func() {
			// Run the panic handler before signalling the WaitGroup, so that
			// wg.Wait() never returns while onPanic is still in flight.
			if rec := recover(); rec != nil {
				if onPanic == nil {
					fmt.Printf("panic recovered in SafeGo: %v\n", rec)
				} else {
					onPanic(rec)
				}
			}
			// Done() comes last so all cleanup has completed.
			if wg != nil {
				wg.Done()
			}
		}()
		fn()
	}()
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/schema.go
graph/schema.go
package graph

import (
	"fmt"
	"maps"
	"reflect"
)

// StateSchema defines the structure and update logic for the graph state with type safety.
type StateSchema[S any] interface {
	// Init returns the initial state.
	Init() S
	// Update merges the new state into the current state.
	Update(current, new S) (S, error)
}

// StructSchema implements StateSchema for struct-based states.
// It provides a simple and type-safe way to manage struct states.
//
// Example:
//
//	type MyState struct {
//		Count int
//		Logs  []string
//	}
//
//	schema := graph.NewStructSchema(
//		MyState{Count: 0},
//		func(current, new MyState) (MyState, error) {
//			// Merge logs (append)
//			current.Logs = append(current.Logs, new.Logs...)
//			// Add counts
//			current.Count += new.Count
//			return current, nil
//		},
//	)
type StructSchema[S any] struct {
	// InitialValue is returned by Init.
	InitialValue S
	// MergeFunc combines the current and new states in Update.
	MergeFunc func(current, new S) (S, error)
}

// NewStructSchema creates a new StructSchema with the given initial value and merge function.
// If merge function is nil, a default merge function will be used that overwrites non-zero fields.
func NewStructSchema[S any](initial S, merge func(S, S) (S, error)) *StructSchema[S] {
	if merge == nil {
		merge = DefaultStructMerge[S]
	}
	return &StructSchema[S]{
		InitialValue: initial,
		MergeFunc:    merge,
	}
}

// Init returns the initial state.
func (s *StructSchema[S]) Init() S {
	return s.InitialValue
}

// Update merges the new state into the current state using the merge function.
// With a nil MergeFunc (only possible if the struct was built directly rather
// than via NewStructSchema) the new state simply replaces the current one.
func (s *StructSchema[S]) Update(current, new S) (S, error) {
	if s.MergeFunc != nil {
		return s.MergeFunc(current, new)
	}
	// Default: return new state
	return new, nil
}

// DefaultStructMerge provides a default merge function for struct states.
// It uses reflection to merge non-zero fields from new into current.
// This is a sensible default for most struct types.
func DefaultStructMerge[S any](current, new S) (S, error) { // Use reflection to merge non-zero fields from new into current currentVal := reflect.ValueOf(&current).Elem() newVal := reflect.ValueOf(new) // Check if S is a struct if currentVal.Kind() != reflect.Struct { // For non-struct types, just return new return new, nil } for i := 0; i < newVal.NumField(); i++ { fieldNew := newVal.Field(i) if !fieldNew.IsZero() { currentField := currentVal.Field(i) if currentField.CanSet() { currentField.Set(fieldNew) } } } return current, nil } // OverwriteStructMerge is a merge function that completely replaces the current state with the new state. func OverwriteStructMerge[S any](current, new S) (S, error) { return new, nil } // FieldMerger provides fine-grained control over how individual struct fields are merged. type FieldMerger[S any] struct { InitialValue S FieldMergeFns map[string]func(currentVal, newVal reflect.Value) reflect.Value } // NewFieldMerger creates a new FieldMerger with the given initial value. func NewFieldMerger[S any](initial S) *FieldMerger[S] { return &FieldMerger[S]{ InitialValue: initial, FieldMergeFns: make(map[string]func(currentVal, newVal reflect.Value) reflect.Value), } } // RegisterFieldMerge registers a custom merge function for a specific field. func (fm *FieldMerger[S]) RegisterFieldMerge(fieldName string, mergeFn func(currentVal, newVal reflect.Value) reflect.Value) { fm.FieldMergeFns[fieldName] = mergeFn } // Init returns the initial state. func (fm *FieldMerger[S]) Init() S { return fm.InitialValue } // Update merges the new state into the current state using registered field merge functions. 
func (fm *FieldMerger[S]) Update(current, new S) (S, error) { currentVal := reflect.ValueOf(&current).Elem() newVal := reflect.ValueOf(new) if currentVal.Kind() != reflect.Struct { return new, fmt.Errorf("FieldMerger only works with struct types") } structType := currentVal.Type() for i := 0; i < structType.NumField(); i++ { field := structType.Field(i) fieldName := field.Name currentFieldVal := currentVal.Field(i) newFieldVal := newVal.Field(i) // Check if there's a custom merge function for this field if mergeFn, ok := fm.FieldMergeFns[fieldName]; ok { if currentFieldVal.CanSet() { mergedVal := mergeFn(currentFieldVal, newFieldVal) currentFieldVal.Set(mergedVal) } } else { // Default: overwrite if new value is non-zero if !newFieldVal.IsZero() && currentFieldVal.CanSet() { currentFieldVal.Set(newFieldVal) } } } return current, nil } // Common merge helpers for FieldMerger // AppendSliceMerge appends new slice to current slice. func AppendSliceMerge(current, new reflect.Value) reflect.Value { if current.Kind() != reflect.Slice || new.Kind() != reflect.Slice { return new } return reflect.AppendSlice(current, new) } // SumIntMerge adds two integer values. func SumIntMerge(current, new reflect.Value) reflect.Value { if current.Kind() == reflect.Int && new.Kind() == reflect.Int { return reflect.ValueOf(current.Int() + new.Int()).Convert(current.Type()) } return new } // OverwriteMerge always uses the new value. func OverwriteMerge(current, new reflect.Value) reflect.Value { return new } // KeepCurrentMerge always keeps the current value (ignores new). func KeepCurrentMerge(current, new reflect.Value) reflect.Value { return current } // MaxIntMerge takes the maximum of two integer values. func MaxIntMerge(current, new reflect.Value) reflect.Value { if current.Kind() == reflect.Int && new.Kind() == reflect.Int { if current.Int() > new.Int() { return current } } return new } // MinIntMerge takes the minimum of two integer values. 
// MinIntMerge takes the minimum of two integer values; for non-int kinds the new value wins.
func MinIntMerge(current, new reflect.Value) reflect.Value {
	if current.Kind() == reflect.Int && new.Kind() == reflect.Int {
		if current.Int() < new.Int() {
			return current
		}
	}
	return new
}

// Reducer defines how a state value should be updated.
// It takes the current value and the new value, and returns the merged value.
type Reducer func(current, new any) (any, error)

// MapSchema implements StateSchema for map[string]any.
// It allows defining reducers for specific keys.
type MapSchema struct {
	// Reducers maps a key to the function used to merge its values.
	Reducers map[string]Reducer
}

// NewMapSchema creates a new MapSchema.
func NewMapSchema() *MapSchema {
	return &MapSchema{
		Reducers: make(map[string]Reducer),
	}
}

// RegisterReducer adds a reducer for a specific key.
func (s *MapSchema) RegisterReducer(key string, reducer Reducer) {
	s.Reducers[key] = reducer
}

// Init returns an empty map.
func (s *MapSchema) Init() map[string]any {
	return make(map[string]any)
}

// Update merges the new map into the current map using registered reducers.
// Keys without a registered reducer are overwritten. The current map is never
// mutated; a copy is returned.
func (s *MapSchema) Update(current, new map[string]any) (map[string]any, error) {
	if current == nil {
		current = make(map[string]any)
	}

	// Create a copy of the current map to avoid mutating it directly
	result := make(map[string]any, len(current))
	maps.Copy(result, current)

	for k, v := range new {
		if reducer, ok := s.Reducers[k]; ok {
			// Use reducer
			currVal := result[k]
			mergedVal, err := reducer(currVal, v)
			if err != nil {
				return nil, fmt.Errorf("failed to reduce key %s: %w", k, err)
			}
			result[k] = mergedVal
		} else {
			// Default: Overwrite
			result[k] = v
		}
	}

	return result, nil
}

// Common Reducers

// OverwriteReducer replaces the old value with the new one.
func OverwriteReducer(current, new any) (any, error) {
	return new, nil
}

// AppendReducer appends the new value to the current slice.
// It supports appending a slice to a slice, or a single element to a slice.
// When element types are incompatible, the result is widened to []any.
func AppendReducer(current, new any) (any, error) {
	if current == nil {
		// If current is nil, start a new slice whose type is inferred from new.
		newVal := reflect.ValueOf(new)
		if newVal.Kind() == reflect.Slice {
			return new, nil
		}
		// Create slice of type of new
		sliceType := reflect.SliceOf(reflect.TypeOf(new))
		slice := reflect.MakeSlice(sliceType, 0, 1)
		slice = reflect.Append(slice, newVal)
		return slice.Interface(), nil
	}

	currVal := reflect.ValueOf(current)
	newVal := reflect.ValueOf(new)

	if currVal.Kind() != reflect.Slice {
		return nil, fmt.Errorf("current value is not a slice")
	}

	if newVal.Kind() == reflect.Slice {
		// Append slice to slice
		if currVal.Type().Elem() != newVal.Type().Elem() {
			// Types don't match, convert both to []any
			result := make([]any, 0, currVal.Len()+newVal.Len())
			for i := 0; i < currVal.Len(); i++ {
				result = append(result, currVal.Index(i).Interface())
			}
			for i := 0; i < newVal.Len(); i++ {
				result = append(result, newVal.Index(i).Interface())
			}
			return result, nil
		}
		return reflect.AppendSlice(currVal, newVal).Interface(), nil
	}

	// Append single element.
	// Fix: reflect.Append panicked when the element's type was not assignable
	// to the slice's element type; widen to []any instead, mirroring the
	// slice-to-slice mismatch path above.
	if !newVal.Type().AssignableTo(currVal.Type().Elem()) {
		result := make([]any, 0, currVal.Len()+1)
		for i := 0; i < currVal.Len(); i++ {
			result = append(result, currVal.Index(i).Interface())
		}
		return append(result, new), nil
	}
	return reflect.Append(currVal, newVal).Interface(), nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/graph_test.go
graph/graph_test.go
package graph_test import ( "context" "errors" "fmt" "os" "testing" "github.com/smallnest/langgraphgo/graph" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/llms/openai" ) func ExampleStateGraph() { // Skip if no OpenAI API key is available if os.Getenv("OPENAI_API_KEY") == "" { fmt.Println("[{human [{What is 1 + 1?}]} {ai [{1 + 1 equals 2.}]}]") return } model, err := openai.New() if err != nil { panic(err) } g := graph.NewStateGraph[[]llms.MessageContent]() g.AddNode("oracle", "oracle", func(ctx context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) { r, err := model.GenerateContent(ctx, state, llms.WithTemperature(0.0)) if err != nil { return nil, err } return append(state, llms.TextParts("ai", r.Choices[0].Content), ), nil }) g.AddNode(graph.END, graph.END, func(_ context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) { return state, nil }) g.AddEdge("oracle", graph.END) g.SetEntryPoint("oracle") runnable, err := g.Compile() if err != nil { panic(err) } ctx := context.Background() // Let's run it! 
res, err := runnable.Invoke(ctx, []llms.MessageContent{ llms.TextParts("human", "What is 1 + 1?"), }) if err != nil { panic(err) } fmt.Println(res) } func TestStateGraph(t *testing.T) { t.Parallel() testCases := []struct { name string buildGraph func() *graph.StateGraph[[]llms.MessageContent] inputMessages []llms.MessageContent expectedOutput []llms.MessageContent expectedError error }{ { name: "Simple graph", buildGraph: func() *graph.StateGraph[[]llms.MessageContent] { g := graph.NewStateGraph[[]llms.MessageContent]() g.AddNode("node1", "node1", func(_ context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) { return append(state, llms.TextParts("ai", "Node 1")), nil }) g.AddNode("node2", "node2", func(_ context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) { return append(state, llms.TextParts("ai", "Node 2")), nil }) g.AddEdge("node1", "node2") g.AddEdge("node2", graph.END) g.SetEntryPoint("node1") return g }, inputMessages: []llms.MessageContent{llms.TextParts("human", "Input")}, expectedOutput: []llms.MessageContent{ llms.TextParts("human", "Input"), llms.TextParts("ai", "Node 1"), llms.TextParts("ai", "Node 2"), }, expectedError: nil, }, { name: "Entry point not set", buildGraph: func() *graph.StateGraph[[]llms.MessageContent] { g := graph.NewStateGraph[[]llms.MessageContent]() g.AddNode("node1", "node1", func(_ context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) { return state, nil }) return g }, expectedError: graph.ErrEntryPointNotSet, }, { name: "Node not found", buildGraph: func() *graph.StateGraph[[]llms.MessageContent] { g := graph.NewStateGraph[[]llms.MessageContent]() g.AddNode("node1", "node1", func(_ context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) { return state, nil }) g.AddEdge("node1", "node2") g.SetEntryPoint("node1") return g }, expectedError: fmt.Errorf("%w: node2", graph.ErrNodeNotFound), }, { name: "No outgoing edge", buildGraph: func() 
*graph.StateGraph[[]llms.MessageContent] { g := graph.NewStateGraph[[]llms.MessageContent]() g.AddNode("node1", "node1", func(_ context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) { return state, nil }) g.SetEntryPoint("node1") return g }, expectedError: fmt.Errorf("%w: node1", graph.ErrNoOutgoingEdge), }, { name: "Error in node function", buildGraph: func() *graph.StateGraph[[]llms.MessageContent] { g := graph.NewStateGraph[[]llms.MessageContent]() g.AddNode("node1", "node1", func(_ context.Context, _ []llms.MessageContent) ([]llms.MessageContent, error) { return nil, errors.New("node error") }) g.AddEdge("node1", graph.END) g.SetEntryPoint("node1") return g }, expectedError: errors.New("error in node node1: node error"), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { t.Parallel() g := tc.buildGraph() runnable, err := g.Compile() if err != nil { if tc.expectedError == nil || !errors.Is(err, tc.expectedError) { t.Fatalf("unexpected compile error: %v", err) } return } output, err := runnable.Invoke(context.Background(), tc.inputMessages) if err != nil { if tc.expectedError == nil || err.Error() != tc.expectedError.Error() { t.Fatalf("unexpected invoke error: '%v', expected '%v'", err, tc.expectedError) } return } if tc.expectedError != nil { t.Fatalf("expected error %v, but got nil", tc.expectedError) } if len(output) != len(tc.expectedOutput) { t.Fatalf("expected output length %d, but got %d", len(tc.expectedOutput), len(output)) } for i, msg := range output { got := fmt.Sprint(msg) expected := fmt.Sprint(tc.expectedOutput[i]) if got != expected { t.Errorf("expected output[%d] content %q, but got %q", i, expected, got) } } }) } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/builtin_listeners.go
graph/builtin_listeners.go
package graph import ( "context" "fmt" "io" "log" "maps" "os" "sync" "time" ) // ProgressListener provides progress tracking with customizable output type ProgressListener struct { writer io.Writer nodeSteps map[string]string mutex sync.RWMutex showTiming bool showDetails bool prefix string } // NewProgressListener creates a new progress listener func NewProgressListener() *ProgressListener { return &ProgressListener{ writer: os.Stdout, nodeSteps: make(map[string]string), showTiming: true, showDetails: false, prefix: "🔄", } } // NewProgressListenerWithWriter creates a progress listener with custom writer func NewProgressListenerWithWriter(writer io.Writer) *ProgressListener { return &ProgressListener{ writer: writer, nodeSteps: make(map[string]string), showTiming: true, showDetails: false, prefix: "🔄", } } // WithTiming enables or disables timing information func (pl *ProgressListener) WithTiming(enabled bool) *ProgressListener { pl.showTiming = enabled return pl } // WithDetails enables or disables detailed output func (pl *ProgressListener) WithDetails(enabled bool) *ProgressListener { pl.showDetails = enabled return pl } // WithPrefix sets a custom prefix for progress messages func (pl *ProgressListener) WithPrefix(prefix string) *ProgressListener { pl.prefix = prefix return pl } // SetNodeStep sets a custom message for a specific node func (pl *ProgressListener) SetNodeStep(nodeName, step string) { pl.mutex.Lock() defer pl.mutex.Unlock() pl.nodeSteps[nodeName] = step } // OnNodeEvent implements the NodeListener[map[string]any] interface func (pl *ProgressListener) OnNodeEvent(_ context.Context, event NodeEvent, nodeName string, state map[string]any, err error) { pl.mutex.RLock() customStep, hasCustom := pl.nodeSteps[nodeName] pl.mutex.RUnlock() var message string switch event { case NodeEventStart: if hasCustom { message = fmt.Sprintf("%s %s", pl.prefix, customStep) } else { message = fmt.Sprintf("%s Starting %s", pl.prefix, nodeName) } case NodeEventComplete: 
emoji := "✅" if hasCustom { message = fmt.Sprintf("%s %s completed", emoji, customStep) } else { message = fmt.Sprintf("%s %s completed", emoji, nodeName) } case NodeEventError: emoji := "❌" message = fmt.Sprintf("%s %s failed: %v", emoji, nodeName, err) case NodeEventProgress: if hasCustom { message = fmt.Sprintf("%s %s (in progress)", pl.prefix, customStep) } else { message = fmt.Sprintf("%s %s (in progress)", pl.prefix, nodeName) } } if pl.showTiming { timestamp := time.Now().Format("15:04:05") message = fmt.Sprintf("[%s] %s", timestamp, message) } if pl.showDetails && state != nil { message = fmt.Sprintf("%s | State: %v", message, state) } fmt.Fprintln(pl.writer, message) } // LoggingListener provides structured logging for node events type LoggingListener struct { logger *log.Logger logLevel LogLevel includeState bool } // LogLevel defines logging levels type LogLevel int const ( LogLevelDebug LogLevel = iota LogLevelInfo LogLevelWarn LogLevelError ) // NewLoggingListener creates a new logging listener func NewLoggingListener() *LoggingListener { return &LoggingListener{ logger: log.New(os.Stdout, "[GRAPH] ", log.LstdFlags), logLevel: LogLevelInfo, includeState: false, } } // NewLoggingListenerWithLogger creates a logging listener with custom logger func NewLoggingListenerWithLogger(logger *log.Logger) *LoggingListener { return &LoggingListener{ logger: logger, logLevel: LogLevelInfo, includeState: false, } } // WithLogLevel sets the minimum log level func (ll *LoggingListener) WithLogLevel(level LogLevel) *LoggingListener { ll.logLevel = level return ll } // WithState enables or disables state logging func (ll *LoggingListener) WithState(enabled bool) *LoggingListener { ll.includeState = enabled return ll } // OnNodeEvent implements the NodeListener[map[string]any] interface func (ll *LoggingListener) OnNodeEvent(_ context.Context, event NodeEvent, nodeName string, state map[string]any, err error) { var level LogLevel var prefix string switch event { case 
NodeEventStart: level = LogLevelInfo prefix = "START" case NodeEventComplete: level = LogLevelInfo prefix = "COMPLETE" case NodeEventProgress: level = LogLevelDebug prefix = "PROGRESS" case NodeEventError: level = LogLevelError prefix = "ERROR" } if level < ll.logLevel { return } message := fmt.Sprintf("%s %s", prefix, nodeName) if err != nil { message = fmt.Sprintf("%s: %v", message, err) } if ll.includeState && state != nil { message = fmt.Sprintf("%s | State: %v", message, state) } ll.logger.Println(message) } // MetricsListener collects performance and execution metrics type MetricsListener struct { mutex sync.RWMutex nodeExecutions map[string]int nodeDurations map[string][]time.Duration nodeErrors map[string]int totalExecutions int startTimes map[string]time.Time } // NewMetricsListener creates a new metrics listener func NewMetricsListener() *MetricsListener { return &MetricsListener{ nodeExecutions: make(map[string]int), nodeDurations: make(map[string][]time.Duration), nodeErrors: make(map[string]int), startTimes: make(map[string]time.Time), } } // OnNodeEvent implements the NodeListener[map[string]any] interface func (ml *MetricsListener) OnNodeEvent(_ context.Context, event NodeEvent, nodeName string, _ map[string]any, _ error) { ml.mutex.Lock() defer ml.mutex.Unlock() switch event { case NodeEventStart: ml.startTimes[nodeName] = time.Now() ml.totalExecutions++ case NodeEventComplete: ml.nodeExecutions[nodeName]++ if startTime, ok := ml.startTimes[nodeName]; ok { duration := time.Since(startTime) ml.nodeDurations[nodeName] = append(ml.nodeDurations[nodeName], duration) delete(ml.startTimes, nodeName) } case NodeEventError: ml.nodeErrors[nodeName]++ if startTime, ok := ml.startTimes[nodeName]; ok { duration := time.Since(startTime) ml.nodeDurations[nodeName] = append(ml.nodeDurations[nodeName], duration) delete(ml.startTimes, nodeName) } case NodeEventProgress: // Progress events are tracked but don't affect timing metrics } } // GetNodeExecutions returns 
the number of executions for each node func (ml *MetricsListener) GetNodeExecutions() map[string]int { ml.mutex.RLock() defer ml.mutex.RUnlock() result := make(map[string]int) maps.Copy(result, ml.nodeExecutions) return result } // GetNodeErrors returns the number of errors for each node func (ml *MetricsListener) GetNodeErrors() map[string]int { ml.mutex.RLock() defer ml.mutex.RUnlock() result := make(map[string]int) maps.Copy(result, ml.nodeErrors) return result } // GetNodeAverageDuration returns the average duration for each node func (ml *MetricsListener) GetNodeAverageDuration() map[string]time.Duration { ml.mutex.RLock() defer ml.mutex.RUnlock() result := make(map[string]time.Duration) for nodeName, durations := range ml.nodeDurations { if len(durations) > 0 { var total time.Duration for _, d := range durations { total += d } result[nodeName] = total / time.Duration(len(durations)) } } return result } // GetTotalExecutions returns the total number of node executions func (ml *MetricsListener) GetTotalExecutions() int { ml.mutex.RLock() defer ml.mutex.RUnlock() return ml.totalExecutions } // PrintSummary prints a summary of collected metrics func (ml *MetricsListener) PrintSummary(writer io.Writer) { ml.mutex.RLock() defer ml.mutex.RUnlock() if writer == nil { writer = os.Stdout } fmt.Fprintln(writer, "\n=== Node Execution Metrics ===") fmt.Fprintf(writer, "Total Executions: %d\n", ml.totalExecutions) fmt.Fprintln(writer) fmt.Fprintln(writer, "Node Executions:") for nodeName, count := range ml.nodeExecutions { fmt.Fprintf(writer, " %s: %d\n", nodeName, count) } fmt.Fprintln(writer) fmt.Fprintln(writer, "Average Durations:") for nodeName, durations := range ml.nodeDurations { if len(durations) > 0 { var total time.Duration for _, d := range durations { total += d } avg := total / time.Duration(len(durations)) fmt.Fprintf(writer, " %s: %v (from %d samples)\n", nodeName, avg, len(durations)) } } if len(ml.nodeErrors) > 0 { fmt.Fprintln(writer) 
fmt.Fprintln(writer, "Errors:") for nodeName, count := range ml.nodeErrors { fmt.Fprintf(writer, " %s: %d errors\n", nodeName, count) } } } // Reset clears all collected metrics func (ml *MetricsListener) Reset() { ml.mutex.Lock() defer ml.mutex.Unlock() ml.nodeExecutions = make(map[string]int) ml.nodeDurations = make(map[string][]time.Duration) ml.nodeErrors = make(map[string]int) ml.startTimes = make(map[string]time.Time) ml.totalExecutions = 0 } // ChatListener provides real-time chat-style updates type ChatListener struct { writer io.Writer nodeMessages map[string]string mutex sync.RWMutex showTime bool } // NewChatListener creates a new chat-style listener func NewChatListener() *ChatListener { return &ChatListener{ writer: os.Stdout, nodeMessages: make(map[string]string), showTime: true, } } // NewChatListenerWithWriter creates a chat listener with custom writer func NewChatListenerWithWriter(writer io.Writer) *ChatListener { return &ChatListener{ writer: writer, nodeMessages: make(map[string]string), showTime: true, } } // WithTime enables or disables timestamps func (cl *ChatListener) WithTime(enabled bool) *ChatListener { cl.showTime = enabled return cl } // SetNodeMessage sets a custom message for a specific node func (cl *ChatListener) SetNodeMessage(nodeName, message string) { cl.mutex.Lock() defer cl.mutex.Unlock() cl.nodeMessages[nodeName] = message } // OnNodeEvent implements the NodeListener[map[string]any] interface func (cl *ChatListener) OnNodeEvent(_ context.Context, event NodeEvent, nodeName string, _ map[string]any, err error) { cl.mutex.RLock() customMessage, hasCustom := cl.nodeMessages[nodeName] cl.mutex.RUnlock() var message string switch event { case NodeEventStart: if hasCustom { message = customMessage } else { message = fmt.Sprintf("🤖 Starting %s...", nodeName) } case NodeEventComplete: if hasCustom { message = fmt.Sprintf("✅ %s completed", customMessage) } else { message = fmt.Sprintf("✅ %s finished", nodeName) } case NodeEventError: 
message = fmt.Sprintf("❌ Error in %s: %v", nodeName, err) case NodeEventProgress: if hasCustom { message = fmt.Sprintf("⏳ %s...", customMessage) } else { message = fmt.Sprintf("⏳ %s in progress...", nodeName) } } if cl.showTime { timestamp := time.Now().Format("15:04:05") fmt.Fprintf(cl.writer, "[%s] %s\n", timestamp, message) } else { fmt.Fprintf(cl.writer, "%s\n", message) } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/subgraph.go
graph/subgraph.go
package graph import ( "context" "fmt" ) // Subgraph represents a nested graph that can be used as a node type Subgraph[S any] struct { name string graph *StateGraph[S] runnable *StateRunnable[S] } // NewSubgraph creates a new generic subgraph func NewSubgraph[S any](name string, graph *StateGraph[S]) (*Subgraph[S], error) { runnable, err := graph.Compile() if err != nil { return nil, fmt.Errorf("failed to compile subgraph %s: %w", name, err) } return &Subgraph[S]{ name: name, graph: graph, runnable: runnable, }, nil } // Execute runs the subgraph as a node func (s *Subgraph[S]) Execute(ctx context.Context, state S) (S, error) { result, err := s.runnable.Invoke(ctx, state) if err != nil { var zero S return zero, fmt.Errorf("subgraph %s execution failed: %w", s.name, err) } return result, nil } // AddSubgraph adds a subgraph as a node in the parent graph func AddSubgraph[S, SubS any](g *StateGraph[S], name string, subgraph *StateGraph[SubS], converter func(S) SubS, resultConverter func(SubS) S) error { sg, err := NewSubgraph(name, subgraph) if err != nil { return err } // Wrap the execute function to match the state type wrappedFn := func(ctx context.Context, state S) (S, error) { // Convert S to SubS subState := converter(state) result, err := sg.Execute(ctx, subState) if err != nil { var zero S return zero, err } // Convert result back to S return resultConverter(result), nil } g.AddNode(name, "Subgraph: "+name, wrappedFn) return nil } // CreateSubgraph creates and adds a subgraph using a builder function func CreateSubgraph[S, SubS any](g *StateGraph[S], name string, builder func(*StateGraph[SubS]) error, converter func(S) SubS, resultConverter func(SubS) S) error { subgraph := NewStateGraph[SubS]() if err := builder(subgraph); err != nil { return err } return AddSubgraph(g, name, subgraph, converter, resultConverter) } // CompositeGraph allows composing multiple graphs together type CompositeGraph[S any] struct { graphs map[string]*StateGraph[S] main 
*StateGraph[S] } // NewCompositeGraph creates a new composite graph func NewCompositeGraph[S any]() *CompositeGraph[S] { return &CompositeGraph[S]{ graphs: make(map[string]*StateGraph[S]), main: NewStateGraph[S](), } } // AddGraph adds a named graph to the composite func (cg *CompositeGraph[S]) AddGraph(name string, graph *StateGraph[S]) { cg.graphs[name] = graph } // Connect connects two graphs with a transformation function func (cg *CompositeGraph[S]) Connect( fromGraph string, fromNode string, toGraph string, toNode string, transform func(S) S, ) error { // Create a bridge node that transforms state between graphs bridgeName := fmt.Sprintf("%s_%s_to_%s_%s", fromGraph, fromNode, toGraph, toNode) cg.main.AddNode(bridgeName, "Bridge: "+bridgeName, func(_ context.Context, state S) (S, error) { if transform != nil { return transform(state), nil } return state, nil }) return nil } // Compile compiles the composite graph into a single runnable func (cg *CompositeGraph[S]) Compile() (*StateRunnable[S], error) { // Add all subgraphs to the main graph for name, graph := range cg.graphs { if err := AddSubgraph(cg.main, name, graph, func(s S) S { return s }, func(s S) S { return s }); err != nil { return nil, fmt.Errorf("failed to add subgraph %s: %w", name, err) } } return cg.main.Compile() } // RecursiveSubgraph allows a subgraph to call itself recursively type RecursiveSubgraph[S any] struct { name string graph *StateGraph[S] maxDepth int condition func(S, int) bool // Should continue recursion? 
} // NewRecursiveSubgraph creates a new recursive subgraph func NewRecursiveSubgraph[S any]( name string, maxDepth int, condition func(S, int) bool, ) *RecursiveSubgraph[S] { return &RecursiveSubgraph[S]{ name: name, graph: NewStateGraph[S](), maxDepth: maxDepth, condition: condition, } } // Execute runs the recursive subgraph func (rs *RecursiveSubgraph[S]) Execute(ctx context.Context, state S) (S, error) { return rs.executeRecursive(ctx, state, 0) } func (rs *RecursiveSubgraph[S]) executeRecursive(ctx context.Context, state S, depth int) (S, error) { // Check max depth if depth >= rs.maxDepth { return state, nil } // Check condition if !rs.condition(state, depth) { return state, nil } // Compile and execute the graph runnable, err := rs.graph.Compile() if err != nil { var zero S return zero, fmt.Errorf("failed to compile recursive subgraph at depth %d: %w", depth, err) } result, err := runnable.Invoke(ctx, state) if err != nil { var zero S return zero, fmt.Errorf("recursive execution failed at depth %d: %w", depth, err) } // Recurse with the result return rs.executeRecursive(ctx, result, depth+1) } // AddRecursiveSubgraph adds a recursive subgraph to the parent graph func AddRecursiveSubgraph[S, SubS any]( g *StateGraph[S], name string, maxDepth int, condition func(SubS, int) bool, builder func(*StateGraph[SubS]) error, converter func(S) SubS, resultConverter func(SubS) S, ) error { rs := NewRecursiveSubgraph(name, maxDepth, condition) if err := builder(rs.graph); err != nil { return err } wrappedFn := func(ctx context.Context, state S) (S, error) { subState := converter(state) result, err := rs.Execute(ctx, subState) if err != nil { var zero S return zero, err } return resultConverter(result), nil } g.AddNode(name, "Recursive subgraph: "+name, wrappedFn) return nil } // AddNestedConditionalSubgraph creates a subgraph with its own conditional routing func AddNestedConditionalSubgraph[S, SubS any]( g *StateGraph[S], name string, router func(S) string, subgraphs 
map[string]*StateGraph[SubS], converter func(S) SubS, resultConverter func(SubS) S, ) error { // Create a wrapper node that routes to different subgraphs wrappedFn := func(ctx context.Context, state S) (S, error) { // Determine which subgraph to use subgraphName := router(state) subgraph, exists := subgraphs[subgraphName] if !exists { var zero S return zero, fmt.Errorf("subgraph %s not found", subgraphName) } // Convert state to SubS subState := converter(state) // Compile and execute the selected subgraph runnable, err := subgraph.Compile() if err != nil { var zero S return zero, fmt.Errorf("failed to compile subgraph %s: %w", subgraphName, err) } result, err := runnable.Invoke(ctx, subState) if err != nil { var zero S return zero, err } // Convert result back to S return resultConverter(result), nil } g.AddNode(name, "Nested conditional subgraph: "+name, wrappedFn) return nil }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/parallel_test.go
graph/parallel_test.go
package graph_test import ( "context" "fmt" "sync/atomic" "testing" "time" "github.com/smallnest/langgraphgo/graph" ) func TestParallelNodes(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() // Track execution order var counter int32 // Add parallel nodes parallelFuncs := make(map[string]func(context.Context, map[string]any) (map[string]any, error)) for i := range 5 { id := fmt.Sprintf("worker_%d", i) parallelFuncs[id] = func(workerID string) func(context.Context, map[string]any) (map[string]any, error) { return func(ctx context.Context, state map[string]any) (map[string]any, error) { // Simulate work time.Sleep(10 * time.Millisecond) atomic.AddInt32(&counter, 1) return map[string]any{"res": fmt.Sprintf("result_%s", workerID)}, nil } }(id) } // Merger function merger := func(results []map[string]any) map[string]any { // Combine results into a single map under "results" key res := make([]any, len(results)) for i, r := range results { res[i] = r["res"] } return map[string]any{"results": res} } g.AddParallelNodes("parallel_group", parallelFuncs, merger) g.AddEdge("parallel_group", graph.END) g.SetEntryPoint("parallel_group") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } start := time.Now() result, err := runnable.Invoke(context.Background(), map[string]any{"input": "input"}) duration := time.Since(start) if err != nil { t.Fatalf("Execution failed: %v", err) } // Check that all workers executed if atomic.LoadInt32(&counter) != 5 { t.Errorf("Expected 5 workers to execute, got %d", counter) } // Check that execution was parallel (should be faster than sequential) // 5 workers with 10ms each = 50ms sequential, should be ~10ms parallel if duration > 30*time.Millisecond { t.Logf("Warning: Parallel execution took %v, might not be parallel", duration) } // Check results t.Logf("Result: %v (type: %T)", result, result) var results []any if val, ok := result["results"]; ok { if arr, ok := val.([]any); ok { results = 
arr } } if results == nil { t.Errorf("Expected results to be present, got %v", result) return } if len(results) != 5 { t.Errorf("Expected 5 results, got %d", len(results)) } } func TestMapReduceNode(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() // Create map functions that process parts of data mapFuncs := map[string]func(context.Context, map[string]any) (map[string]any, error){ "map1": func(ctx context.Context, state map[string]any) (map[string]any, error) { nums := state["input"].([]int) sum := 0 for i := 0; i < len(nums)/2; i++ { sum += nums[i] } return map[string]any{"partial": sum}, nil }, "map2": func(ctx context.Context, state map[string]any) (map[string]any, error) { nums := state["input"].([]int) sum := 0 for i := len(nums) / 2; i < len(nums); i++ { sum += nums[i] } return map[string]any{"partial": sum}, nil }, } // Reducer function reducer := func(results []map[string]any) (map[string]any, error) { total := 0 for _, r := range results { total += r["partial"].(int) } return map[string]any{"value": total}, nil } g.AddMapReduceNode("sum_parallel", mapFuncs, reducer) g.AddEdge("sum_parallel", graph.END) g.SetEntryPoint("sum_parallel") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // Test with array of numbers input := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} result, err := runnable.Invoke(context.Background(), map[string]any{"input": input}) if err != nil { t.Fatalf("Execution failed: %v", err) } // Extract result var actualSum int if val, ok := result["value"]; ok { if v, ok := val.(int); ok { actualSum = v } } // Sum of 1-10 is 55 if actualSum != 55 { t.Errorf("Expected sum of 55, got %v (full result: %v)", actualSum, result) } } func TestFanOutFanIn(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() // Source node g.AddNode("source", "source", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) // Worker functions workers := 
map[string]func(context.Context, map[string]any) (map[string]any, error){ "worker1": func(ctx context.Context, state map[string]any) (map[string]any, error) { n := state["input"].(int) return map[string]any{"res": n * 2}, nil }, "worker2": func(ctx context.Context, state map[string]any) (map[string]any, error) { n := state["input"].(int) return map[string]any{"res": n * 3}, nil }, "worker3": func(ctx context.Context, state map[string]any) (map[string]any, error) { n := state["input"].(int) return map[string]any{"res": n * 4}, nil }, } // Aggregator for FanOutFanIn aggregator := func(results []map[string]any) map[string]any { // Collect "res" values into a slice under "values" key vals := make([]any, len(results)) for i, r := range results { vals[i] = r["res"] } return map[string]any{"values": vals} } // Collector function - receives the state produced by aggregator collector := func(state map[string]any) (map[string]any, error) { results := state["values"].([]any) sum := 0 for _, r := range results { sum += r.(int) } return map[string]any{"value": sum}, nil } g.FanOutFanIn("source", []string{"worker1", "worker2", "worker3"}, "collector", workers, aggregator, collector) g.AddEdge("collector", graph.END) g.SetEntryPoint("source") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } result, err := runnable.Invoke(context.Background(), map[string]any{"input": 10}) if err != nil { t.Fatalf("Execution failed: %v", err) } // 10*2 + 10*3 + 10*4 = 20 + 30 + 40 = 90 // Extract from value wrapper var actualSum int if val, ok := result["value"]; ok { if v, ok := val.(int); ok { actualSum = v } } if actualSum != 90 { t.Errorf("Expected 90, got %v (full result: %v)", actualSum, result) } } func TestParallelErrorHandling(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() // Add parallel nodes where one fails parallelFuncs := map[string]func(context.Context, map[string]any) (map[string]any, error){ "success1": func(ctx 
context.Context, state map[string]any) (map[string]any, error) { return map[string]any{"ok": 1}, nil }, "failure": func(ctx context.Context, state map[string]any) (map[string]any, error) { return nil, fmt.Errorf("deliberate failure") }, "success2": func(ctx context.Context, state map[string]any) (map[string]any, error) { return map[string]any{"ok": 2}, nil }, } merger := func(results []map[string]any) map[string]any { return map[string]any{} } g.AddParallelNodes("parallel_with_error", parallelFuncs, merger) g.AddEdge("parallel_with_error", graph.END) g.SetEntryPoint("parallel_with_error") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } _, err = runnable.Invoke(context.Background(), map[string]any{"input": "input"}) if err == nil { t.Error("Expected error from parallel execution") } } func TestParallelContextCancellation(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() // Add parallel nodes with different delays parallelFuncs := map[string]func(context.Context, map[string]any) (map[string]any, error){ "fast": func(ctx context.Context, _ map[string]any) (map[string]any, error) { select { case <-time.After(10 * time.Millisecond): return map[string]any{"fast": "done"}, nil case <-ctx.Done(): return nil, ctx.Err() } }, "slow": func(ctx context.Context, _ map[string]any) (map[string]any, error) { select { case <-time.After(1 * time.Second): return map[string]any{"slow": "done"}, nil case <-ctx.Done(): return nil, ctx.Err() } }, } merger := func(results []map[string]any) map[string]any { return map[string]any{} } g.AddParallelNodes("parallel_cancellable", parallelFuncs, merger) g.AddEdge("parallel_cancellable", graph.END) g.SetEntryPoint("parallel_cancellable") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // Create context with short timeout ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() start := time.Now() _, err = 
runnable.Invoke(ctx, map[string]any{"input": "input"}) duration := time.Since(start) if err == nil { t.Error("Expected timeout error") } // Should timeout quickly, not wait for slow node if duration > 100*time.Millisecond { t.Errorf("Cancellation took too long: %v", duration) } } func BenchmarkParallelExecution(b *testing.B) { g := graph.NewStateGraph[map[string]any]() // Create many parallel workers workers := make(map[string]func(context.Context, map[string]any) (map[string]any, error)) for i := range 10 { workerID := fmt.Sprintf("worker_%d", i) workers[workerID] = func(ctx context.Context, state map[string]any) (map[string]any, error) { // Simulate some work n := state["input"].(int) result := 0 for j := range 100 { result += n * j } return map[string]any{"res": result}, nil } } merger := func(results []map[string]any) map[string]any { return map[string]any{} } g.AddParallelNodes("parallel", workers, merger) g.AddEdge("parallel", graph.END) g.SetEntryPoint("parallel") runnable, err := g.Compile() if err != nil { b.Fatalf("Failed to compile: %v", err) } ctx := context.Background() for i := 0; b.Loop(); i++ { _, err := runnable.Invoke(ctx, map[string]any{"input": i}) if err != nil { b.Fatalf("Execution failed: %v", err) } } } func BenchmarkSequentialVsParallel(b *testing.B) { // Use int type for this benchmark since it's simpler workFunc := func(ctx context.Context, state int) (int, error) { // Simulate CPU-bound work result := 0 for i := range 1000 { result += state * i } return result, nil } b.Run("Sequential", func(b *testing.B) { g := graph.NewStateGraph[int]() // Chain nodes sequentially for i := range 5 { nodeName := fmt.Sprintf("node_%d", i) g.AddNode(nodeName, nodeName, workFunc) if i > 0 { prevNode := fmt.Sprintf("node_%d", i-1) g.AddEdge(prevNode, nodeName) } } g.AddEdge("node_4", graph.END) g.SetEntryPoint("node_0") runnable, _ := g.Compile() ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, _ = runnable.Invoke(ctx, i) // 
StateGraph[int] expects int } }) b.Run("Parallel", func(b *testing.B) { g := graph.NewStateGraph[int]() // Add nodes in parallel - create a node that branches to multiple workers g.AddNode("split", "split", func(ctx context.Context, state int) (int, error) { return state, nil }) for i := range 5 { workerName := fmt.Sprintf("worker_%d", i) g.AddNode(workerName, workerName, workFunc) g.AddEdge("split", workerName) g.AddEdge(workerName, graph.END) } g.SetEntryPoint("split") runnable, _ := g.Compile() ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, _ = runnable.Invoke(ctx, i) // StateGraph[int] expects int, not map[string]any } }) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/tracing_test.go
graph/tracing_test.go
package graph_test import ( "context" "errors" "fmt" "strings" "testing" "github.com/smallnest/langgraphgo/graph" ) func TestTracer_StartEndSpan(t *testing.T) { t.Parallel() tracer := graph.NewTracer() ctx := context.Background() // Test starting a span span := tracer.StartSpan(ctx, graph.TraceEventNodeStart, "test_node") if span.ID == "" { t.Error("Span ID should not be empty") } if span.Event != graph.TraceEventNodeStart { t.Errorf("Expected event %v, got %v", graph.TraceEventNodeStart, span.Event) } if span.NodeName != "test_node" { t.Errorf("Expected node name 'test_node', got %v", span.NodeName) } if span.StartTime.IsZero() { t.Error("Start time should be set") } if !span.EndTime.IsZero() { t.Error("End time should not be set yet") } // Test ending a span testState := "test_state" tracer.EndSpan(ctx, span, testState, nil) if span.EndTime.IsZero() { t.Error("End time should be set after ending span") } if span.Duration <= 0 { t.Error("Duration should be positive after ending span") } if span.State != testState { t.Errorf("Expected state %v, got %v", testState, span.State) } if span.Event != graph.TraceEventNodeEnd { t.Errorf("Expected event to be updated to %v, got %v", graph.TraceEventNodeEnd, span.Event) } } func TestTracer_SpanWithError(t *testing.T) { t.Parallel() tracer := graph.NewTracer() ctx := context.Background() span := tracer.StartSpan(ctx, graph.TraceEventNodeStart, "error_node") testError := fmt.Errorf("test error") tracer.EndSpan(ctx, span, nil, testError) if !errors.Is(span.Error, testError) { t.Errorf("Expected error %v, got %v", testError, span.Error) } if span.Event != graph.TraceEventNodeError { t.Errorf("Expected event to be %v for error case, got %v", graph.TraceEventNodeError, span.Event) } } func TestTracer_Hooks(t *testing.T) { t.Parallel() tracer := graph.NewTracer() ctx := context.Background() // Track hook calls hookCalls := make([]graph.TraceEvent, 0) hook := graph.TraceHookFunc(func(ctx context.Context, span *graph.TraceSpan) { 
hookCalls = append(hookCalls, span.Event) }) tracer.AddHook(hook) // Create and end a span span := tracer.StartSpan(ctx, graph.TraceEventNodeStart, "hooked_node") tracer.EndSpan(ctx, span, "state", nil) // Should have 2 hook calls: start and end if len(hookCalls) != 2 { t.Errorf("Expected 2 hook calls, got %d", len(hookCalls)) } // Verify the hook calls if hookCalls[0] != graph.TraceEventNodeStart { t.Errorf("First hook call should be start event, got %v", hookCalls[0]) } if hookCalls[1] != graph.TraceEventNodeEnd { t.Errorf("Second hook call should be end event, got %v", hookCalls[1]) } } func TestTracer_EdgeTraversal(t *testing.T) { t.Parallel() tracer := graph.NewTracer() ctx := context.Background() var edgeSpan *graph.TraceSpan hook := graph.TraceHookFunc(func(ctx context.Context, span *graph.TraceSpan) { if span.Event == graph.TraceEventEdgeTraversal { edgeSpan = span } }) tracer.AddHook(hook) tracer.TraceEdgeTraversal(ctx, "node1", "node2") if edgeSpan == nil { t.Fatal("Edge traversal span was not captured") } if edgeSpan.FromNode != "node1" { t.Errorf("Expected FromNode 'node1', got %v", edgeSpan.FromNode) } if edgeSpan.ToNode != "node2" { t.Errorf("Expected ToNode 'node2', got %v", edgeSpan.ToNode) } if edgeSpan.Event != graph.TraceEventEdgeTraversal { t.Errorf("Expected event %v, got %v", graph.TraceEventEdgeTraversal, edgeSpan.Event) } } func TestContextWithSpan(t *testing.T) { t.Parallel() ctx := context.Background() span := &graph.TraceSpan{ID: "test_span"} // Test storing span in context newCtx := graph.ContextWithSpan(ctx, span) // Test retrieving span from context retrievedSpan := graph.SpanFromContext(newCtx) if retrievedSpan == nil { t.Fatal("Should be able to retrieve span from context") } if retrievedSpan.ID != "test_span" { t.Errorf("Expected span ID 'test_span', got %v", retrievedSpan.ID) } // Test retrieving from context without span emptySpan := graph.SpanFromContext(ctx) if emptySpan != nil { t.Error("Should return nil when no span in 
context") } } func TestTracedRunnable_Invoke(t *testing.T) { t.Parallel() // Create a simple graph - use generic map[string]any for typed Runnable g := graph.NewStateGraph[map[string]any]() g.AddNode("node1", "node1", func(ctx context.Context, state map[string]any) (map[string]any, error) { return map[string]any{"value": "processed_" + fmt.Sprintf("%v", state["value"])}, nil }) g.AddNode("node2", "node2", func(ctx context.Context, state map[string]any) (map[string]any, error) { return map[string]any{"value": "final_" + fmt.Sprintf("%v", state["value"])}, nil }) g.AddEdge("node1", "node2") g.AddEdge("node2", graph.END) g.SetEntryPoint("node1") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile graph: %v", err) } // Create tracer and use SetTracer directly for node-level tracing tracer := graph.NewTracer() runnable.SetTracer(tracer) // Collect trace events events := make([]string, 0) hook := graph.TraceHookFunc(func(ctx context.Context, span *graph.TraceSpan) { events = append(events, fmt.Sprintf("%v:%v", span.Event, span.NodeName)) }) tracer.AddHook(hook) // Execute the graph ctx := context.Background() result, err := runnable.Invoke(ctx, map[string]any{"value": "test"}) if err != nil { t.Fatalf("Execution failed: %v", err) } if result["value"].(string) != "final_processed_test" { t.Errorf("Expected result 'final_processed_test', got %v", result["value"]) } // Verify trace events - With SetTracer, we get graph-level tracing // For node-level tracing, the StateRunnable would need to hook into the node execution // This test verifies the basic tracing works if len(events) < 2 { t.Errorf("Expected at least 2 trace events (graph_start, graph_end), got %d: %v", len(events), events) } } func TestTracedRunnable_WithError(t *testing.T) { t.Parallel() // Create a graph with an error-producing node g := graph.NewStateGraph[map[string]any]() g.AddNode("error_node", "error_node", func(ctx context.Context, state map[string]any) (map[string]any, error) { 
return nil, fmt.Errorf("intentional error") }) g.AddEdge("error_node", graph.END) g.SetEntryPoint("error_node") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile graph: %v", err) } // Create tracer and use SetTracer directly tracer := graph.NewTracer() runnable.SetTracer(tracer) // Collect error events errorEvents := make([]*graph.TraceSpan, 0) hook := graph.TraceHookFunc(func(ctx context.Context, span *graph.TraceSpan) { if span.Event == graph.TraceEventNodeError { errorEvents = append(errorEvents, span) } }) tracer.AddHook(hook) // Execute the graph (should fail) ctx := context.Background() _, err = runnable.Invoke(ctx, map[string]any{"test": true}) if err == nil { t.Fatal("Expected execution to fail") } // Should have captured at least 1 error event if len(errorEvents) < 1 { t.Errorf("Expected at least 1 error event, got %d", len(errorEvents)) } if len(errorEvents) > 0 { errorSpan := errorEvents[0] if errorSpan.Error == nil { t.Error("Error span should contain the error") } if !strings.Contains(errorSpan.Error.Error(), "intentional error") { t.Errorf("Expected error to contain 'intentional error', got %v", errorSpan.Error) } } } func TestTracer_SpanHierarchy(t *testing.T) { t.Parallel() tracer := graph.NewTracer() ctx := context.Background() // Create parent span parentSpan := tracer.StartSpan(ctx, graph.TraceEventGraphStart, "") parentCtx := graph.ContextWithSpan(ctx, parentSpan) // Create child span childSpan := tracer.StartSpan(parentCtx, graph.TraceEventNodeStart, "child_node") // Child span should have parent ID if childSpan.ParentID != parentSpan.ID { t.Errorf("Expected child span parent ID %v, got %v", parentSpan.ID, childSpan.ParentID) } // Parent span should not have parent ID if parentSpan.ParentID != "" { t.Errorf("Expected parent span to have empty parent ID, got %v", parentSpan.ParentID) } } // Benchmark tests func BenchmarkTracer_StartEndSpan(b *testing.B) { tracer := graph.NewTracer() ctx := context.Background() for b.Loop() 
{ span := tracer.StartSpan(ctx, graph.TraceEventNodeStart, "benchmark_node") tracer.EndSpan(ctx, span, "state", nil) } } func BenchmarkTracedRunnable_Invoke(b *testing.B) { // Create a simple graph g := graph.NewStateGraph[map[string]any]() g.AddNode("node", "node", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddEdge("node", graph.END) g.SetEntryPoint("node") runnable, _ := g.Compile() tracer := graph.NewTracer() runnable.SetTracer(tracer) ctx := context.Background() for b.Loop() { _, _ = runnable.Invoke(ctx, map[string]any{"test": true}) tracer.Clear() // Clear spans to avoid memory buildup } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/update_state_test.go
graph/update_state_test.go
package graph import ( "context" "testing" "github.com/stretchr/testify/assert" ) func TestUpdateState(t *testing.T) { g := NewCheckpointableStateGraph[map[string]any]() // Setup schema with reducer schema := NewMapSchema() schema.RegisterReducer("count", func(curr, new any) (any, error) { if curr == nil { return new, nil } return curr.(int) + new.(int), nil }) g.SetSchema(schema) g.AddNode("A", "A", func(ctx context.Context, state map[string]any) (map[string]any, error) { return map[string]any{"count": 1}, nil }) g.SetEntryPoint("A") g.AddEdge("A", END) runnable, err := g.CompileCheckpointable() assert.NoError(t, err) // 1. Run initial graph with thread_id in config ctx := context.Background() threadID := runnable.GetExecutionID() config := &Config{ Configurable: map[string]any{ "thread_id": threadID, }, } res, err := runnable.InvokeWithConfig(ctx, map[string]any{"count": 10}, config) assert.NoError(t, err) mRes := res assert.Equal(t, 11, mRes["count"]) // 10 + 1 = 11 // 2. Update state manually (Human-in-the-loop) // We want to add 5 to the count // config already has thread_id from previous Invoke updateConfig := &Config{ Configurable: map[string]any{ "thread_id": threadID, }, } newConfig, err := runnable.UpdateState(ctx, updateConfig, "human", map[string]any{"count": 5}) assert.NoError(t, err) assert.NotEmpty(t, newConfig.Configurable["checkpoint_id"]) // 3. Verify state is updated snapshot, err := runnable.GetState(ctx, newConfig) assert.NoError(t, err) mSnap := snapshot.Values.(map[string]any) // Should be 11 (previous) + 5 (update) = 16 assert.Equal(t, 16, mSnap["count"]) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/context.go
graph/context.go
package graph import "context" type resumeValueKey struct{} // WithResumeValue adds a resume value to the context. // This value will be returned by Interrupt() when re-executing a node. func WithResumeValue(ctx context.Context, value any) context.Context { return context.WithValue(ctx, resumeValueKey{}, value) } // GetResumeValue retrieves the resume value from the context. func GetResumeValue(ctx context.Context) any { return ctx.Value(resumeValueKey{}) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/edge_cases_test.go
graph/edge_cases_test.go
package graph_test import ( "context" "errors" "fmt" "strings" "sync" "sync/atomic" "testing" "time" "github.com/smallnest/langgraphgo/graph" "github.com/tmc/langchaingo/llms" ) // TestEmptyGraph tests behavior with empty graphs func TestEmptyGraph(t *testing.T) { t.Parallel() tests := []struct { name string buildGraph func() *graph.StateGraph[map[string]any] expectError bool errorMsg string }{ { name: "Graph with no nodes", buildGraph: func() *graph.StateGraph[map[string]any] { g := graph.NewStateGraph[map[string]any]() return g }, expectError: true, errorMsg: "entry point not set", }, { name: "Graph with nodes but no entry point", buildGraph: func() *graph.StateGraph[map[string]any] { g := graph.NewStateGraph[map[string]any]() g.AddNode("node1", "node1", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) return g }, expectError: true, errorMsg: "entry point not set", }, { name: "Graph with self-referencing node", buildGraph: func() *graph.StateGraph[map[string]any] { g := graph.NewStateGraph[map[string]any]() g.AddNode("node1", "node1", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddEdge("node1", "node1") // Self-loop g.SetEntryPoint("node1") return g }, expectError: false, // Will create infinite loop, but that's valid }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() g := tt.buildGraph() _, err := g.Compile() if tt.expectError && err == nil { t.Error("Expected error but got none") } if !tt.expectError && err != nil { t.Errorf("Unexpected error: %v", err) } }) } } // TestLargeGraph tests performance with large graphs func TestLargeGraph(t *testing.T) { if testing.Short() { t.Skip("Skipping large graph test in short mode") } g := graph.NewStateGraph[int]() // Create a chain of 1000 nodes nodeCount := 1000 for i := range nodeCount { nodeName := fmt.Sprintf("node_%d", i) g.AddNode(nodeName, nodeName, func(ctx context.Context, state int) 
(int, error) { return state + 1, nil }) if i > 0 { prevNode := fmt.Sprintf("node_%d", i-1) g.AddEdge(prevNode, nodeName) } } lastNode := fmt.Sprintf("node_%d", nodeCount-1) g.AddEdge(lastNode, graph.END) g.SetEntryPoint("node_0") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile large graph: %v", err) } start := time.Now() result, err := runnable.Invoke(context.Background(), 0) duration := time.Since(start) if err != nil { t.Fatalf("Failed to execute large graph: %v", err) } if result != nodeCount { t.Errorf("Expected result %d, got %v", nodeCount, result) } t.Logf("Large graph with %d nodes executed in %v", nodeCount, duration) } // TestConcurrentExecution tests thread safety func TestConcurrentExecution(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() var counter int32 g.AddNode("increment", "increment", func(ctx context.Context, state map[string]any) (map[string]any, error) { atomic.AddInt32(&counter, 1) return state, nil }) g.AddEdge("increment", graph.END) g.SetEntryPoint("increment") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // Run multiple executions concurrently concurrency := 100 var wg sync.WaitGroup errors := make(chan error, concurrency) for i := range concurrency { wg.Add(1) go func(id int) { defer wg.Done() ctx := context.Background() _, err := runnable.Invoke(ctx, map[string]any{"id": id}) if err != nil { errors <- err } }(i) } wg.Wait() close(errors) // Check for errors for err := range errors { t.Errorf("Concurrent execution error: %v", err) } // Verify all executions completed finalCount := atomic.LoadInt32(&counter) if finalCount != int32(concurrency) { t.Errorf("Expected %d executions, got %d", concurrency, finalCount) } } // TestContextCancellation tests context cancellation handling func TestContextCancellation(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() // Add a slow node g.AddNode("slow_node", "slow_node", func(ctx 
context.Context, state map[string]any) (map[string]any, error) { select { case <-time.After(5 * time.Second): return state, nil case <-ctx.Done(): return nil, ctx.Err() } }) g.AddEdge("slow_node", graph.END) g.SetEntryPoint("slow_node") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // Create a context with timeout ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() start := time.Now() _, err = runnable.Invoke(ctx, map[string]any{"test": "value"}) duration := time.Since(start) if err == nil { t.Error("Expected context cancellation error") } if duration > 200*time.Millisecond { t.Errorf("Cancellation took too long: %v", duration) } } // TestPanicRecovery tests panic handling in node functions func TestPanicRecovery(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() g.AddNode("panic_node", "panic_node", func(ctx context.Context, state map[string]any) (map[string]any, error) { panic("intentional panic") }) g.AddEdge("panic_node", graph.END) g.SetEntryPoint("panic_node") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // This should handle the panic gracefully and convert it to an error ctx := context.Background() _, err = runnable.Invoke(ctx, map[string]any{"test": "value"}) if err == nil { t.Error("Expected error from panic recovery") } if err != nil && !strings.Contains(err.Error(), "panic in node panic_node") { t.Errorf("Expected panic error, got: %v", err) } } // TestComplexConditionalRouting tests complex conditional edge scenarios func TestComplexConditionalRouting(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() // Create a decision tree with multiple levels g.AddNode("root", "root", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddNode("branch_a", "branch_a", func(ctx context.Context, state map[string]any) (map[string]any, error) { path := 
append(state["path"].([]string), "A") state["path"] = path return state, nil }) g.AddNode("branch_b", "branch_b", func(ctx context.Context, state map[string]any) (map[string]any, error) { path := append(state["path"].([]string), "B") state["path"] = path return state, nil }) g.AddNode("leaf_a1", "leaf_a1", func(ctx context.Context, state map[string]any) (map[string]any, error) { path := append(state["path"].([]string), "A1") state["path"] = path return state, nil }) g.AddNode("leaf_a2", "leaf_a2", func(ctx context.Context, state map[string]any) (map[string]any, error) { path := append(state["path"].([]string), "A2") state["path"] = path return state, nil }) g.AddNode("leaf_b1", "leaf_b1", func(ctx context.Context, state map[string]any) (map[string]any, error) { path := append(state["path"].([]string), "B1") state["path"] = path return state, nil }) // Root conditional g.AddConditionalEdge("root", func(ctx context.Context, state map[string]any) string { if state["choice"].(string) == "A" { return "branch_a" } return "branch_b" }) // Branch A conditional g.AddConditionalEdge("branch_a", func(ctx context.Context, state map[string]any) string { if state["subchoice"].(int) == 1 { return "leaf_a1" } return "leaf_a2" }) // Branch B always goes to B1 g.AddEdge("branch_b", "leaf_b1") // All leaves go to END g.AddEdge("leaf_a1", graph.END) g.AddEdge("leaf_a2", graph.END) g.AddEdge("leaf_b1", graph.END) g.SetEntryPoint("root") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } // Test different paths tests := []struct { input map[string]any expectedPath []string }{ { input: map[string]any{ "choice": "A", "subchoice": 1, "path": []string{}, }, expectedPath: []string{"A", "A1"}, }, { input: map[string]any{ "choice": "A", "subchoice": 2, "path": []string{}, }, expectedPath: []string{"A", "A2"}, }, { input: map[string]any{ "choice": "B", "subchoice": 1, "path": []string{}, }, expectedPath: []string{"B", "B1"}, }, } for i, tt := range tests { 
t.Run(fmt.Sprintf("Path_%d", i), func(t *testing.T) { result, err := runnable.Invoke(context.Background(), tt.input) if err != nil { t.Fatalf("Execution failed: %v", err) } path := result["path"].([]string) if len(path) != len(tt.expectedPath) { t.Errorf("Expected path %v, got %v", tt.expectedPath, path) } else { for j, p := range path { if p != tt.expectedPath[j] { t.Errorf("Path mismatch at %d: expected %s, got %s", j, tt.expectedPath[j], p) } } } }) } } // TestStateModification tests various state modification scenarios // //nolint:gocognit,cyclop // Complex state modification scenarios require extensive testing func TestStateModification(t *testing.T) { t.Parallel() t.Run("Accumulator pattern", func(t *testing.T) { t.Parallel() g := graph.NewStateGraph[[]int]() g.AddNode("accumulate", "accumulate", func(ctx context.Context, state []int) ([]int, error) { return append(state, len(state)+1), nil }) g.AddConditionalEdge("accumulate", func(ctx context.Context, state []int) string { if len(state) >= 5 { return graph.END } return "accumulate" }) g.SetEntryPoint("accumulate") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } result, err := runnable.Invoke(context.Background(), []int{}) if err != nil { t.Fatalf("Execution failed: %v", err) } expected := []int{1, 2, 3, 4, 5} if len(result) != len(expected) { t.Errorf("Expected %v, got %v", expected, result) } else { for i := range expected { if result[i] != expected[i] { t.Errorf("Mismatch at index %d: expected %d, got %d", i, expected[i], result[i]) } } } }) t.Run("Map transformation", func(t *testing.T) { t.Parallel() g := graph.NewStateGraph[map[string]any]() g.AddNode("transform", "transform", func(ctx context.Context, state map[string]any) (map[string]any, error) { // Transform each value for k, v := range state { if num, ok := v.(int); ok { state[k] = num * 2 } } return state, nil }) g.AddEdge("transform", graph.END) g.SetEntryPoint("transform") runnable, err := g.Compile() if 
err != nil { t.Fatalf("Failed to compile: %v", err) } result, err := runnable.Invoke(context.Background(), map[string]any{ "a": 1, "b": 2, "c": 3, }) if err != nil { t.Fatalf("Execution failed: %v", err) } expected := map[string]any{ "a": 2, "b": 4, "c": 6, } for k, v := range expected { if result[k] != v { t.Errorf("Map mismatch for key %s: expected %v, got %v", k, v, result[k]) } } }) } // TestErrorPropagation tests error handling and propagation func TestErrorPropagation(t *testing.T) { t.Parallel() g := graph.NewStateGraph[string]() g.AddNode("node1", "node1", func(ctx context.Context, state string) (string, error) { return "step1", nil }) g.AddNode("node2", "node2", func(ctx context.Context, state string) (string, error) { return "", errors.New("deliberate error in node2") }) g.AddNode("node3", "node3", func(ctx context.Context, state string) (string, error) { t.Error("node3 should not be executed after error in node2") return state, nil }) g.AddEdge("node1", "node2") g.AddEdge("node2", "node3") g.AddEdge("node3", graph.END) g.SetEntryPoint("node1") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } _, err = runnable.Invoke(context.Background(), "start") if err == nil { t.Error("Expected error from node2") } else if !errors.Is(err, errors.New("deliberate error in node2")) { // Check error message contains expected text if err.Error() != "error in node node2: deliberate error in node2" { t.Errorf("Unexpected error: %v", err) } } } // TestMessageContentEdgeCases tests edge cases with message content func TestMessageContentEdgeCases(t *testing.T) { t.Parallel() tests := []struct { name string initialState []llms.MessageContent transform func([]llms.MessageContent) []llms.MessageContent validate func(*testing.T, []llms.MessageContent) }{ { name: "Empty message list", initialState: []llms.MessageContent{}, transform: func(msgs []llms.MessageContent) []llms.MessageContent { return append(msgs, llms.TextParts("ai", "Hello")) }, 
validate: func(t *testing.T, msgs []llms.MessageContent) { if len(msgs) != 1 { t.Errorf("Expected 1 message, got %d", len(msgs)) } }, }, { name: "Multiple parts in message", initialState: []llms.MessageContent{ { Role: "human", Parts: []llms.ContentPart{ llms.TextContent{Text: "Part 1"}, llms.TextContent{Text: "Part 2"}, }, }, }, transform: func(msgs []llms.MessageContent) []llms.MessageContent { // Count total parts totalParts := 0 for _, msg := range msgs { totalParts += len(msg.Parts) } return append(msgs, llms.TextParts("ai", fmt.Sprintf("You have %d parts", totalParts))) }, validate: func(t *testing.T, msgs []llms.MessageContent) { if len(msgs) != 2 { t.Errorf("Expected 2 messages, got %d", len(msgs)) } lastMsg := msgs[len(msgs)-1] if lastMsg.Parts[0].(llms.TextContent).Text != "You have 2 parts" { t.Errorf("Unexpected response: %v", lastMsg.Parts[0]) } }, }, { name: "Very long message", initialState: []llms.MessageContent{ llms.TextParts("human", string(make([]byte, 10000))), // 10KB message }, transform: func(msgs []llms.MessageContent) []llms.MessageContent { // Should handle large messages return append(msgs, llms.TextParts("ai", "Handled large message")) }, validate: func(t *testing.T, msgs []llms.MessageContent) { if len(msgs) != 2 { t.Errorf("Expected 2 messages, got %d", len(msgs)) } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() g := graph.NewStateGraph[[]llms.MessageContent]() g.AddNode("process", "process", func(ctx context.Context, state []llms.MessageContent) ([]llms.MessageContent, error) { return tt.transform(state), nil }) g.AddEdge("process", graph.END) g.SetEntryPoint("process") runnable, err := g.Compile() if err != nil { t.Fatalf("Failed to compile: %v", err) } result, err := runnable.Invoke(context.Background(), tt.initialState) if err != nil { t.Fatalf("Execution failed: %v", err) } tt.validate(t, result) }) } } // BenchmarkConditionalEdges benchmarks conditional edge performance func 
BenchmarkConditionalEdges(b *testing.B) { g := graph.NewStateGraph[int]() // Create a graph with conditional routing g.AddNode("router", "router", func(ctx context.Context, state int) (int, error) { return state, nil }) for i := range 10 { nodeName := fmt.Sprintf("node_%d", i) g.AddNode(nodeName, nodeName, func(ctx context.Context, state int) (int, error) { return state + 1, nil }) g.AddEdge(nodeName, graph.END) } g.AddConditionalEdge("router", func(ctx context.Context, state int) string { return fmt.Sprintf("node_%d", state%10) }) g.SetEntryPoint("router") runnable, err := g.Compile() if err != nil { b.Fatalf("Failed to compile: %v", err) } ctx := context.Background() for i := 0; b.Loop(); i++ { _, err := runnable.Invoke(ctx, i) if err != nil { b.Fatalf("Execution failed: %v", err) } } } // BenchmarkLargeStateTransfer benchmarks performance with large state objects func BenchmarkLargeStateTransfer(b *testing.B) { g := graph.NewStateGraph[[]byte]() // Create nodes that pass large state g.AddNode("node1", "node1", func(ctx context.Context, state []byte) ([]byte, error) { return state, nil }) g.AddNode("node2", "node2", func(ctx context.Context, state []byte) ([]byte, error) { return state, nil }) g.AddNode("node3", "node3", func(ctx context.Context, state []byte) ([]byte, error) { return state, nil }) g.AddEdge("node1", "node2") g.AddEdge("node2", "node3") g.AddEdge("node3", graph.END) g.SetEntryPoint("node1") runnable, err := g.Compile() if err != nil { b.Fatalf("Failed to compile: %v", err) } // Create large state object (1MB) largeState := make([]byte, 1024*1024) ctx := context.Background() b.SetBytes(int64(len(largeState))) for b.Loop() { _, err := runnable.Invoke(ctx, largeState) if err != nil { b.Fatalf("Execution failed: %v", err) } } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/resume_test.go
graph/resume_test.go
package graph import ( "context" "testing" "github.com/stretchr/testify/assert" ) func TestGraphResume(t *testing.T) { g := NewStateGraph[map[string]any]() g.AddNode("A", "A", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["value"] = state["value"].(string) + "A" return state, nil }) g.AddNode("B", "B", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["value"] = state["value"].(string) + "B" return state, nil }) g.AddNode("C", "C", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["value"] = state["value"].(string) + "C" return state, nil }) g.SetEntryPoint("A") g.AddEdge("A", "B") g.AddEdge("B", "C") g.AddEdge("C", END) runnable, err := g.Compile() assert.NoError(t, err) // Test Resume after InterruptAfter t.Run("ResumeAfter", func(t *testing.T) { // 1. Run with interrupt after B config := &Config{ InterruptAfter: []string{"B"}, } _, err = runnable.InvokeWithConfig(context.Background(), map[string]any{"value": "Start"}, config) assert.Error(t, err) var interrupt *GraphInterrupt assert.ErrorAs(t, err, &interrupt) assert.Equal(t, "B", interrupt.Node) interruptState, ok := interrupt.State.(map[string]any) assert.True(t, ok) assert.Equal(t, "StartAB", interruptState["value"]) assert.Equal(t, []string{"C"}, interrupt.NextNodes) // 2. Resume from NextNodes with updated state // Simulate user modifying state updatedState := map[string]any{"value": interruptState["value"].(string) + "-Modified"} resumeConfig := &Config{ ResumeFrom: interrupt.NextNodes, } res2, err := runnable.InvokeWithConfig(context.Background(), updatedState, resumeConfig) assert.NoError(t, err) assert.Equal(t, "StartAB-ModifiedC", res2["value"]) }) // Test Resume from InterruptBefore t.Run("ResumeBefore", func(t *testing.T) { // 1. 
Run with interrupt before B config := &Config{ InterruptBefore: []string{"B"}, } _, err = runnable.InvokeWithConfig(context.Background(), map[string]any{"value": "Start"}, config) assert.Error(t, err) var interrupt *GraphInterrupt assert.ErrorAs(t, err, &interrupt) assert.Equal(t, "B", interrupt.Node) interruptState, ok := interrupt.State.(map[string]any) assert.True(t, ok) assert.Equal(t, "StartA", interruptState["value"]) // 2. Resume from interrupted node resumeConfig := &Config{ ResumeFrom: []string{interrupt.Node}, } res2, err := runnable.InvokeWithConfig(context.Background(), interruptState, resumeConfig) assert.NoError(t, err) assert.Equal(t, "StartABC", res2["value"]) }) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/add_messages.go
graph/add_messages.go
package graph import ( "fmt" "reflect" "github.com/tmc/langchaingo/llms" ) // MessageWithID is an interface that allows messages to have an ID for deduplication/upsert. // Since langchaingo's MessageContent doesn't have an ID field, we can wrap it or use a custom struct. // For now, we'll check if the message implements this interface or is a map with an "id" key. type MessageWithID interface { GetID() string GetContent() llms.MessageContent } // AddMessages is a reducer designed for merging chat messages. // It handles ID-based deduplication and upserts. // If a new message has the same ID as an existing one, it replaces the existing one. // Otherwise, it appends the new message. func AddMessages(current, new any) (any, error) { if current == nil { return new, nil } // Optimization: Fast path for []llms.MessageContent if currentSlice, ok := current.([]llms.MessageContent); ok { var newSlice []llms.MessageContent if ns, ok := new.([]llms.MessageContent); ok { newSlice = ns } else if n, ok := new.(llms.MessageContent); ok { newSlice = []llms.MessageContent{n} } else { // Fallback to reflection if new is not compatible goto ReflectionPath } // Since standard MessageContent doesn't support IDs in this implementation // (unless wrapped), and we are in the fast path for standard types, // we just append. 
return append(currentSlice, newSlice...), nil } ReflectionPath: // We expect current to be a slice of messages currentVal := reflect.ValueOf(current) if currentVal.Kind() != reflect.Slice { return nil, fmt.Errorf("current value is not a slice") } // We expect new to be a slice of messages or a single message newVal := reflect.ValueOf(new) var newMessages []any if newVal.Kind() == reflect.Slice { for i := 0; i < newVal.Len(); i++ { newMessages = append(newMessages, newVal.Index(i).Interface()) } } else { newMessages = append(newMessages, new) } // Convert current slice to a list of interfaces for manipulation result := make([]any, 0, currentVal.Len()+len(newMessages)) for i := 0; i < currentVal.Len(); i++ { result = append(result, currentVal.Index(i).Interface()) } // Index existing messages by ID if possible // Since standard MessageContent doesn't have ID, we only support ID logic // if the user uses a custom struct or map wrapper. // For standard MessageContent, we just append. // Map ID to index in result idToIndex := make(map[string]int) for i, msg := range result { if id := getMessageID(msg); id != "" { idToIndex[id] = i } } for _, msg := range newMessages { id := getMessageID(msg) if id != "" { if idx, exists := idToIndex[id]; exists { // Update existing message result[idx] = msg } else { // Append new message result = append(result, msg) idToIndex[id] = len(result) - 1 } } else { // No ID, just append result = append(result, msg) } } // Convert back to the original slice type if possible, or []any // If current was []llms.MessageContent, we try to return that. // But if we mixed types (e.g. wrapped messages), we might need to return []any // or fail if types are incompatible. // For simplicity in this implementation, if the original type was []llms.MessageContent, // and we are just appending standard messages, we return that type. // If we are doing advanced ID stuff, we assume the user is using a compatible slice type. 
targetType := currentVal.Type() finalSlice := reflect.MakeSlice(targetType, 0, len(result)) for _, item := range result { val := reflect.ValueOf(item) if val.Type().AssignableTo(targetType.Elem()) { finalSlice = reflect.Append(finalSlice, val) } else { // Try to convert? Or error? // If we can't put it back in the slice, we have a problem. return nil, fmt.Errorf("cannot append item of type %T to slice of %s", item, targetType.Elem()) } } return finalSlice.Interface(), nil } // getMessageID tries to extract an ID from a message object. func getMessageID(msg any) string { // 1. Check if it implements MessageWithID if m, ok := msg.(MessageWithID); ok { return m.GetID() } // 2. Check if it's a map with an "id" key if m, ok := msg.(map[string]any); ok { if id, ok := m["id"].(string); ok { return id } } // 3. Check specific struct fields via reflection (slow but flexible) val := reflect.ValueOf(msg) if val.Kind() == reflect.Ptr { val = val.Elem() } if val.Kind() == reflect.Struct { field := val.FieldByName("ID") if field.IsValid() && field.Kind() == reflect.String { return field.String() } } return "" }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/subgraph_test.go
graph/subgraph_test.go
package graph import ( "context" "testing" "github.com/stretchr/testify/assert" ) func TestSubgraph(t *testing.T) { // 1. Define Child Graph child := NewStateGraph[map[string]any]() child.AddNode("child_A", "child_A", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["child_visited"] = true return state, nil }) child.SetEntryPoint("child_A") child.AddEdge("child_A", END) // 2. Define Parent Graph parent := NewStateGraph[map[string]any]() parent.AddNode("parent_A", "parent_A", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["parent_visited"] = true return state, nil }) // Add Child Graph as a node - use identity converters for map[string]any err := AddSubgraph(parent, "child", child, func(s map[string]any) map[string]any { return s }, func(s map[string]any) map[string]any { return s }) assert.NoError(t, err) parent.SetEntryPoint("parent_A") parent.AddEdge("parent_A", "child") parent.AddEdge("child", END) // 3. Run Parent Graph runnable, err := parent.Compile() assert.NoError(t, err) res, err := runnable.Invoke(context.Background(), map[string]any{}) assert.NoError(t, err) assert.True(t, res["parent_visited"].(bool)) assert.True(t, res["child_visited"].(bool)) } func TestCreateSubgraph(t *testing.T) { // Test CreateSubgraph with builder pattern parent := NewStateGraph[map[string]any]() CreateSubgraph(parent, "dynamic_child", func(g *StateGraph[map[string]any]) error { g.AddNode("node1", "Node 1", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["dynamic_created"] = true return state, nil }) g.SetEntryPoint("node1") g.AddEdge("node1", END) return nil }, func(s map[string]any) map[string]any { return s }, func(s map[string]any) map[string]any { return s }) // Verify the node was added _, ok := parent.nodes["dynamic_child"] assert.True(t, ok, "Dynamic subgraph should be added") } func TestNewCompositeGraph(t *testing.T) { cg := NewCompositeGraph[map[string]any]() 
assert.NotNil(t, cg) assert.NotNil(t, cg.main) assert.NotNil(t, cg.graphs) assert.Empty(t, cg.graphs) } func TestCompositeGraph_AddGraph(t *testing.T) { cg := NewCompositeGraph[map[string]any]() graph1 := NewStateGraph[map[string]any]() graph1.AddNode("test", "Test", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) cg.AddGraph("graph1", graph1) assert.Equal(t, 1, len(cg.graphs)) assert.Equal(t, graph1, cg.graphs["graph1"]) } func TestCompositeGraph_Connect(t *testing.T) { cg := NewCompositeGraph[map[string]any]() graph1 := NewStateGraph[map[string]any]() graph1.AddNode("output1", "Output 1", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["from_graph1"] = true return state, nil }) graph2 := NewStateGraph[map[string]any]() graph2.AddNode("input2", "Input 2", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["to_graph2"] = true return state, nil }) // Add graphs to composite cg.AddGraph("graph1", graph1) cg.AddGraph("graph2", graph2) // Connect with transformation err := cg.Connect("graph1", "output1", "graph2", "input2", func(state map[string]any) map[string]any { state["transformed"] = true return state }) assert.NoError(t, err) // Check that bridge node was created bridgeName := "graph1_output1_to_graph2_input2" _, ok := cg.main.nodes[bridgeName] assert.True(t, ok, "Bridge node should be created") } func TestCompositeGraph_Compile(t *testing.T) { cg := NewCompositeGraph[map[string]any]() // Add a simple graph simpleGraph := NewStateGraph[map[string]any]() simpleGraph.AddNode("test", "Test", func(ctx context.Context, state map[string]any) (map[string]any, error) { state["compiled"] = true return state, nil }) simpleGraph.SetEntryPoint("test") simpleGraph.AddEdge("test", END) cg.AddGraph("simple", simpleGraph) // Set entry point on the composite graph's main graph to the simple subgraph cg.main.SetEntryPoint("simple") // Compile composite graph runnable, err 
:= cg.Compile() assert.NoError(t, err) assert.NotNil(t, runnable) // Test execution _, err = runnable.Invoke(context.Background(), map[string]any{}) if err != nil { t.Logf("Execution error (expected if graph structure issues): %v", err) } } func TestNewRecursiveSubgraph(t *testing.T) { // Create recursive subgraph with max depth of 3 and condition on count maxDepth := 3 rs := NewRecursiveSubgraph[map[string]any]( "recursive", maxDepth, func(state map[string]any, depth int) bool { currentCount, _ := state["count"].(int) return currentCount < 2 // Recurse twice }, ) assert.NotNil(t, rs) assert.Equal(t, "recursive", rs.name) assert.Equal(t, maxDepth, rs.maxDepth) } func TestRecursiveSubgraph_Execute(t *testing.T) { rs := NewRecursiveSubgraph[map[string]any]( "recursive", 3, func(state map[string]any, depth int) bool { return depth < 2 // Recurse twice }, ) // Add a node to the recursive graph rs.graph.AddNode("process", "Process", func(ctx context.Context, state map[string]any) (map[string]any, error) { if state["count"] == nil { state["count"] = 0 } state["count"] = state["count"].(int) + 1 return state, nil }) rs.graph.SetEntryPoint("process") rs.graph.AddEdge("process", END) // Execute ctx := context.Background() initialState := map[string]any{"count": 0} result, err := rs.Execute(ctx, initialState) assert.NoError(t, err) assert.Equal(t, 2, result["count"], "Should have counted twice") } func TestRecursiveSubgraph_MaxDepth(t *testing.T) { rs := NewRecursiveSubgraph[map[string]any]( "recursive", 2, // Very shallow max depth func(state map[string]any, depth int) bool { return true // Always recurse }, ) // Add a node rs.graph.AddNode("process", "Process", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) rs.graph.SetEntryPoint("process") rs.graph.AddEdge("process", END) // Execute - should stop at max depth ctx := context.Background() result, err := rs.Execute(ctx, map[string]any{}) assert.NoError(t, err) // Should not 
panic and should complete assert.NotNil(t, result) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/listeners.go
graph/listeners.go
package graph

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// NodeEvent represents different types of node events
type NodeEvent string

const (
	// NodeEventStart indicates a node has started execution
	NodeEventStart NodeEvent = "start"
	// NodeEventProgress indicates progress during node execution
	NodeEventProgress NodeEvent = "progress"
	// NodeEventComplete indicates a node has completed successfully
	NodeEventComplete NodeEvent = "complete"
	// NodeEventError indicates a node encountered an error
	NodeEventError NodeEvent = "error"

	// EventChainStart indicates the graph execution has started
	EventChainStart NodeEvent = "chain_start"
	// EventChainEnd indicates the graph execution has completed
	EventChainEnd NodeEvent = "chain_end"
	// EventToolStart indicates a tool execution has started
	EventToolStart NodeEvent = "tool_start"
	// EventToolEnd indicates a tool execution has completed
	EventToolEnd NodeEvent = "tool_end"
	// EventLLMStart indicates an LLM call has started
	EventLLMStart NodeEvent = "llm_start"
	// EventLLMEnd indicates an LLM call has completed
	EventLLMEnd NodeEvent = "llm_end"
	// EventToken indicates a generated token (for streaming)
	EventToken NodeEvent = "token"
	// EventCustom indicates a custom user-defined event
	EventCustom NodeEvent = "custom"
)

// NodeListener defines the interface for typed node event listeners.
type NodeListener[S any] interface {
	// OnNodeEvent is called when a node event occurs.
	OnNodeEvent(ctx context.Context, event NodeEvent, nodeName string, state S, err error)
}

// NodeListenerFunc is a function adapter for NodeListener.
type NodeListenerFunc[S any] func(ctx context.Context, event NodeEvent, nodeName string, state S, err error)

// OnNodeEvent implements the NodeListener interface.
func (f NodeListenerFunc[S]) OnNodeEvent(ctx context.Context, event NodeEvent, nodeName string, state S, err error) {
	f(ctx, event, nodeName, state, err)
}

// StreamEvent represents a typed event in the streaming execution.
type StreamEvent[S any] struct {
	// Timestamp when the event occurred
	Timestamp time.Time
	// NodeName is the name of the node that generated the event
	NodeName string
	// Event is the type of event
	Event NodeEvent
	// State is the current state at the time of the event (typed)
	State S
	// Error contains any error that occurred (if Event is NodeEventError)
	Error error
	// Metadata contains additional event-specific data
	Metadata map[string]any
	// Duration is how long the node took (only for Complete events)
	Duration time.Duration
}

// listenerWrapper wraps a listener with a unique ID so listeners can be
// removed individually even when the listener values themselves are not
// comparable.
type listenerWrapper[S any] struct {
	id       string
	listener NodeListener[S]
}

// ListenableNode extends TypedNode with listener capabilities.
// The mutex guards listeners/nextID; the struct must not be copied.
type ListenableNode[S any] struct {
	TypedNode[S]
	listeners []listenerWrapper[S]
	mutex     sync.RWMutex
	nextID    int64
}

// NewListenableNode creates a new listenable node from a regular typed node.
func NewListenableNode[S any](node TypedNode[S]) *ListenableNode[S] {
	return &ListenableNode[S]{
		TypedNode: node,
		listeners: make([]listenerWrapper[S], 0),
		nextID:    1,
	}
}

// registerListener appends the listener under the lock and returns its
// generated ID. Shared by AddListener and AddListenerWithID to keep the
// ID-generation logic in one place.
func (ln *ListenableNode[S]) registerListener(listener NodeListener[S]) string {
	ln.mutex.Lock()
	defer ln.mutex.Unlock()

	id := fmt.Sprintf("listener_%d", ln.nextID)
	ln.nextID++
	ln.listeners = append(ln.listeners, listenerWrapper[S]{
		id:       id,
		listener: listener,
	})
	return id
}

// AddListener adds a listener to the node and returns the listenable node for chaining.
func (ln *ListenableNode[S]) AddListener(listener NodeListener[S]) *ListenableNode[S] {
	ln.registerListener(listener)
	return ln
}

// AddListenerWithID adds a listener to the node and returns its ID,
// which can later be passed to RemoveListener.
func (ln *ListenableNode[S]) AddListenerWithID(listener NodeListener[S]) string {
	return ln.registerListener(listener)
}

// RemoveListener removes a listener from the node by ID.
// Unknown IDs are silently ignored.
func (ln *ListenableNode[S]) RemoveListener(listenerID string) {
	ln.mutex.Lock()
	defer ln.mutex.Unlock()

	for i, lw := range ln.listeners {
		if lw.id == listenerID {
			ln.listeners = append(ln.listeners[:i], ln.listeners[i+1:]...)
			break
		}
	}
}

// RemoveListenerByFunc removes a listener from the node by comparing pointer
// representations.
//
// NOTE: identity is approximated by comparing the fmt "%p" rendering of the
// stored and supplied listener values. This matches the same pointer-backed
// listener or the same func value passed twice, but cannot distinguish
// distinct closures sharing a code pointer. Prefer AddListenerWithID /
// RemoveListener when precise removal is required.
// (The previous implementation also compared &lw.listener == &listener, which
// compares addresses of two distinct local variables and is always false, so
// it has been dropped.)
func (ln *ListenableNode[S]) RemoveListenerByFunc(listener NodeListener[S]) {
	ln.mutex.Lock()
	defer ln.mutex.Unlock()

	target := fmt.Sprintf("%p", listener)
	for i, lw := range ln.listeners {
		if fmt.Sprintf("%p", lw.listener) == target {
			ln.listeners = append(ln.listeners[:i], ln.listeners[i+1:]...)
			break
		}
	}
}

// NotifyListeners notifies all listeners of an event. Each listener runs in
// its own goroutine (so one slow listener does not serialize the others), but
// the call blocks until all listeners have returned. Panics inside listeners
// are recovered and ignored.
func (ln *ListenableNode[S]) NotifyListeners(ctx context.Context, event NodeEvent, state S, err error) {
	// Snapshot the listener slice under the read lock so listeners may
	// add/remove listeners from inside their callbacks without deadlocking.
	ln.mutex.RLock()
	wrappers := make([]listenerWrapper[S], len(ln.listeners))
	copy(wrappers, ln.listeners)
	ln.mutex.RUnlock()

	var wg sync.WaitGroup
	for _, wrapper := range wrappers {
		wg.Add(1)
		go func(l NodeListener[S]) {
			defer wg.Done()
			// Protect against panics in listeners
			defer func() {
				if r := recover(); r != nil {
					// Panic recovered, but not logged to avoid dependencies
					_ = r // Acknowledge the panic was caught
				}
			}()
			l.OnNodeEvent(ctx, event, ln.Name, state, err)
		}(wrapper.listener)
	}
	// Wait for all listener notifications to complete
	wg.Wait()
}

// Execute runs the node function, surrounding it with start and
// complete/error listener notifications.
func (ln *ListenableNode[S]) Execute(ctx context.Context, state S) (S, error) {
	// Notify start
	ln.NotifyListeners(ctx, NodeEventStart, state, nil)

	// Execute the node function
	result, err := ln.Function(ctx, state)

	// Notify completion or error
	if err != nil {
		ln.NotifyListeners(ctx, NodeEventError, state, err)
	} else {
		ln.NotifyListeners(ctx, NodeEventComplete, result, nil)
	}
	return result, err
}

// GetListeners returns a copy of the current listeners.
func (ln *ListenableNode[S]) GetListeners() []NodeListener[S] {
	ln.mutex.RLock()
	defer ln.mutex.RUnlock()

	listeners := make([]NodeListener[S], len(ln.listeners))
	for i, wrapper := range ln.listeners {
		listeners[i] = wrapper.listener
	}
	return listeners
}

// GetListenerIDs returns a copy of the current listener IDs.
func (ln *ListenableNode[S]) GetListenerIDs() []string {
	ln.mutex.RLock()
	defer ln.mutex.RUnlock()

	ids := make([]string, len(ln.listeners))
	for i, wrapper := range ln.listeners {
		ids[i] = wrapper.id
	}
	return ids
}

// ListenableStateGraph extends StateGraph with listener capabilities.
type ListenableStateGraph[S any] struct {
	*StateGraph[S]
	listenableNodes map[string]*ListenableNode[S]
}

// NewListenableStateGraph creates a new typed state graph with listener support.
func NewListenableStateGraph[S any]() *ListenableStateGraph[S] {
	return &ListenableStateGraph[S]{
		StateGraph:      NewStateGraph[S](),
		listenableNodes: make(map[string]*ListenableNode[S]),
	}
}

// AddNode adds a node with listener capabilities. The node is registered in
// both the embedded StateGraph and the listenable-node map, and the
// listenable wrapper is returned so callers can attach listeners fluently.
func (g *ListenableStateGraph[S]) AddNode(name string, description string, fn func(ctx context.Context, state S) (S, error)) *ListenableNode[S] {
	node := TypedNode[S]{
		Name:        name,
		Description: description,
		Function:    fn,
	}
	listenableNode := NewListenableNode(node)

	// Add to both the base graph and our listenable nodes map
	g.StateGraph.AddNode(name, description, fn)
	g.listenableNodes[name] = listenableNode
	return listenableNode
}

// GetListenableNode returns the listenable node by name (nil if absent).
func (g *ListenableStateGraph[S]) GetListenableNode(name string) *ListenableNode[S] {
	return g.listenableNodes[name]
}

// AddGlobalListener adds a listener to all nodes in the graph.
func (g *ListenableStateGraph[S]) AddGlobalListener(listener NodeListener[S]) {
	for _, node := range g.listenableNodes {
		node.AddListener(listener)
	}
}

// RemoveGlobalListener removes a listener from all nodes in the graph by
// function reference (see RemoveListenerByFunc for the matching caveats).
func (g *ListenableStateGraph[S]) RemoveGlobalListener(listener NodeListener[S]) {
	for _, node := range g.listenableNodes {
		node.RemoveListenerByFunc(listener)
	}
}

// RemoveGlobalListenerByID removes a listener from all nodes in the graph by ID.
func (g *ListenableStateGraph[S]) RemoveGlobalListenerByID(listenerID string) {
	for _, node := range g.listenableNodes {
		node.RemoveListener(listenerID)
	}
}

// ListenableRunnable wraps a StateRunnable with listener capabilities.
type ListenableRunnable[S any] struct {
	graph           *ListenableStateGraph[S]
	listenableNodes map[string]*ListenableNode[S]
	runnable        *StateRunnable[S]
}

// CompileListenable creates a runnable with listener support. The compiled
// runnable's node runner is redirected through the listenable wrappers so
// every node execution fires listener notifications.
func (g *ListenableStateGraph[S]) CompileListenable() (*ListenableRunnable[S], error) {
	if g.entryPoint == "" {
		return nil, ErrEntryPointNotSet
	}

	runnable, err := g.StateGraph.Compile()
	if err != nil {
		return nil, err
	}

	// Configure the runnable to use our listenable nodes
	nodes := g.listenableNodes
	runnable.nodeRunner = func(ctx context.Context, nodeName string, state S) (S, error) {
		node, ok := nodes[nodeName]
		if !ok {
			var zero S
			return zero, fmt.Errorf("%w: %s", ErrNodeNotFound, nodeName)
		}
		return node.Execute(ctx, state)
	}

	return &ListenableRunnable[S]{
		graph:           g,
		listenableNodes: g.listenableNodes,
		runnable:        runnable,
	}, nil
}

// Invoke executes the graph with listener notifications.
func (lr *ListenableRunnable[S]) Invoke(ctx context.Context, initialState S) (S, error) {
	return lr.runnable.Invoke(ctx, initialState)
}

// InvokeWithConfig executes the graph with listener notifications and config.
func (lr *ListenableRunnable[S]) InvokeWithConfig(ctx context.Context, initialState S, config *Config) (S, error) {
	if config != nil {
		ctx = WithConfig(ctx, config)
	}
	return lr.runnable.InvokeWithConfig(ctx, initialState, config)
}

// Stream executes the graph with listener notifications and streams events.
// The returned channel is closed when execution finishes; the final
// EventChainEnd event carries the final state on success (or the initial
// state if execution failed).
func (lr *ListenableRunnable[S]) Stream(ctx context.Context, initialState S) <-chan StreamEvent[S] {
	eventChan := make(chan StreamEvent[S], 100) // Buffered channel

	// Create a streaming listener (DefaultStreamConfig from streaming.go)
	streamListener := NewStreamingListener(eventChan, DefaultStreamConfig())

	// Add the listener to all nodes
	lr.graph.AddGlobalListener(streamListener)

	// Start execution in a goroutine
	go func() {
		defer func() {
			// Clean up: remove the listener first to stop new events,
			// close the listener (sets internal flag), then close the channel.
			lr.graph.RemoveGlobalListener(streamListener)
			streamListener.Close()
			close(eventChan)
		}()

		// Send chain start event
		eventChan <- StreamEvent[S]{
			Timestamp: time.Now(),
			Event:     EventChainStart,
			State:     initialState,
		}

		// Execute the graph and capture the final state
		finalState, err := lr.runnable.Invoke(ctx, initialState)
		if err != nil {
			// On failure the returned state may be the zero value; report the
			// initial state so consumers still see meaningful data.
			finalState = initialState
		}

		// Send chain end event with the final state
		eventChan <- StreamEvent[S]{
			Timestamp: time.Now(),
			Event:     EventChainEnd,
			State:     finalState,
			Error:     err,
		}
	}()

	return eventChan
}

// SetTracer sets a tracer for the underlying runnable.
func (lr *ListenableRunnable[S]) SetTracer(tracer *Tracer) {
	lr.runnable.SetTracer(tracer)
}

// GetTracer returns the tracer from the underlying runnable.
func (lr *ListenableRunnable[S]) GetTracer() *Tracer {
	return lr.runnable.GetTracer()
}

// WithTracer returns a new ListenableRunnable with the given tracer.
func (lr *ListenableRunnable[S]) WithTracer(tracer *Tracer) *ListenableRunnable[S] {
	newRunnable := lr.runnable.WithTracer(tracer)
	return &ListenableRunnable[S]{
		graph:           lr.graph,
		listenableNodes: lr.listenableNodes,
		runnable:        newRunnable,
	}
}

// GetGraph returns an Exporter for visualization.
func (lr *ListenableRunnable[S]) GetGraph() *Exporter[S] {
	// Convert the typed graph to a regular graph for visualization
	regularGraph := lr.convertToRegularGraph()
	return NewExporter[S](regularGraph)
}

// GetListenableGraph returns the underlying ListenableStateGraph.
func (lr *ListenableRunnable[S]) GetListenableGraph() *ListenableStateGraph[S] {
	return lr.graph
}

// convertToRegularGraph returns the embedded StateGraph[S] for visualization;
// the Exporter[S] can work with any StateGraph[S] directly.
func (lr *ListenableRunnable[S]) convertToRegularGraph() *StateGraph[S] {
	return lr.graph.StateGraph
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/checkpointing.go
graph/checkpointing.go
package graph

import (
	"context"
	"fmt"
	"time"

	"github.com/google/uuid"
	"github.com/smallnest/langgraphgo/store"
	"github.com/smallnest/langgraphgo/store/file"
	"github.com/smallnest/langgraphgo/store/memory"
)

// Checkpoint is an alias for store.Checkpoint
type Checkpoint = store.Checkpoint

// CheckpointStore is an alias for store.CheckpointStore
type CheckpointStore = store.CheckpointStore

// NewMemoryCheckpointStore creates a new in-memory checkpoint store
func NewMemoryCheckpointStore() store.CheckpointStore {
	return memory.NewMemoryCheckpointStore()
}

// NewFileCheckpointStore creates a new file-based checkpoint store
func NewFileCheckpointStore(path string) (store.CheckpointStore, error) {
	return file.NewFileCheckpointStore(path)
}

// CheckpointConfig configures checkpointing behavior
type CheckpointConfig struct {
	// Store is the checkpoint storage backend
	Store store.CheckpointStore

	// AutoSave enables automatic checkpointing after each node
	AutoSave bool

	// SaveInterval specifies how often to save (when AutoSave is false)
	// NOTE(review): SaveInterval is not read anywhere in this file — confirm
	// it is consumed elsewhere or dead.
	SaveInterval time.Duration

	// MaxCheckpoints limits the number of checkpoints to keep
	MaxCheckpoints int
}

// DefaultCheckpointConfig returns a default checkpoint configuration:
// in-memory store, auto-save on, 30s interval, at most 10 checkpoints kept.
func DefaultCheckpointConfig() CheckpointConfig {
	return CheckpointConfig{
		Store:          NewMemoryCheckpointStore(),
		AutoSave:       true,
		SaveInterval:   30 * time.Second,
		MaxCheckpoints: 10,
	}
}

// CheckpointListener automatically creates checkpoints during execution.
// It implements GraphCallbackHandler: OnGraphStep does the real work and all
// other CallbackHandler methods are no-ops.
type CheckpointListener[S any] struct {
	store          store.CheckpointStore
	executionID    string
	threadID       string
	autoSave       bool
	maxCheckpoints int
}

// OnGraphStep is called after a step in the graph has completed and the state has been merged.
// A checkpoint is saved only when autoSave is on and the untyped state
// asserts to S; mismatched state types are silently skipped.
func (cl *CheckpointListener[S]) OnGraphStep(ctx context.Context, nodeName string, state any) {
	if cl.autoSave {
		if s, ok := state.(S); ok {
			cl.saveCheckpoint(ctx, nodeName, s)
		}
	}
}

// Implement other methods of CallbackHandler as no-ops
func (cl *CheckpointListener[S]) OnChainStart(context.Context, map[string]any, map[string]any, string, *string, []string, map[string]any) {
}
func (cl *CheckpointListener[S]) OnChainEnd(context.Context, map[string]any, string) {}
func (cl *CheckpointListener[S]) OnChainError(context.Context, error, string)       {}
func (cl *CheckpointListener[S]) OnToolStart(context.Context, map[string]any, string, string, *string, []string, map[string]any) {
}
func (cl *CheckpointListener[S]) OnToolEnd(context.Context, string, string) {}
func (cl *CheckpointListener[S]) OnToolError(context.Context, error, string) {}
func (cl *CheckpointListener[S]) OnLLMStart(context.Context, map[string]any, []string, string, *string, []string, map[string]any) {
}
func (cl *CheckpointListener[S]) OnLLMEnd(context.Context, any, string)   {}
func (cl *CheckpointListener[S]) OnLLMError(context.Context, error, string) {}
func (cl *CheckpointListener[S]) OnRetrieverStart(context.Context, map[string]any, string, string, *string, []string, map[string]any) {
}
func (cl *CheckpointListener[S]) OnRetrieverEnd(context.Context, []any, string) {}
func (cl *CheckpointListener[S]) OnRetrieverError(context.Context, error, string) {}

// saveCheckpoint persists the state after a step. Checkpointing is
// best-effort: Save/List errors are deliberately ignored so that a failing
// store never aborts graph execution.
func (cl *CheckpointListener[S]) saveCheckpoint(ctx context.Context, nodeName string, state S) {
	// Get current version from existing checkpoints
	// NOTE(review): the version is derived from List(executionID) while
	// cleanup below may operate per threadID — confirm stores keep these
	// views consistent when both are set.
	checkpoints, err := cl.store.List(ctx, cl.executionID)
	version := 1
	if err == nil && len(checkpoints) > 0 {
		// Get the latest version (List is assumed sorted by version ascending)
		latest := checkpoints[len(checkpoints)-1]
		version = latest.Version + 1
	}

	metadata := map[string]any{
		"execution_id": cl.executionID,
		"event":        "step",
	}
	if cl.threadID != "" {
		metadata["thread_id"] = cl.threadID
	}

	checkpoint := &store.Checkpoint{
		ID:        generateCheckpointID(),
		NodeName:  nodeName,
		State:     state,
		Timestamp: time.Now(),
		Version:   version,
		Metadata:  metadata,
	}

	// Save checkpoint synchronously
	_ = cl.store.Save(ctx, checkpoint)

	// Cleanup old checkpoints if MaxCheckpoints is set
	if cl.maxCheckpoints > 0 {
		cl.cleanupOldCheckpoints(ctx)
	}
}

// cleanupOldCheckpoints removes oldest checkpoints exceeding the max limit.
// Errors are ignored (best-effort, same as saveCheckpoint).
func (cl *CheckpointListener[S]) cleanupOldCheckpoints(ctx context.Context) {
	// List checkpoints for this thread/execution
	var checkpoints []*store.Checkpoint
	var err error
	if cl.threadID != "" {
		checkpoints, err = cl.store.ListByThread(ctx, cl.threadID)
	} else {
		checkpoints, err = cl.store.List(ctx, cl.executionID)
	}
	if err != nil || len(checkpoints) <= cl.maxCheckpoints {
		return
	}

	// Sort by version ascending (oldest first) and delete excess
	// Checkpoints returned by List are already sorted by version ascending
	excessCount := len(checkpoints) - cl.maxCheckpoints
	for i := range excessCount {
		// Delete the oldest checkpoints
		_ = cl.store.Delete(ctx, checkpoints[i].ID)
	}
}

// CallbackHandler implementation for CheckpointListener is removed because CallbackHandler is untyped/legacy.
// We rely on NodeListener[S].
// CheckpointableStateGraph[S any] extends ListenableStateGraph[S] with checkpointing
type CheckpointableStateGraph[S any] struct {
	*ListenableStateGraph[S]
	config CheckpointConfig
}

// NewCheckpointableStateGraph creates a new checkpointable state graph with type parameter,
// using DefaultCheckpointConfig (in-memory store, auto-save on).
func NewCheckpointableStateGraph[S any]() *CheckpointableStateGraph[S] {
	baseGraph := NewListenableStateGraph[S]()
	return &CheckpointableStateGraph[S]{
		ListenableStateGraph: baseGraph,
		config:               DefaultCheckpointConfig(),
	}
}

// NewCheckpointableStateGraphWithConfig creates a checkpointable graph with custom config
func NewCheckpointableStateGraphWithConfig[S any](config CheckpointConfig) *CheckpointableStateGraph[S] {
	baseGraph := NewListenableStateGraph[S]()
	return &CheckpointableStateGraph[S]{
		ListenableStateGraph: baseGraph,
		config:               config,
	}
}

// CompileCheckpointable compiles the graph into a checkpointable runnable
func (g *CheckpointableStateGraph[S]) CompileCheckpointable() (*CheckpointableRunnable[S], error) {
	listenableRunnable, err := g.CompileListenable()
	if err != nil {
		return nil, err
	}
	return NewCheckpointableRunnable(listenableRunnable, g.config), nil
}

// SetCheckpointConfig updates the checkpointing configuration.
// Note: only affects runnables compiled after the change.
func (g *CheckpointableStateGraph[S]) SetCheckpointConfig(config CheckpointConfig) {
	g.config = config
}

// GetCheckpointConfig returns the current checkpointing configuration
func (g *CheckpointableStateGraph[S]) GetCheckpointConfig() CheckpointConfig {
	return g.config
}

// CheckpointableRunnable[S] wraps a ListenableRunnable[S] with checkpointing capabilities
type CheckpointableRunnable[S any] struct {
	runnable    *ListenableRunnable[S]
	config      CheckpointConfig
	executionID string
	listener    *CheckpointListener[S]
}

// NewCheckpointableRunnable creates a new checkpointable runnable from a listenable runnable.
// A fresh execution ID is generated, and the checkpoint listener is prepared
// here but only attached to callbacks at invocation time.
func NewCheckpointableRunnable[S any](runnable *ListenableRunnable[S], config CheckpointConfig) *CheckpointableRunnable[S] {
	executionID := generateExecutionID()
	cr := &CheckpointableRunnable[S]{
		runnable:    runnable,
		config:      config,
		executionID: executionID,
	}

	// Create checkpoint listener
	cr.listener = &CheckpointListener[S]{
		store:          cr.config.Store,
		executionID:    executionID,
		threadID:       "",
		autoSave:       true,
		maxCheckpoints: cr.config.MaxCheckpoints,
	}

	// The listener will be added to config callbacks during invocation.
	return cr
}

// Invoke executes the graph with checkpointing support
func (cr *CheckpointableRunnable[S]) Invoke(ctx context.Context, initialState S) (S, error) {
	return cr.InvokeWithConfig(ctx, initialState, nil)
}

// InvokeWithConfig executes the graph with checkpointing support and config.
// When a thread_id is configured and a checkpoint exists for it, the call
// auto-resumes: the checkpoint state is merged with initialState, completed
// runs return immediately, and interrupted runs continue from the saved node.
func (cr *CheckpointableRunnable[S]) InvokeWithConfig(ctx context.Context, initialState S, config *Config) (S, error) {
	// Extract thread_id from config if present
	var threadID string
	if config != nil && config.Configurable != nil {
		if tid, ok := config.Configurable["thread_id"].(string); ok {
			threadID = tid
		}
	}

	// Auto-resume: if thread_id is provided, try to load the latest checkpoint
	// and merge its state with the provided initialState (which may be just new input)
	if threadID != "" {
		// Only auto-resume if ResumeFrom is not explicitly set (manual control takes precedence)
		if config == nil || config.ResumeFrom == nil {
			if latestCP, err := cr.getLatestCheckpoint(ctx, threadID); err == nil && latestCP != nil {
				// Found existing checkpoint - this is a resume
				checkpointState, ok := latestCP.State.(S)
				if ok {
					// Merge checkpoint state with new input using Schema
					initialState = cr.mergeStates(ctx, checkpointState, initialState)

					// Check if the checkpoint is at END (completed execution)
					// Note: NodeName is empty when checkpoint is created at END or via other means
					if latestCP.NodeName == "" || latestCP.NodeName == END {
						// Graph has completed - just return the merged state
						// No need to re-execute anything
						return initialState, nil
					}

					// For incomplete checkpoints (interrupted), set ResumeFrom to continue
					// The graph will continue execution from the checkpoint node
					if config == nil {
						config = &Config{}
					}
					config.ResumeFrom = []string{latestCP.NodeName}
				}
			}
		}
	}

	// Update checkpoint listener with thread_id
	if cr.listener != nil {
		cr.listener.threadID = threadID
		cr.listener.autoSave = cr.config.AutoSave
	}

	// Add the listener to config callbacks
	if config == nil {
		config = &Config{}
	}
	config.Callbacks = append(config.Callbacks, cr.listener)

	return cr.runnable.InvokeWithConfig(ctx, initialState, config)
}

// Stream executes the graph with checkpointing and streaming support.
// NOTE(review): this delegates directly to the listenable runnable, so the
// checkpoint listener is NOT attached here — confirm whether streaming runs
// are meant to checkpoint.
func (cr *CheckpointableRunnable[S]) Stream(ctx context.Context, initialState S) <-chan StreamEvent[S] {
	return cr.runnable.Stream(ctx, initialState)
}

// StateSnapshot represents a snapshot of the graph state
type StateSnapshot struct {
	Values    any            // the checkpointed state value
	Next      []string       // node(s) to execute next; empty when complete
	Config    Config         // config carrying thread_id/checkpoint_id
	Metadata  map[string]any // checkpoint metadata
	CreatedAt time.Time      // checkpoint creation time
	ParentID  string         // parent checkpoint ID, if any
}

// getLatestCheckpoint retrieves the latest checkpoint for a given thread_id.
// It first tries to use the optimized GetLatestByThread method, and falls back
// to the List method for stores that don't implement it.
func (cr *CheckpointableRunnable[S]) getLatestCheckpoint(ctx context.Context, threadID string) (*store.Checkpoint, error) {
	// Try to use the optimized GetLatestByThread method first
	if latestGetter, ok := cr.config.Store.(interface {
		GetLatestByThread(ctx context.Context, threadID string) (*store.Checkpoint, error)
	}); ok {
		return latestGetter.GetLatestByThread(ctx, threadID)
	}

	// Fallback to List method for stores that don't implement GetLatestByThread
	// NOTE(review): the fallback passes threadID where List documents an
	// executionID — confirm stores treat these interchangeably.
	checkpoints, err := cr.config.Store.List(ctx, threadID)
	if err != nil {
		return nil, fmt.Errorf("failed to list checkpoints: %w", err)
	}
	if len(checkpoints) == 0 {
		return nil, fmt.Errorf("no checkpoints found for thread %s", threadID)
	}

	// Get the latest checkpoint (highest version)
	latest := checkpoints[0]
	for _, cp := range checkpoints {
		if cp.Version > latest.Version {
			latest = cp
		}
	}
	return latest, nil
}

// mergeStates merges the checkpoint state with new input using the graph's Schema.
// If Schema is available, it uses Schema.Update which applies reducers for smart merging.
// Otherwise, the input state takes precedence (replacement behavior).
func (cr *CheckpointableRunnable[S]) mergeStates(ctx context.Context, checkpointState S, input S) S {
	// If no Schema, input state replaces checkpoint state (fallback behavior)
	if cr.runnable.graph == nil || cr.runnable.graph.Schema == nil {
		return input
	}

	// Use Schema.Update to merge states with reducer logic
	merged, err := cr.runnable.graph.Schema.Update(checkpointState, input)
	if err != nil {
		// On error, fall back to input state
		return input
	}
	return merged
}

// GetState retrieves the state for the given config. Lookup order:
// checkpoint_id (exact load) > thread_id (latest) > this runnable's execution ID.
func (cr *CheckpointableRunnable[S]) GetState(ctx context.Context, config *Config) (*StateSnapshot, error) {
	var threadID string
	var checkpointID string
	if config != nil && config.Configurable != nil {
		if tid, ok := config.Configurable["thread_id"].(string); ok {
			threadID = tid
		}
		if cid, ok := config.Configurable["checkpoint_id"].(string); ok {
			checkpointID = cid
		}
	}

	// Default to current execution ID if thread_id not provided
	if threadID == "" {
		threadID = cr.executionID
	}

	var checkpoint *store.Checkpoint
	var err error

	if checkpointID != "" {
		checkpoint, err = cr.config.Store.Load(ctx, checkpointID)
	} else if threadID != "" {
		// Try to use the optimized GetLatestByThread method first
		if latestGetter, ok := cr.config.Store.(interface {
			GetLatestByThread(ctx context.Context, threadID string) (*store.Checkpoint, error)
		}); ok {
			checkpoint, err = latestGetter.GetLatestByThread(ctx, threadID)
			if err != nil {
				return nil, fmt.Errorf("failed to get latest checkpoint by thread: %w", err)
			}
		} else {
			// Fallback to List method for stores that don't implement GetLatestByThread
			// (err here shadows the outer err; both failure paths return immediately)
			checkpoints, err := cr.config.Store.List(ctx, threadID)
			if err != nil {
				return nil, fmt.Errorf("failed to list checkpoints: %w", err)
			}
			if len(checkpoints) == 0 {
				return nil, fmt.Errorf("no checkpoints found for thread %s", threadID)
			}

			// Get the latest checkpoint (highest version)
			checkpoint = checkpoints[0]
			for _, cp := range checkpoints {
				if cp.Version > checkpoint.Version {
					checkpoint = cp
				}
			}
		}
	}

	if err != nil {
		return nil, fmt.Errorf("failed to load checkpoint: %w", err)
	}
	if checkpoint == nil {
		return nil, fmt.Errorf("checkpoint not found")
	}

	// Return state snapshot; an empty NodeName means nothing is pending next
	next := []string{checkpoint.NodeName}
	if checkpoint.NodeName == "" {
		next = []string{}
	}

	return &StateSnapshot{
		Values: checkpoint.State,
		Next:   next,
		Config: Config{
			Configurable: map[string]any{
				"thread_id":     threadID,
				"checkpoint_id": checkpoint.ID,
			},
		},
		Metadata:  checkpoint.Metadata,
		CreatedAt: checkpoint.Timestamp,
	}, nil
}

// SaveCheckpoint manually saves a checkpoint at the current state
func (cr *CheckpointableRunnable[S]) SaveCheckpoint(ctx context.Context, nodeName string, state S) error {
	// Get current version to increment (List errors ignored: version stays 1)
	checkpoints, _ := cr.config.Store.List(ctx, cr.executionID)
	version := 1
	if len(checkpoints) > 0 {
		for _, cp := range checkpoints {
			if cp.Version >= version {
				version = cp.Version + 1
			}
		}
	}

	checkpoint := &store.Checkpoint{
		ID:        generateCheckpointID(),
		NodeName:  nodeName,
		State:     state,
		Timestamp: time.Now(),
		Version:   version,
		Metadata: map[string]any{
			"execution_id": cr.executionID,
			"source":       "manual_save",
			"saved_by":     nodeName,
		},
	}

	return cr.config.Store.Save(ctx, checkpoint)
}

// ListCheckpoints lists all checkpoints for the current execution
func (cr *CheckpointableRunnable[S]) ListCheckpoints(ctx context.Context) ([]*store.Checkpoint, error) {
	return cr.config.Store.List(ctx, cr.executionID)
}

// LoadCheckpoint loads a specific checkpoint
func (cr *CheckpointableRunnable[S]) LoadCheckpoint(ctx context.Context, checkpointID string) (*store.Checkpoint, error) {
	return cr.config.Store.Load(ctx, checkpointID)
}

// ClearCheckpoints removes all checkpoints for this execution
func (cr *CheckpointableRunnable[S]) ClearCheckpoints(ctx context.Context) error {
	return cr.config.Store.Clear(ctx, cr.executionID)
}

// UpdateState updates the state and saves a checkpoint.
// The current state is loaded (via GetState), merged with values through the
// Schema when present (replacement otherwise), and persisted as a new
// checkpoint attributed to asNode. Returns a config pointing at it.
func (cr *CheckpointableRunnable[S]) UpdateState(ctx context.Context, config *Config, asNode string, values S) (*Config, error) {
	var threadID string
	if config != nil && config.Configurable != nil {
		if tid, ok := config.Configurable["thread_id"].(string); ok {
			threadID = tid
		}
	}
	if threadID == "" {
		threadID = cr.executionID
	}

	// Get current state from config if available
	var currentState S
	if config != nil {
		snapshot, err := cr.GetState(ctx, config)
		if err == nil && snapshot != nil {
			if s, ok := snapshot.Values.(S); ok {
				currentState = s
			}
		}
	}

	// If current state is still nil (e.g., no checkpoints), initialize from schema
	// NOTE(review): any(currentState) == nil only detects nil-able S
	// (maps/pointers/interfaces); for value-typed S this branch never fires.
	if any(currentState) == nil && cr.runnable.graph.Schema != nil {
		currentState = cr.runnable.graph.Schema.Init()
	}

	// Apply update using Schema if available
	var newState S
	if cr.runnable.graph.Schema != nil {
		var err error
		newState, err = cr.runnable.graph.Schema.Update(currentState, values)
		if err != nil {
			return nil, fmt.Errorf("failed to update state with schema: %w", err)
		}
	} else {
		// Default: Replace
		newState = values
	}

	// Get max version
	checkpoints, _ := cr.config.Store.List(ctx, threadID)
	version := 1
	for _, cp := range checkpoints {
		if cp.Version >= version {
			version = cp.Version + 1
		}
	}

	// Create new checkpoint
	checkpoint := &store.Checkpoint{
		ID:        generateCheckpointID(),
		NodeName:  asNode,
		State:     newState,
		Timestamp: time.Now(),
		Version:   version,
		Metadata: map[string]any{
			"execution_id": threadID,
			"source":       "update_state",
			"updated_by":   asNode,
		},
	}

	if err := cr.config.Store.Save(ctx, checkpoint); err != nil {
		return nil, err
	}

	return &Config{
		Configurable: map[string]any{
			"thread_id":     threadID,
			"checkpoint_id": checkpoint.ID,
		},
	}, nil
}

// GetExecutionID returns the current execution ID
func (cr *CheckpointableRunnable[S]) GetExecutionID() string {
	return cr.executionID
}

// SetExecutionID sets a new execution ID (also propagated to the listener)
func (cr *CheckpointableRunnable[S]) SetExecutionID(executionID string) {
	cr.executionID = executionID
	if cr.listener != nil {
		cr.listener.executionID = executionID
	}
}

// GetTracer returns the tracer from the underlying runnable
func (cr *CheckpointableRunnable[S]) GetTracer() *Tracer {
	return cr.runnable.GetTracer()
}

// SetTracer sets the tracer on the underlying runnable
func (cr *CheckpointableRunnable[S]) SetTracer(tracer *Tracer) {
	cr.runnable.SetTracer(tracer)
}

// WithTracer returns a new CheckpointableRunnable with the given tracer.
// Note: config, executionID and listener are shared with the receiver.
func (cr *CheckpointableRunnable[S]) WithTracer(tracer *Tracer) *CheckpointableRunnable[S] {
	newRunnable := cr.runnable.WithTracer(tracer)
	return &CheckpointableRunnable[S]{
		runnable:    newRunnable,
		config:      cr.config,
		executionID: cr.executionID,
		listener:    cr.listener,
	}
}

// GetGraph returns the underlying graph
func (cr *CheckpointableRunnable[S]) GetGraph() *ListenableStateGraph[S] {
	return cr.runnable.GetListenableGraph()
}

// Helper functions

// generateExecutionID builds a unique execution ID from the current time.
func generateExecutionID() string {
	return fmt.Sprintf("exec_%d", time.Now().UnixNano())
}

// generateCheckpointID builds a globally unique checkpoint ID.
func generateCheckpointID() string {
	return fmt.Sprintf("checkpoint_%s", uuid.New().String())
}

// WithThreadID creates a Config with the given thread_id set in the configurable map.
// This is a convenience function for setting up checkpoint-based conversation resumption.
//
// Example:
//
//	result, err := runnable.Invoke(ctx, state, graph.WithThreadID("conversation-1"))
func WithThreadID(threadID string) *Config {
	return &Config{
		Configurable: map[string]any{
			"thread_id": threadID,
		},
	}
}

// WithInterruptBefore creates a Config with interrupt points set before specified nodes.
//
// Example:
//
//	config := graph.WithInterruptBefore("node1", "node2")
//	result, err := runnable.Invoke(ctx, state, config)
func WithInterruptBefore(nodes ...string) *Config {
	return &Config{
		InterruptBefore: nodes,
	}
}

// WithInterruptAfter creates a Config with interrupt points set after specified nodes.
//
// Example:
//
//	config := graph.WithInterruptAfter("node1", "node2")
//	result, err := runnable.Invoke(ctx, state, config)
func WithInterruptAfter(nodes ...string) *Config {
	return &Config{
		InterruptAfter: nodes,
	}
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/graph/visualization_test.go
graph/visualization_test.go
package graph import ( "context" "testing" "github.com/stretchr/testify/assert" ) func TestVisualization(t *testing.T) { g := NewStateGraph[map[string]any]() g.AddNode("A", "A", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddNode("B", "B", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.AddNode("C", "C", func(ctx context.Context, state map[string]any) (map[string]any, error) { return state, nil }) g.SetEntryPoint("A") g.AddEdge("A", "B") g.AddConditionalEdge("B", func(ctx context.Context, state map[string]any) string { return "C" }) g.AddEdge("C", END) _, err := g.Compile() assert.NoError(t, err) // Create an exporter for the graph exporter := NewExporter(g) // Test Mermaid mermaid := exporter.DrawMermaid() assert.Contains(t, mermaid, "A --> B") assert.Contains(t, mermaid, "B -.-> B_condition((?))") assert.Contains(t, mermaid, "C --> END") // Test Mermaid with Options mermaidLR := exporter.DrawMermaidWithOptions(MermaidOptions{Direction: "LR"}) assert.Contains(t, mermaidLR, "flowchart LR") // Test DOT dot := exporter.DrawDOT() assert.Contains(t, dot, "A -> B") assert.Contains(t, dot, "B -> B_condition [style=dashed, label=\"?\"]") // Test ASCII ascii := exporter.DrawASCII() assert.Contains(t, ascii, "A") assert.Contains(t, ascii, "B") assert.Contains(t, ascii, "(?)") // Since C is not directly linked from B in static analysis (it's conditional), it might not appear in ASCII tree under B // But B has a conditional edge, so we show (?) // C is not reachable via static edges from B, so it won't be shown under B. // This is expected behavior for static visualization of dynamic graphs. }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/checkpoint.go
store/checkpoint.go
package store import ( "context" "time" ) // Checkpoint represents a saved state at a specific point in execution type Checkpoint struct { ID string `json:"id"` NodeName string `json:"node_name"` State any `json:"state"` Metadata map[string]any `json:"metadata"` Timestamp time.Time `json:"timestamp"` Version int `json:"version"` } // CheckpointStore defines the interface for checkpoint persistence type CheckpointStore interface { // Save stores a checkpoint Save(ctx context.Context, checkpoint *Checkpoint) error // Load retrieves a checkpoint by ID Load(ctx context.Context, checkpointID string) (*Checkpoint, error) // List returns all checkpoints for a given execution List(ctx context.Context, executionID string) ([]*Checkpoint, error) // ListByThread returns all checkpoints for a specific thread_id. // Returns checkpoints sorted by version (ascending). ListByThread(ctx context.Context, threadID string) ([]*Checkpoint, error) // GetLatestByThread returns the latest checkpoint for a thread_id. // Returns the checkpoint with the highest version. GetLatestByThread(ctx context.Context, threadID string) (*Checkpoint, error) // Delete removes a checkpoint Delete(ctx context.Context, checkpointID string) error // Clear removes all checkpoints for an execution Clear(ctx context.Context, executionID string) error }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/doc.go
store/doc.go
// Package store provides storage implementations for persisting LangGraph checkpoints and state. // // Store implementations allow graph executions to be persisted across different runs, // processes, or even different machines. This enables features like resuming // interrupted workflows, debugging complex executions, and maintaining state // in distributed systems. // // The store package includes implementations for three popular storage backends: // - SQLite: Lightweight, serverless file-based storage // - PostgreSQL: Robust, scalable relational database // - Redis: High-performance in-memory storage // // # Core Concepts // // ## Checkpointing // // Checkpointing captures the state of a graph execution at specific points, including: // - The current node being executed // - The complete state object // - Execution metadata // - Timestamp and configuration information // // This allows execution to be paused and later resumed from the exact same state. // // ## Store Interface // // All store implementations follow the same interface defined in the graph package: // // type CheckpointStore interface { // // Save a checkpoint // Put(ctx context.Context, checkpoint *Checkpoint) error // // // Retrieve a specific checkpoint // Get(ctx context.Context, threadID, checkpointID string) (*Checkpoint, error) // // // List all checkpoints for a thread // List(ctx context.Context, threadID string) ([]*Checkpoint, error) // // // Delete a checkpoint // Delete(ctx context.Context, threadID, checkpointID string) error // // // Clear all checkpoints for a thread // Clear(ctx context.Context, threadID string) error // } // // # Available Implementations // // ## SQLite Store (store/sqlite) // // Best for: // - Single-process applications // - Development and testing // - Desktop applications // - Scenarios requiring zero configuration // // Features: // - Serverless, file-based database // - ACID transactions // - No external dependencies // - Built-in full-text search // // 
Example: // // import "github.com/smallnest/langgraphgo/store/sqlite" // // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: "./checkpoints.db", // }) // // ## PostgreSQL Store (store/postgres) // // Best for: // - Production deployments // - High-throughput applications // - Complex querying requirements // - Distributed systems // // Features: // - Scalable relational database // - Connection pooling // - Advanced indexing // - JSONB support for metadata // // Example: // // import "github.com/smallnest/langgraphgo/store/postgres" // // store, err := postgres.NewPostgresCheckpointStore(ctx, postgres.PostgresOptions{ // ConnString: "postgres://user:pass@localhost/langgraph", // }) // // ## Redis Store (store/redis) // // Best for: // - High-performance requirements // - Distributed caching scenarios // - Temporary checkpoint storage // - Real-time collaboration features // // Features: // - In-memory storage with optional persistence // - Automatic TTL (time-to-live) expiration // - Atomic operations // - Pub/Sub notifications // // Example: // // import "github.com/smallnest/langgraphgo/store/redis" // // store := redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "localhost:6379", // TTL: 24 * time.Hour, // }) // // # Usage Patterns // // ## Basic Checkpointing // // // Create a graph // g := graph.NewStateGraph() // // ... configure graph ... 
// // // Choose and configure a store // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: "./checkpoints.db", // }) // if err != nil { // return err // } // defer store.Close() // // // Enable checkpointing // compileConfig := graph.CompileConfig{ // CheckpointConfig: graph.CheckpointConfig{ // Store: store, // }, // } // // runnable, err := g.CompileWithOptions(compileConfig) // // // Execute with automatic checkpointing // result, err := runnable.Invoke(ctx, input, // graph.WithExecutionID("unique-execution-id")) // // // Resume from a checkpoint // resumed, err := runnable.Resume(ctx, // "unique-execution-id", // "checkpoint-to-resume-from") // // ## Custom Checkpointing Strategy // // // Checkpoint at specific intervals // type IntervalCheckpointConfig struct { // graph.CheckpointConfig // Interval time.Duration // LastCheckpoint time.Time // } // // // Or checkpoint on specific conditions // type ConditionalCheckpointConfig struct { // graph.CheckpointConfig // ShouldCheckpoint func(state any) bool // } // // # Choosing the Right Store // // ## Decision Guide // // Use SQLite when: // - You need a simple, self-contained solution // - Your application runs on a single machine // - You prefer zero configuration // - You need to store checkpoints in files // // Use PostgreSQL when: // - You need robust persistence and scalability // - Your application requires complex queries // - You have multiple processes accessing the same data // - You need enterprise-grade features (backups, replication) // // Use Redis when: // - Performance is the primary concern // - You need automatic expiration of old checkpoints // - You're building a distributed system // - You need real-time notifications for checkpoint changes // // ## Migration Between Stores // // The package provides utilities to migrate between different store implementations: // // // Migrate from SQLite to PostgreSQL // migrator := store.NewMigrator(sqliteStore, postgresStore) // 
err := migrator.MigrateAll(ctx) // // // Or migrate specific checkpoints // err := migrator.MigrateThread(ctx, "thread-id") // // # Performance Considerations // // ## Serialization // // All stores use JSON serialization for checkpoint data. For optimal performance: // - Keep state objects relatively small // - Avoid storing large binary data in checkpoints // - Consider compression for large state objects // // ## Batch Operations // // Some stores support batch operations for better performance: // // // Batch save multiple checkpoints // checkpoints := []*graph.Checkpoint{cp1, cp2, cp3} // err := store.PutBatch(ctx, checkpoints) // // # Best Practices // // 1. **Choose the right store for your use case** // - SQLite for simple applications // - PostgreSQL for production systems // - Redis for high-performance scenarios // // 2. **Handle errors gracefully** // - Implement retry logic for transient errors // - Provide fallback mechanisms // - Log checkpoint failures for debugging // // 3. **Manage checkpoint lifecycle** // - Clean up old checkpoints regularly // - Use TTL for automatic cleanup (Redis) // - Implement retention policies // // 4. **Secure checkpoint data** // - Encrypt sensitive data before storage // - Use secure database connections // - Implement proper access controls // // 5. 
**Monitor storage usage** // - Track checkpoint sizes and counts // - Monitor database performance metrics // - Set up alerts for storage limits // // # Integration with LangGraph // // Stores integrate seamlessly with all LangGraph components: // // // With prebuilt agents // agent := prebuilt.CreateReactAgent(llm, tools, 10, // prebuilt.WithCheckpointing(graph.CheckpointConfig{ // Store: store, // }), // ) // // // With custom graphs // g := graph.NewStateGraph() // g.WithCheckpointing(graph.CheckpointConfig{ // Store: store, // }) // // // With streaming execution // streaming := graph.NewStreamingStateGraph(g, graph.StreamConfig{ // BufferSize: 100, // }) // streaming.WithCheckpointing(graph.CheckpointConfig{ // Store: store, // }) // // # Advanced Features // // ## Checkpoint Versioning // // Some stores support checkpoint versioning for tracking evolution: // // versionedStore := postgres.NewVersionedCheckpointStore(opts) // err := versionedStore.PutVersion(ctx, checkpoint, "v1.0") // // versions, err := versionedStore.ListVersions(ctx, checkpointID) // // ## Checkpoint Compression // // For large state objects, consider compression: // // compressedStore := store.NewCompressedWrapper(store, gzip.BestCompression) // err := compressedStore.Put(ctx, checkpoint) // // ## Checkpoint Encryption // // Encrypt sensitive checkpoint data: // // encryptedStore := store.NewEncryptedWrapper(store, encryptionKey) // err := encryptedStore.Put(ctx, checkpoint) // // # Extending the Package // // To add a new store implementation: // // 1. Implement the CheckpointStore interface // 2. Add the package to the store directory // 3. Create comprehensive tests // 4. Add documentation with examples // 5. 
Include migration utilities if needed // // Example implementation structure: // // package mystore // // type MyStore struct { // // Implementation details // } // // func (s *MyStore) Put(ctx context.Context, cp *graph.Checkpoint) error { // // Implementation // } // // // Implement other interface methods... // // # Community Contributions // // The store package welcomes contributions for additional storage backends: // - MongoDB store // - DynamoDB store // - Cassandra store // - S3/object storage store // - etcd store // // Please follow the established patterns and provide comprehensive tests and documentation. package store
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/type_registry.go
store/type_registry.go
package store import ( "encoding/json" "fmt" "reflect" "sync" ) // TypeRegistry manages type information for generic state serialization/deserialization. // It allows state types to register themselves for proper checkpointing. type TypeRegistry struct { mu sync.RWMutex typeNameToType map[string]reflect.Type typeToName map[reflect.Type]string typeCreators map[string]func() any jsonMarshallers map[reflect.Type]func(any) ([]byte, error) jsonUnmarshallers map[reflect.Type]func([]byte, any) (any, error) } // globalTypeRegistry is the singleton instance of TypeRegistry var globalTypeRegistry = &TypeRegistry{ typeNameToType: make(map[string]reflect.Type), typeToName: make(map[reflect.Type]string), typeCreators: make(map[string]func() any), jsonMarshallers: make(map[reflect.Type]func(any) ([]byte, error)), jsonUnmarshallers: make(map[reflect.Type]func([]byte, any) (any, error)), } // GlobalTypeRegistry returns the global type registry instance func GlobalTypeRegistry() *TypeRegistry { return globalTypeRegistry } // RegisterType registers a reflect.Type with the registry for serialization/deserialization. // Use RegisterTypeWithValue for a more convenient API with generics. // // Example usage: // // var state MyState // RegisterType(reflect.TypeOf(state), "MyState") func RegisterType(t reflect.Type, typeName string) error { return globalTypeRegistry.RegisterTypeInternal(t, typeName) } // RegisterTypeInternal registers a type with the registry. 
func (r *TypeRegistry) RegisterTypeInternal(t reflect.Type, typeName string) error { // Only allow struct types (or pointers to structs) if t.Kind() != reflect.Struct { if t.Kind() == reflect.Ptr { elem := t.Elem() if elem.Kind() != reflect.Struct { return fmt.Errorf("type %s must be a struct or pointer to struct", t) } } else { return fmt.Errorf("type %s must be a struct", t) } } r.mu.Lock() defer r.mu.Unlock() // Check if type already registered with different name if existingName, ok := r.typeToName[t]; ok && existingName != typeName { return fmt.Errorf("type %v already registered as %s", t, existingName) } r.typeNameToType[typeName] = t r.typeToName[t] = typeName r.typeCreators[typeName] = func() any { return reflect.New(t).Elem().Interface() } return nil } // RegisterTypeWithValue is a convenience function that uses reflection from a value. // This is the recommended way to register types. // // Example usage: // // var state MyState // RegisterTypeWithValue(state, "MyState") func RegisterTypeWithValue(value any, typeName string) error { return globalTypeRegistry.RegisterTypeInternal(reflect.TypeOf(value), typeName) } // RegisterTypeWithCustomSerialization registers a type with custom JSON marshaling/unmarshaling. // // Example usage: // // var state MyState // RegisterTypeWithCustomSerialization( // reflect.TypeOf(state), // "MyState", // func(v any) ([]byte, error) { ... }, // func(data []byte) (any, error) { ... }, // ) func RegisterTypeWithCustomSerialization( t reflect.Type, typeName string, marshalFunc func(any) ([]byte, error), unmarshalFunc func([]byte) (any, error), ) error { return globalTypeRegistry.RegisterTypeWithCustomSerializationInternal(t, typeName, marshalFunc, unmarshalFunc) } // RegisterTypeWithCustomSerializationInternal registers a type with custom serialization. 
func (r *TypeRegistry) RegisterTypeWithCustomSerializationInternal( t reflect.Type, typeName string, marshalFunc func(any) ([]byte, error), unmarshalFunc func([]byte) (any, error), ) error { if err := r.RegisterTypeInternal(t, typeName); err != nil { return err } r.mu.Lock() defer r.mu.Unlock() r.jsonMarshallers[t] = marshalFunc r.jsonUnmarshallers[t] = func(data []byte, _ any) (any, error) { return unmarshalFunc(data) } return nil } // GetTypeByName returns the reflect.Type for a registered type name. func (r *TypeRegistry) GetTypeByName(typeName string) (reflect.Type, bool) { r.mu.RLock() defer r.mu.RUnlock() t, ok := r.typeNameToType[typeName] return t, ok } // GetTypeName returns the registered name for a type. func (r *TypeRegistry) GetTypeName(t reflect.Type) (string, bool) { r.mu.RLock() defer r.mu.RUnlock() name, ok := r.typeToName[t] return name, ok } // CreateInstance creates a new instance of a registered type by name. func (r *TypeRegistry) CreateInstance(typeName string) (any, error) { r.mu.RLock() creator, ok := r.typeCreators[typeName] r.mu.RUnlock() if !ok { return nil, fmt.Errorf("type %s not registered", typeName) } instance := creator() return instance, nil } // Marshal marshals a value to JSON with type information. func (r *TypeRegistry) Marshal(value any) ([]byte, error) { if value == nil { return json.Marshal(nil) } t := reflect.TypeOf(value) // Get type name typeName, ok := r.GetTypeName(t) if !ok { // Type not registered, try standard JSON marshaling return json.Marshal(value) } r.mu.RLock() marshalFunc, hasCustomMarshaler := r.jsonMarshallers[t] r.mu.RUnlock() var jsonData []byte var err error if hasCustomMarshaler { jsonData, err = marshalFunc(value) } else { jsonData, err = json.Marshal(value) } if err != nil { return nil, err } // Wrap with type information wrapped := map[string]any{ "_type": typeName, "_value": json.RawMessage(jsonData), } return json.Marshal(wrapped) } // Unmarshal unmarshals JSON with type information. 
func (r *TypeRegistry) Unmarshal(data []byte) (any, error) { // First, try to unmarshal as wrapped type var wrapped map[string]json.RawMessage if err := json.Unmarshal(data, &wrapped); err != nil { // Not a wrapped object, return as-is var result any if err := json.Unmarshal(data, &result); err != nil { return nil, err } return result, nil } // Check if this is a typed wrapper if typeBytes, ok := wrapped["_type"]; ok { var typeName string if err := json.Unmarshal(typeBytes, &typeName); err != nil { return nil, fmt.Errorf("failed to unmarshal type name: %w", err) } // Get the type t, ok := r.GetTypeByName(typeName) if !ok { return nil, fmt.Errorf("unknown type: %s", typeName) } // Create instance instance, err := r.CreateInstance(typeName) if err != nil { return nil, err } r.mu.RLock() unmarshalFunc, hasCustomUnmarshaler := r.jsonUnmarshallers[t] r.mu.RUnlock() if hasCustomUnmarshaler { // Use custom unmarshaler valueBytes, ok := wrapped["_value"] if !ok { return nil, fmt.Errorf("missing _value in wrapped data") } return unmarshalFunc(valueBytes, instance) } // Use standard JSON unmarshaling valueBytes, ok := wrapped["_value"] if !ok { return nil, fmt.Errorf("missing _value in wrapped data") } // For struct types, we need to unmarshal into a pointer // CreateInstance returns a value, so we need to get a pointer to it if t.Kind() == reflect.Struct { // Create a pointer to the value ptr := reflect.New(t).Interface() if err := json.Unmarshal(valueBytes, ptr); err != nil { return nil, fmt.Errorf("failed to unmarshal value: %w", err) } // Dereference to get the value return reflect.ValueOf(ptr).Elem().Interface(), nil } if err := json.Unmarshal(valueBytes, instance); err != nil { return nil, fmt.Errorf("failed to unmarshal value: %w", err) } return instance, nil } // Not a typed wrapper, return as-is var result any if err := json.Unmarshal(data, &result); err != nil { return nil, err } return result, nil } // MarshalState is a helper for marshaling checkpoint states type 
MarshalFunc func(any) ([]byte, error) // UnmarshalState is a helper for unmarshaling checkpoint states type UnmarshalFunc func([]byte) (any, error) // StateMarshaler creates a marshal function for the given registry func (r *TypeRegistry) StateMarshaler() MarshalFunc { return func(state any) ([]byte, error) { return r.Marshal(state) } } // StateUnmarshaler creates an unmarshal function for the given registry func (r *TypeRegistry) StateUnmarshaler() UnmarshalFunc { return func(data []byte) (any, error) { return r.Unmarshal(data) } } // CheckpointData represents checkpoint data with type information type CheckpointData struct { TypeName string `json:"_type"` Data json.RawMessage `json:"_data"` } // NewCheckpointData creates checkpoint data from a state value func NewCheckpointData(state any) (*CheckpointData, error) { if state == nil { return &CheckpointData{}, nil } t := reflect.TypeOf(state) registry := GlobalTypeRegistry() typeName, ok := registry.GetTypeName(t) if !ok { // Type not registered, use standard JSON marshaling data, err := json.Marshal(state) if err != nil { return nil, err } return &CheckpointData{ Data: json.RawMessage(data), }, nil } registry.mu.RLock() marshalFunc, hasCustomMarshaler := registry.jsonMarshallers[t] registry.mu.RUnlock() var jsonData []byte var err error if hasCustomMarshaler { jsonData, err = marshalFunc(state) } else { jsonData, err = json.Marshal(state) } if err != nil { return nil, err } return &CheckpointData{ TypeName: typeName, Data: json.RawMessage(jsonData), }, nil } // ToValue converts checkpoint data back to a state value func (cd *CheckpointData) ToValue() (any, error) { if cd.TypeName == "" && len(cd.Data) == 0 { return nil, nil } registry := GlobalTypeRegistry() if cd.TypeName == "" { // No type information, try to unmarshal as-is var result any if err := json.Unmarshal(cd.Data, &result); err != nil { return nil, err } return result, nil } // Get the type t, ok := registry.GetTypeByName(cd.TypeName) if !ok { return 
nil, fmt.Errorf("unknown type: %s", cd.TypeName) } // Create instance instance, err := registry.CreateInstance(cd.TypeName) if err != nil { return nil, err } registry.mu.RLock() unmarshalFunc, hasCustomUnmarshaler := registry.jsonUnmarshallers[t] registry.mu.RUnlock() if hasCustomUnmarshaler { return unmarshalFunc(cd.Data, instance) } // Use standard JSON unmarshaling // For struct types, we need to unmarshal into a pointer if t.Kind() == reflect.Struct { // Create a pointer to the value ptr := reflect.New(t).Interface() if err := json.Unmarshal(cd.Data, ptr); err != nil { return nil, fmt.Errorf("failed to unmarshal value: %w", err) } // Dereference to get the value return reflect.ValueOf(ptr).Elem().Interface(), nil } if err := json.Unmarshal(cd.Data, instance); err != nil { return nil, fmt.Errorf("failed to unmarshal value: %w", err) } return instance, nil }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/type_registry_test.go
store/type_registry_test.go
package store import ( "encoding/json" "reflect" "testing" "github.com/stretchr/testify/assert" ) // Test types for type registry type TestState struct { Name string `json:"name"` Count int `json:"count"` } type AnotherState struct { ID int `json:"id"` Value string `json:"value"` } type PointerState struct { Field1 string Field2 int } type CustomState struct { Name string `json:"name"` Count int `json:"count"` } // newTestRegistry creates a new isolated registry for testing func newTestRegistry() *TypeRegistry { return &TypeRegistry{ typeNameToType: make(map[string]reflect.Type), typeToName: make(map[reflect.Type]string), typeCreators: make(map[string]func() any), jsonMarshallers: make(map[reflect.Type]func(any) ([]byte, error)), jsonUnmarshallers: make(map[reflect.Type]func([]byte, any) (any, error)), } } func TestTypeRegistry_RegisterType(t *testing.T) { registry := newTestRegistry() t.Run("Register struct type", func(t *testing.T) { err := registry.RegisterTypeInternal(reflect.TypeFor[TestState](), "TestState") assert.NoError(t, err) // Verify we can retrieve it typ, ok := registry.GetTypeByName("TestState") assert.True(t, ok) assert.Equal(t, "TestState", typ.Name()) }) t.Run("Register pointer to struct", func(t *testing.T) { err := registry.RegisterTypeInternal(reflect.TypeFor[*PointerState](), "PointerState") assert.NoError(t, err) // Verify we can retrieve it typ, ok := registry.GetTypeByName("PointerState") assert.True(t, ok) assert.True(t, typ.Kind() == reflect.Ptr) }) t.Run("Register non-struct type should fail", func(t *testing.T) { err := registry.RegisterTypeInternal(reflect.TypeFor[string](), "StringType") assert.Error(t, err) assert.Contains(t, err.Error(), "must be a struct") }) t.Run("Register pointer to non-struct should fail", func(t *testing.T) { err := registry.RegisterTypeInternal(reflect.TypeFor[*int](), "IntPtr") assert.Error(t, err) assert.Contains(t, err.Error(), "must be a struct") }) t.Run("Register same type with different name should 
fail", func(t *testing.T) { // Use a fresh registry for this test registry2 := newTestRegistry() err1 := registry2.RegisterTypeInternal(reflect.TypeFor[TestState](), "TestState_First") assert.NoError(t, err1) err2 := registry2.RegisterTypeInternal(reflect.TypeFor[TestState](), "TestState_Second") assert.Error(t, err2) assert.Contains(t, err2.Error(), "already registered") }) t.Run("Register same type with same name should succeed", func(t *testing.T) { err1 := registry.RegisterTypeInternal(reflect.TypeFor[AnotherState](), "AnotherState") assert.NoError(t, err1) err2 := registry.RegisterTypeInternal(reflect.TypeFor[AnotherState](), "AnotherState") assert.NoError(t, err2) }) } func TestTypeRegistry_GetTypeName(t *testing.T) { registry := newTestRegistry() t.Run("Get existing type name", func(t *testing.T) { typ := reflect.TypeFor[TestState]() registry.RegisterTypeInternal(typ, "TestState1") name, ok := registry.GetTypeName(typ) assert.True(t, ok) assert.Equal(t, "TestState1", name) }) t.Run("Get non-existing type name", func(t *testing.T) { typ := reflect.TypeOf(struct{ Field int }{}) _, ok := registry.GetTypeName(typ) assert.False(t, ok) }) } func TestTypeRegistry_GetTypeByName(t *testing.T) { registry := newTestRegistry() t.Run("Get existing type", func(t *testing.T) { typ := reflect.TypeFor[TestState]() registry.RegisterTypeInternal(typ, "TestState2") retrievedType, ok := registry.GetTypeByName("TestState2") assert.True(t, ok) assert.Equal(t, reflect.TypeFor[TestState](), retrievedType) }) t.Run("Get non-existing type", func(t *testing.T) { _, ok := registry.GetTypeByName("NonExistentType") assert.False(t, ok) }) } func TestTypeRegistry_CreateInstance(t *testing.T) { registry := newTestRegistry() t.Run("Create instance of registered type", func(t *testing.T) { typ := reflect.TypeFor[TestState]() registry.RegisterTypeInternal(typ, "TestState3") instance, err := registry.CreateInstance("TestState3") assert.NoError(t, err) assert.NotNil(t, instance) // Verify it's 
the correct type _, ok := instance.(TestState) assert.True(t, ok) }) t.Run("Create instance of non-registered type", func(t *testing.T) { _, err := registry.CreateInstance("NonExistent") assert.Error(t, err) assert.Contains(t, err.Error(), "not registered") }) } func TestTypeRegistry_Marshal(t *testing.T) { registry := newTestRegistry() t.Run("Marshal registered type", func(t *testing.T) { typ := reflect.TypeFor[TestState]() registry.RegisterTypeInternal(typ, "TestState4") state := TestState{Name: "test", Count: 42} data, err := registry.Marshal(state) assert.NoError(t, err) assert.NotNil(t, data) // Verify wrapped structure var wrapped map[string]json.RawMessage err = json.Unmarshal(data, &wrapped) assert.NoError(t, err) assert.Contains(t, wrapped, "_type") assert.Contains(t, wrapped, "_value") var typeName string json.Unmarshal(wrapped["_type"], &typeName) assert.Equal(t, "TestState4", typeName) }) t.Run("Marshal unregistered type", func(t *testing.T) { state := struct{ Field int }{Field: 123} data, err := registry.Marshal(state) assert.NoError(t, err) // Should be plain JSON without type wrapper var result map[string]any err = json.Unmarshal(data, &result) assert.NoError(t, err) assert.Equal(t, 123.0, result["Field"]) assert.NotContains(t, result, "_type") }) t.Run("Marshal nil", func(t *testing.T) { data, err := registry.Marshal(nil) assert.NoError(t, err) assert.NotNil(t, data) var result any err = json.Unmarshal(data, &result) assert.NoError(t, err) assert.Nil(t, result) }) } func TestTypeRegistry_Unmarshal(t *testing.T) { t.Run("Unmarshal registered type", func(t *testing.T) { registry := newTestRegistry() typ := reflect.TypeFor[TestState]() registry.RegisterTypeInternal(typ, "TestState5") // Create wrapped data wrapped := map[string]any{ "_type": "TestState5", "_value": map[string]any{ "name": "test", "count": 99, }, } jsonData, _ := json.Marshal(wrapped) result, err := registry.Unmarshal(jsonData) assert.NoError(t, err) assert.NotNil(t, result) // Verify 
type and content state, ok := result.(TestState) assert.True(t, ok) assert.Equal(t, "test", state.Name) assert.Equal(t, 99, state.Count) }) t.Run("Unmarshal unknown type", func(t *testing.T) { registry := newTestRegistry() wrapped := map[string]any{ "_type": "UnknownType", "_value": map[string]any{}, } jsonData, _ := json.Marshal(wrapped) _, err := registry.Unmarshal(jsonData) assert.Error(t, err) assert.Contains(t, err.Error(), "unknown type") }) t.Run("Unmarshal non-wrapped data", func(t *testing.T) { registry := newTestRegistry() data := []byte(`{"field": "value"}`) result, err := registry.Unmarshal(data) assert.NoError(t, err) assert.NotNil(t, result) var resultMap map[string]any err = json.Unmarshal(data, &resultMap) assert.NoError(t, err) assert.Equal(t, "value", resultMap["field"]) }) t.Run("Unmarshal wrapped data with missing _value", func(t *testing.T) { registry := newTestRegistry() // Register the type first so we can test missing _value typ := reflect.TypeFor[TestState]() registry.RegisterTypeInternal(typ, "TestState6") wrapped := map[string]any{ "_type": "TestState6", } jsonData, _ := json.Marshal(wrapped) _, err := registry.Unmarshal(jsonData) assert.Error(t, err) assert.Contains(t, err.Error(), "missing _value") }) } func TestTypeRegistry_CustomSerialization(t *testing.T) { registry := newTestRegistry() t.Run("Register with custom serialization", func(t *testing.T) { typ := reflect.TypeFor[TestState]() marshalFunc := func(v any) ([]byte, error) { state := v.(TestState) custom := map[string]any{ "custom_name": state.Name, "custom_count": state.Count, } return json.Marshal(custom) } unmarshalFunc := func(data []byte) (any, error) { var custom map[string]any if err := json.Unmarshal(data, &custom); err != nil { return nil, err } return TestState{ Name: custom["custom_name"].(string), Count: int(custom["custom_count"].(float64)), }, nil } err := registry.RegisterTypeWithCustomSerializationInternal(typ, "CustomState", marshalFunc, unmarshalFunc) 
assert.NoError(t, err) // Test marshal state := TestState{Name: "custom", Count: 100} data, err := registry.Marshal(state) assert.NoError(t, err) var wrapped map[string]json.RawMessage json.Unmarshal(data, &wrapped) var value map[string]any json.Unmarshal(wrapped["_value"], &value) assert.Equal(t, "custom", value["custom_name"]) assert.Equal(t, 100.0, value["custom_count"]) // Test unmarshal result, err := registry.Unmarshal(data) assert.NoError(t, err) resultState, ok := result.(TestState) assert.True(t, ok) assert.Equal(t, "custom", resultState.Name) assert.Equal(t, 100, resultState.Count) }) } func TestTypeRegistry_StateMarshalerUnmarshaler(t *testing.T) { t.Run("StateMarshaler creates correct function", func(t *testing.T) { // Use a fresh registry registry := newTestRegistry() // First register the type typ := reflect.TypeFor[TestState]() registry.RegisterTypeInternal(typ, "TestState7") // Then create the marshaler marshaler := registry.StateMarshaler() assert.NotNil(t, marshaler) state := TestState{Name: "marshaler", Count: 1} data, err := marshaler(state) assert.NoError(t, err) assert.NotNil(t, data) // Verify wrapped structure var wrapped map[string]json.RawMessage err = json.Unmarshal(data, &wrapped) assert.NoError(t, err) assert.Contains(t, wrapped, "_type") assert.Contains(t, wrapped, "_value") var typeName string json.Unmarshal(wrapped["_type"], &typeName) assert.Equal(t, "TestState7", typeName) }) t.Run("StateUnmarshaler creates correct function", func(t *testing.T) { // Use a fresh registry registry := newTestRegistry() // First register the type typ := reflect.TypeFor[TestState]() registry.RegisterTypeInternal(typ, "TestState8") // Then create the unmarshaler unmarshaler := registry.StateUnmarshaler() assert.NotNil(t, unmarshaler) wrapped := map[string]any{ "_type": "TestState8", "_value": map[string]any{"name": "unmarshaler", "count": float64(2)}, } jsonData, _ := json.Marshal(wrapped) result, err := unmarshaler(jsonData) assert.NoError(t, err) 
assert.NotNil(t, result) resultState, ok := result.(TestState) assert.True(t, ok) assert.Equal(t, "unmarshaler", resultState.Name) assert.Equal(t, 2, resultState.Count) }) } func TestCheckpointData(t *testing.T) { // Use global registry for CheckpointData tests since that's what it uses registry := GlobalTypeRegistry() t.Run("NewCheckpointData with registered type", func(t *testing.T) { typ := reflect.TypeFor[TestState]() registry.RegisterTypeInternal(typ, "TestState9") state := TestState{Name: "checkpoint", Count: 50} cd, err := NewCheckpointData(state) assert.NoError(t, err) assert.NotNil(t, cd) assert.Equal(t, "TestState9", cd.TypeName) assert.NotNil(t, cd.Data) // Verify we can convert back value, err := cd.ToValue() assert.NoError(t, err) resultState, ok := value.(TestState) assert.True(t, ok) assert.Equal(t, "checkpoint", resultState.Name) assert.Equal(t, 50, resultState.Count) }) t.Run("NewCheckpointData with unregistered type", func(t *testing.T) { state := struct{ Field string }{Field: "test"} cd, err := NewCheckpointData(state) assert.NoError(t, err) assert.NotNil(t, cd) assert.Equal(t, "", cd.TypeName) // No type name assert.NotNil(t, cd.Data) }) t.Run("NewCheckpointData with nil", func(t *testing.T) { cd, err := NewCheckpointData(nil) assert.NoError(t, err) assert.NotNil(t, cd) assert.Equal(t, "", cd.TypeName) assert.Equal(t, 0, len(cd.Data)) }) t.Run("CheckpointData ToValue with unknown type", func(t *testing.T) { cd := &CheckpointData{ TypeName: "UnknownType", Data: json.RawMessage(`{"field": "value"}`), } _, err := cd.ToValue() assert.Error(t, err) assert.Contains(t, err.Error(), "unknown type") }) t.Run("CheckpointData ToValue without type info", func(t *testing.T) { data, _ := json.Marshal(map[string]any{"field": "value"}) cd := &CheckpointData{ Data: json.RawMessage(data), } result, err := cd.ToValue() assert.NoError(t, err) assert.NotNil(t, result) resultMap, ok := result.(map[string]any) assert.True(t, ok) assert.Equal(t, "value", 
resultMap["field"]) }) t.Run("CheckpointData ToValue empty data", func(t *testing.T) { cd := &CheckpointData{} result, err := cd.ToValue() assert.NoError(t, err) assert.Nil(t, result) }) t.Run("CheckpointData ToValue with custom unmarshaler", func(t *testing.T) { typ := reflect.TypeFor[CustomState]() unmarshalFunc := func(data []byte) (any, error) { var custom map[string]any if err := json.Unmarshal(data, &custom); err != nil { return nil, err } return CustomState{ Name: "custom_" + custom["name"].(string), Count: 999, }, nil } registry.RegisterTypeWithCustomSerializationInternal( typ, "CustomUnmarshalState", nil, unmarshalFunc, ) stateData, _ := json.Marshal(CustomState{Name: "test", Count: 1}) cd := &CheckpointData{ TypeName: "CustomUnmarshalState", Data: json.RawMessage(stateData), } result, err := cd.ToValue() assert.NoError(t, err) resultState, ok := result.(CustomState) assert.True(t, ok) assert.Equal(t, "custom_test", resultState.Name) assert.Equal(t, 999, resultState.Count) }) } func TestGlobalTypeRegistry(t *testing.T) { t.Run("GlobalTypeRegistry returns singleton", func(t *testing.T) { r1 := GlobalTypeRegistry() r2 := GlobalTypeRegistry() assert.Same(t, r1, r2) }) } func TestRegisterTypeWithValue(t *testing.T) { // Use local registry to avoid conflicts registry := newTestRegistry() t.Run("RegisterTypeWithValue with struct value", func(t *testing.T) { err := registry.RegisterTypeInternal(reflect.TypeFor[TestState](), "TestState10") assert.NoError(t, err) // Verify it's registered _, ok := registry.GetTypeByName("TestState10") assert.True(t, ok) }) t.Run("RegisterTypeWithValue with pointer", func(t *testing.T) { err := registry.RegisterTypeInternal(reflect.TypeFor[*PointerState](), "PointerState2") assert.NoError(t, err) // Verify it's registered typ, ok := registry.GetTypeByName("PointerState2") assert.True(t, ok) assert.True(t, typ.Kind() == reflect.Ptr) }) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/sqlite/sqlite_test.go
store/sqlite/sqlite_test.go
package sqlite

import (
	"context"
	"testing"
	"time"

	"github.com/smallnest/langgraphgo/graph"
	"github.com/stretchr/testify/assert"
)

// TestSqliteCheckpointStore exercises the full checkpoint lifecycle
// (Save, Load, List, Delete, Clear) against an in-memory SQLite database.
func TestSqliteCheckpointStore(t *testing.T) {
	// An in-memory database keeps the test hermetic and fast.
	checkpointStore, err := NewSqliteCheckpointStore(SqliteOptions{
		Path: ":memory:",
	})
	assert.NoError(t, err)
	defer checkpointStore.Close()

	ctx := context.Background()
	const executionID = "exec-123"

	// Build a checkpoint tagged with the execution_id that List/Clear key on.
	checkpoint := &graph.Checkpoint{
		ID:        "cp-1",
		NodeName:  "node-a",
		State:     map[string]any{"foo": "bar"},
		Timestamp: time.Now(),
		Version:   1,
		Metadata: map[string]any{
			"execution_id": executionID,
		},
	}

	// Save must succeed.
	assert.NoError(t, checkpointStore.Save(ctx, checkpoint))

	// Load round-trips the checkpoint, including the JSON-encoded state.
	loaded, err := checkpointStore.Load(ctx, "cp-1")
	assert.NoError(t, err)
	assert.Equal(t, checkpoint.ID, loaded.ID)
	assert.Equal(t, checkpoint.NodeName, loaded.NodeName)
	loadedState, ok := loaded.State.(map[string]any)
	assert.True(t, ok)
	assert.Equal(t, "bar", loadedState["foo"])

	// List returns the single checkpoint stored for this execution.
	listed, err := checkpointStore.List(ctx, executionID)
	assert.NoError(t, err)
	assert.Len(t, listed, 1)
	assert.Equal(t, checkpoint.ID, listed[0].ID)

	// Delete removes the checkpoint; Load and List must reflect that.
	assert.NoError(t, checkpointStore.Delete(ctx, "cp-1"))
	_, err = checkpointStore.Load(ctx, "cp-1")
	assert.Error(t, err)
	listed, err = checkpointStore.List(ctx, executionID)
	assert.NoError(t, err)
	assert.Len(t, listed, 0)

	// Clear wipes every checkpoint belonging to the execution.
	second := &graph.Checkpoint{ID: "cp-2", Metadata: map[string]any{"execution_id": executionID}}
	third := &graph.Checkpoint{ID: "cp-3", Metadata: map[string]any{"execution_id": executionID}}
	checkpointStore.Save(ctx, second)
	checkpointStore.Save(ctx, third)
	listed, err = checkpointStore.List(ctx, executionID)
	assert.NoError(t, err)
	assert.Len(t, listed, 2)

	assert.NoError(t, checkpointStore.Clear(ctx, executionID))
	listed, err = checkpointStore.List(ctx, executionID)
	assert.NoError(t, err)
	assert.Len(t, listed, 0)
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/sqlite/sqlite.go
store/sqlite/sqlite.go
package sqlite

import (
	"context"
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"sort"

	_ "github.com/mattn/go-sqlite3"

	"github.com/smallnest/langgraphgo/graph"
	"github.com/smallnest/langgraphgo/store"
)

// SqliteCheckpointStore implements graph.CheckpointStore using SQLite.
// Checkpoint state and metadata are stored as JSON text columns.
type SqliteCheckpointStore struct {
	db        *sql.DB
	tableName string
}

// SqliteOptions configuration for SQLite connection.
type SqliteOptions struct {
	Path      string
	TableName string // Default "checkpoints"
}

// NewSqliteCheckpointStore creates a new SQLite checkpoint store.
// It opens (or creates) the database at opts.Path and ensures the
// checkpoint table and its indexes exist before returning.
func NewSqliteCheckpointStore(opts SqliteOptions) (*SqliteCheckpointStore, error) {
	db, err := sql.Open("sqlite3", opts.Path)
	if err != nil {
		return nil, fmt.Errorf("unable to open database: %w", err)
	}

	tableName := opts.TableName
	if tableName == "" {
		tableName = "checkpoints"
	}

	// Named cs rather than "store" to avoid shadowing the imported store package.
	cs := &SqliteCheckpointStore{
		db:        db,
		tableName: tableName,
	}

	if err := cs.InitSchema(context.Background()); err != nil {
		db.Close()
		return nil, err
	}

	return cs, nil
}

// InitSchema creates the necessary table if it doesn't exist.
// Indexes on execution_id and thread_id back the List/ListByThread queries.
func (s *SqliteCheckpointStore) InitSchema(ctx context.Context) error {
	query := fmt.Sprintf(`
		CREATE TABLE IF NOT EXISTS %s (
			id TEXT PRIMARY KEY,
			execution_id TEXT NOT NULL,
			thread_id TEXT,
			node_name TEXT NOT NULL,
			state TEXT NOT NULL,
			metadata TEXT,
			timestamp DATETIME NOT NULL,
			version INTEGER NOT NULL
		);
		CREATE INDEX IF NOT EXISTS idx_%s_execution_id ON %s (execution_id);
		CREATE INDEX IF NOT EXISTS idx_%s_thread_id ON %s (thread_id);
	`, s.tableName, s.tableName, s.tableName, s.tableName, s.tableName)

	_, err := s.db.ExecContext(ctx, query)
	if err != nil {
		return fmt.Errorf("failed to create schema: %w", err)
	}
	return nil
}

// Close closes the database connection.
func (s *SqliteCheckpointStore) Close() error {
	return s.db.Close()
}

// Save stores a checkpoint, inserting or replacing by primary key.
// execution_id and thread_id are lifted out of Metadata into their own
// columns so they can be indexed and queried.
func (s *SqliteCheckpointStore) Save(ctx context.Context, checkpoint *graph.Checkpoint) error {
	stateJSON, err := json.Marshal(checkpoint.State)
	if err != nil {
		return fmt.Errorf("failed to marshal state: %w", err)
	}

	metadataJSON, err := json.Marshal(checkpoint.Metadata)
	if err != nil {
		return fmt.Errorf("failed to marshal metadata: %w", err)
	}

	executionID := ""
	if id, ok := checkpoint.Metadata["execution_id"].(string); ok {
		executionID = id
	}

	threadID := ""
	if id, ok := checkpoint.Metadata["thread_id"].(string); ok {
		threadID = id
	}

	// nolint:gosec // G201: Table name cannot be parameterized, but all values use parameterized queries
	query := fmt.Sprintf(`
		INSERT INTO %s (id, execution_id, thread_id, node_name, state, metadata, timestamp, version)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT(id) DO UPDATE SET
			execution_id = excluded.execution_id,
			thread_id = excluded.thread_id,
			node_name = excluded.node_name,
			state = excluded.state,
			metadata = excluded.metadata,
			timestamp = excluded.timestamp,
			version = excluded.version
	`, s.tableName)

	_, err = s.db.ExecContext(ctx, query,
		checkpoint.ID,
		executionID,
		threadID,
		checkpoint.NodeName,
		string(stateJSON),
		string(metadataJSON),
		checkpoint.Timestamp,
		checkpoint.Version,
	)
	if err != nil {
		return fmt.Errorf("failed to save checkpoint: %w", err)
	}

	return nil
}

// Load retrieves a checkpoint by ID.
// Returns a "checkpoint not found" error when no row matches.
func (s *SqliteCheckpointStore) Load(ctx context.Context, checkpointID string) (*graph.Checkpoint, error) {
	// nolint:gosec // G201: Table name cannot be parameterized, but all values use parameterized queries
	query := fmt.Sprintf(`
		SELECT id, node_name, state, metadata, timestamp, version
		FROM %s
		WHERE id = ?
	`, s.tableName)

	var cp graph.Checkpoint
	var stateJSON string
	var metadataJSON string

	err := s.db.QueryRowContext(ctx, query, checkpointID).Scan(
		&cp.ID,
		&cp.NodeName,
		&stateJSON,
		&metadataJSON,
		&cp.Timestamp,
		&cp.Version,
	)
	if err != nil {
		// errors.Is handles wrapped errors, unlike a direct == comparison.
		if errors.Is(err, sql.ErrNoRows) {
			return nil, fmt.Errorf("checkpoint not found: %s", checkpointID)
		}
		return nil, fmt.Errorf("failed to load checkpoint: %w", err)
	}

	if err := json.Unmarshal([]byte(stateJSON), &cp.State); err != nil {
		return nil, fmt.Errorf("failed to unmarshal state: %w", err)
	}

	if len(metadataJSON) > 0 {
		if err := json.Unmarshal([]byte(metadataJSON), &cp.Metadata); err != nil {
			return nil, fmt.Errorf("failed to unmarshal metadata: %w", err)
		}
	}

	return &cp, nil
}

// List returns all checkpoints for a given execution, ordered by timestamp ascending.
func (s *SqliteCheckpointStore) List(ctx context.Context, executionID string) ([]*graph.Checkpoint, error) {
	// nolint:gosec // G201: Table name cannot be parameterized, but all values use parameterized queries
	query := fmt.Sprintf(`
		SELECT id, node_name, state, metadata, timestamp, version
		FROM %s
		WHERE execution_id = ?
		ORDER BY timestamp ASC
	`, s.tableName)

	rows, err := s.db.QueryContext(ctx, query, executionID)
	if err != nil {
		return nil, fmt.Errorf("failed to list checkpoints: %w", err)
	}
	defer rows.Close()

	var checkpoints []*graph.Checkpoint
	for rows.Next() {
		var cp graph.Checkpoint
		var stateJSON string
		var metadataJSON string

		err := rows.Scan(
			&cp.ID,
			&cp.NodeName,
			&stateJSON,
			&metadataJSON,
			&cp.Timestamp,
			&cp.Version,
		)
		if err != nil {
			return nil, fmt.Errorf("failed to scan checkpoint row: %w", err)
		}

		if err := json.Unmarshal([]byte(stateJSON), &cp.State); err != nil {
			return nil, fmt.Errorf("failed to unmarshal state: %w", err)
		}

		if len(metadataJSON) > 0 {
			if err := json.Unmarshal([]byte(metadataJSON), &cp.Metadata); err != nil {
				return nil, fmt.Errorf("failed to unmarshal metadata: %w", err)
			}
		}

		checkpoints = append(checkpoints, &cp)
	}

	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating checkpoint rows: %w", err)
	}

	return checkpoints, nil
}

// Delete removes a checkpoint by ID. Deleting a non-existent ID is not an error.
func (s *SqliteCheckpointStore) Delete(ctx context.Context, checkpointID string) error {
	// nolint:gosec // G201: Table name cannot be parameterized, but all values use parameterized queries
	query := fmt.Sprintf("DELETE FROM %s WHERE id = ?", s.tableName)
	_, err := s.db.ExecContext(ctx, query, checkpointID)
	if err != nil {
		return fmt.Errorf("failed to delete checkpoint: %w", err)
	}
	return nil
}

// Clear removes all checkpoints for an execution.
func (s *SqliteCheckpointStore) Clear(ctx context.Context, executionID string) error {
	// nolint:gosec // G201: Table name cannot be parameterized, but all values use parameterized queries
	query := fmt.Sprintf("DELETE FROM %s WHERE execution_id = ?", s.tableName)
	_, err := s.db.ExecContext(ctx, query, executionID)
	if err != nil {
		return fmt.Errorf("failed to clear checkpoints: %w", err)
	}
	return nil
}

// ListByThread returns all checkpoints for a specific thread_id.
// Rows are fetched in timestamp order, then re-sorted by Version ascending,
// so Version is the authoritative ordering for the returned slice.
func (s *SqliteCheckpointStore) ListByThread(ctx context.Context, threadID string) ([]*store.Checkpoint, error) {
	// nolint:gosec // G201: Table name cannot be parameterized, but all values use parameterized queries
	query := fmt.Sprintf(`
		SELECT id, node_name, state, metadata, timestamp, version
		FROM %s
		WHERE thread_id = ?
		ORDER BY timestamp ASC
	`, s.tableName)

	rows, err := s.db.QueryContext(ctx, query, threadID)
	if err != nil {
		return nil, fmt.Errorf("failed to list checkpoints by thread: %w", err)
	}
	defer rows.Close()

	var checkpoints []*store.Checkpoint
	for rows.Next() {
		var cp store.Checkpoint
		var stateJSON string
		var metadataJSON string

		err := rows.Scan(
			&cp.ID,
			&cp.NodeName,
			&stateJSON,
			&metadataJSON,
			&cp.Timestamp,
			&cp.Version,
		)
		if err != nil {
			return nil, fmt.Errorf("failed to scan checkpoint row: %w", err)
		}

		if err := json.Unmarshal([]byte(stateJSON), &cp.State); err != nil {
			return nil, fmt.Errorf("failed to unmarshal state: %w", err)
		}

		if len(metadataJSON) > 0 {
			if err := json.Unmarshal([]byte(metadataJSON), &cp.Metadata); err != nil {
				return nil, fmt.Errorf("failed to unmarshal metadata: %w", err)
			}
		}

		checkpoints = append(checkpoints, &cp)
	}

	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating checkpoint rows: %w", err)
	}

	// Sort by version ascending
	sort.Slice(checkpoints, func(i, j int) bool {
		return checkpoints[i].Version < checkpoints[j].Version
	})

	return checkpoints, nil
}

// GetLatestByThread returns the latest checkpoint for a thread_id,
// i.e. the one with the highest Version.
func (s *SqliteCheckpointStore) GetLatestByThread(ctx context.Context, threadID string) (*store.Checkpoint, error) {
	checkpoints, err := s.ListByThread(ctx, threadID)
	if err != nil {
		return nil, err
	}

	if len(checkpoints) == 0 {
		return nil, fmt.Errorf("no checkpoints found for thread: %s", threadID)
	}

	// Return the last one (highest version due to sorting)
	return checkpoints[len(checkpoints)-1], nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/sqlite/doc.go
store/sqlite/doc.go
// Package sqlite provides SQLite-backed storage for LangGraph Go checkpoints and state. // // This package implements file-based checkpoint storage using SQLite, perfect for // applications requiring a lightweight, serverless database solution with ACID // compliance and zero external dependencies. // // # Key Features // // - Serverless, file-based database // - ACID transaction support // - Zero configuration needed // - Cross-platform compatibility // - Embedded database (no separate server process) // - Full-text search capabilities // - Thread-safe operations // - Support for custom table schemas // - Backup and restore functionality // - WAL mode for concurrent access // // # Basic Usage // // import ( // "context" // "github.com/smallnest/langgraphgo/store/sqlite" // ) // // // Create a SQLite checkpoint store // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: "./checkpoints.db", // Database file path // TableName: "checkpoints", // Optional table name // }) // if err != nil { // return err // } // defer store.Close() // // // Use with a graph // g := graph.NewStateGraph() // // ... configure graph ... 
// // // Enable checkpointing // compileConfig := graph.CompileConfig{ // CheckpointConfig: graph.CheckpointConfig{ // Store: store, // }, // } // // runnable, err := g.CompileWithOptions(compileConfig) // // # Configuration // // ## Database File Options // // // In-memory database (volatile) // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: ":memory:", // }) // // // Temporary file database // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: "", // }) // Creates temporary file // // // Persistent file database // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: "./data/langgraph.db", // }) // // // With custom URI options // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: "file:./checkpoints.db?cache=shared&mode=rwc", // }) // // ## Connection Pooling // // // Configure connection pool // store, err := sqlite.NewSqliteCheckpointStoreWithPool(sqlite.SqlitePoolOptions{ // Path: "./checkpoints.db", // MaxOpenConns: 10, // MaxIdleConns: 5, // ConnMaxLifetime: time.Hour, // }) // // # Advanced Features // // ## WAL Mode for Concurrency // // // Enable Write-Ahead Logging for better concurrent access // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: "./checkpoints.db", // }) // // // Enable WAL mode // if err := store.EnableWAL(context.Background()); err != nil { // return err // } // // // Configure WAL checkpointing // if err := store.SetWALCheckpointMode( // context.Background(), // sqlite.WALCheckpointPassive, // 1000, // WAL size threshold // ); err != nil { // return err // } // // ## Custom Schema // // // Initialize with custom table schema // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: "./checkpoints.db", // TableName: "custom_checkpoints", // }) // // // Add custom indexes // _, err = store.Exec(context.Background(), ` // CREATE INDEX IF NOT EXISTS idx_custom_checkpoints_thread_id // 
ON custom_checkpoints (thread_id); // // CREATE INDEX IF NOT EXISTS idx_custom_checkpoints_created_at // ON custom_checkpoints (created_at DESC); // `) // // // Add full-text search // _, err = store.Exec(context.Background(), ` // CREATE VIRTUAL TABLE IF NOT EXISTS checkpoint_search // USING fts5(checkpoint_id, thread_id, content); // `) // // ## Backup and Restore // // // Create backup // err := store.Backup(context.Background(), "./backup/checkpoints_backup.db") // if err != nil { // return err // } // // // Restore from backup // err := store.Restore(context.Background(), "./backup/checkpoints_backup.db") // if err != nil { // return err // } // // // Incremental backup // err := store.IncrementalBackup(context.Background(), "./backup/incremental.db") // // # Querying and Analytics // // ## Advanced Queries // // // Find checkpoints by thread with pagination // checkpoints, err := store.QueryWithPagination(context.Background(), ` // SELECT * FROM checkpoints // WHERE thread_id = ? // ORDER BY created_at DESC // LIMIT ? OFFSET ? // `, "thread-123", 10, 0) // // // Search checkpoints by content // results, err := store.SearchCheckpoints(context.Background(), ` // SELECT checkpoint_id, thread_id, created_at // FROM checkpoint_search // WHERE checkpoint_search MATCH ? 
// ORDER BY rank // `, "error OR exception") // // // Get checkpoint statistics // stats, err := store.GetStatistics(context.Background()) // fmt.Printf("Total checkpoints: %d\n", stats.Total) // fmt.Printf("Threads: %d\n", stats.Threads) // fmt.Printf("Average size: %.2f KB\n", stats.AverageSize) // // type CheckpointStats struct { // Total int64 // Threads int64 // AverageSize float64 // Oldest time.Time // Newest time.Time // } // // # Performance Optimization // // ## Pragmas and Settings // // // Optimize for performance // pragmas := map[string]string{ // "journal_mode": "WAL", // "synchronous": "NORMAL", // "cache_size": "10000", // "temp_store": "MEMORY", // "mmap_size": "268435456", // 256MB // "wal_autocheckpoint": "1000", // } // // for key, value := range pragmas { // _, err := store.Exec(context.Background(), // fmt.Sprintf("PRAGMA %s = %s", key, value)) // if err != nil { // return err // } // } // // ## Prepared Statements // // // Use prepared statements for frequent operations // insertStmt, err := store.Prepare(context.Background(), ` // INSERT OR REPLACE INTO checkpoints // (id, thread_id, checkpoint_id, checkpoint_data, metadata, created_at) // VALUES (?, ?, ?, ?, ?, ?) 
// `) // if err != nil { // return err // } // defer insertStmt.Close() // // // Use the prepared statement // _, err = insertStmt.ExecContext(context.Background(), // checkpoint.ID, // checkpoint.ThreadID, // checkpoint.CheckpointID, // checkpointData, // metadataJSON, // time.Now(), // ) // // # Transactions // // // Atomic operations with transactions // err := store.Transaction(context.Background(), func(ctx context.Context, tx *sql.Tx) error { // // Checkpoint current state // if err := store.PutTx(ctx, tx, checkpoint1); err != nil { // return err // } // // // Save related data // if err := saveRelatedDataTx(ctx, tx, relatedData); err != nil { // return err // } // // // Update metadata // if err := updateMetadataTx(ctx, tx, metadata); err != nil { // return err // } // // return nil // }) // // # Monitoring and Maintenance // // // Analyze database // _, err := store.Exec(context.Background(), "ANALYZE") // // // Vacuum to reclaim space // _, err = store.Exec(context.Background(), "VACUUM") // // // Check integrity // result, err := store.QueryRow(context.Background(), "PRAGMA integrity_check").Scan(&result) // if result != "ok" { // return fmt.Errorf("database integrity check failed: %s", result) // } // // // Get database info // info := &DatabaseInfo{} // store.QueryRow(context.Background(), "PRAGMA page_size").Scan(&info.PageSize) // store.QueryRow(context.Background(), "PRAGMA page_count").Scan(&info.PageCount) // info.DatabaseSize = int64(info.PageSize) * int64(info.PageCount) // // type DatabaseInfo struct { // PageSize int // PageCount int // DatabaseSize int64 // } // // # Error Handling // // // Handle SQLite-specific errors // if err := store.Put(ctx, checkpoint); err != nil { // if sqlite.IsConstraint(err) { // // Handle constraint violation // switch sqlite.ConstraintType(err) { // case sqlite.ConstraintPrimaryKey: // // Duplicate primary key // case sqlite.ConstraintUnique: // // Unique constraint violation // case sqlite.ConstraintForeignKey: 
// // Foreign key violation // } // } else if sqlite.IsBusy(err) { // // Database is locked // time.Sleep(time.Second) // // Retry... // } else if sqlite.IsLocked(err) { // // Table is locked // } // } // // # Integration Examples // // ## With Desktop Application // // // Local file database for desktop app // appDataDir, _ := os.UserConfigDir() // dbPath := filepath.Join(appDataDir, "myapp", "checkpoints.db") // // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: dbPath, // }) // // // Ensure directory exists // os.MkdirAll(filepath.Dir(dbPath), 0755) // // ## With Web Application // // // Per-user SQLite databases // func getUserStore(userID string) (graph.CheckpointStore, error) { // userDir := filepath.Join("./data", "users", userID) // os.MkdirAll(userDir, 0755) // // dbPath := filepath.Join(userDir, "checkpoints.db") // return sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: dbPath, // }) // } // // ## Development and Testing // // // In-memory database for tests // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: ":memory:", // }) // if err != nil { // t.Fatal(err) // } // // // Or use shared in-memory for multiple connections // store, err := sqlite.NewSqliteCheckpointStore(sqlite.SqliteOptions{ // Path: "file::memory:?cache=shared", // }) // // # Best Practices // // 1. Use WAL mode for concurrent access // 2. Set appropriate timeouts for operations // 3. Use transactions for multi-step operations // 4. Close connections when done // 5. Regularly run VACUUM and ANALYZE // 6. Monitor database size and performance // 7. Use prepared statements for frequent queries // 8. Implement proper backup strategies // 9. Handle database locking gracefully // 10. 
Consider connection pooling for web applications // // # Security Considerations // // - Set appropriate file permissions // - Use directory isolation for multi-tenant apps // - Validate inputs to prevent SQL injection // - Encrypt sensitive data before storage // - Use file system permissions for access control // - Consider full disk encryption for sensitive data // - Implement proper backup encryption // - Audit database access // // # Docker Integration // // Use with Docker volumes: // // ```yaml // version: '3.8' // services: // // langgraph: // image: your-app // volumes: // - ./data:/app/data // environment: // - SQLITE_PATH=/app/data/checkpoints.db // // backup: // image: your-backup-app // volumes: // - ./data:/app/data:ro // - ./backups:/app/backups // // ``` // // # Comparison with Other Stores // // | Feature | SQLite Store | Redis Store | PostgreSQL Store | // |---------------------|--------------|-------------|------------------| // | Performance | Medium | Very High | High | // | Memory Usage | Low | High | Low | // | Concurrency | Limited | High | High | // | Persistence | Yes | Optional | Yes | // | Scaling | Single | Cluster | Cluster | // | Query Capabilities | SQL | Basic | Advanced SQL | // | Setup Complexity | None | Low | Medium | // | Best For | Small/Medium | High-speed | Enterprise | // | File Size | Up to TB | RAM limited | Unlimited | // | Backup | Simple copy | Export/Import| pg_dump | package sqlite
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/memory/doc.go
store/memory/doc.go
// Package memory provides in-memory checkpoint storage implementation.
//
// The store is process-local and non-durable: checkpoints live in Go maps
// guarded by a sync.RWMutex and are lost when the process exits. It is
// suited for tests, examples, and single-process workflows that do not
// require persistence.
package memory
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/memory/memory.go
store/memory/memory.go
package memory

import (
	"context"
	"fmt"
	"sort"
	"sync"

	"github.com/smallnest/langgraphgo/store"
)

// MemoryCheckpointStore provides in-memory checkpoint storage.
// All state lives in process memory guarded by a RWMutex, so the store
// is safe for concurrent use within one process but not persistent.
type MemoryCheckpointStore struct {
	checkpoints    map[string]*store.Checkpoint // id -> checkpoint
	threadIndex    map[string][]string          // thread_id -> []checkpoint IDs
	executionIndex map[string][]string          // execution_id -> []checkpoint IDs
	mutex          sync.RWMutex
}

// NewMemoryCheckpointStore creates a new in-memory checkpoint store.
func NewMemoryCheckpointStore() store.CheckpointStore {
	return &MemoryCheckpointStore{
		checkpoints:    make(map[string]*store.Checkpoint),
		threadIndex:    make(map[string][]string),
		executionIndex: make(map[string][]string),
	}
}

// removeID deletes the first occurrence of id from ids, preserving order.
func removeID(ids []string, id string) []string {
	for i, cid := range ids {
		if cid == id {
			return append(ids[:i], ids[i+1:]...)
		}
	}
	return ids
}

// matchesExecution reports whether the checkpoint belongs to the given
// grouping ID via any of the recognized metadata keys. A missing key
// yields "", matching the original field-by-field comparison.
func matchesExecution(checkpoint *store.Checkpoint, executionID string) bool {
	for _, key := range []string{"execution_id", "thread_id", "session_id", "workflow_id"} {
		if id, _ := checkpoint.Metadata[key].(string); id == executionID {
			return true
		}
	}
	return false
}

// sortByVersion orders checkpoints by ascending Version so the latest
// checkpoint is last.
func sortByVersion(cps []*store.Checkpoint) {
	sort.Slice(cps, func(i, j int) bool {
		return cps[i].Version < cps[j].Version
	})
}

// removeFromIndexesLocked drops the checkpoint's ID from the execution
// and thread indexes. The caller must hold the write lock.
func (m *MemoryCheckpointStore) removeFromIndexesLocked(checkpoint *store.Checkpoint) {
	if execID, ok := checkpoint.Metadata["execution_id"].(string); ok && execID != "" {
		m.executionIndex[execID] = removeID(m.executionIndex[execID], checkpoint.ID)
	}
	if threadID, ok := checkpoint.Metadata["thread_id"].(string); ok && threadID != "" {
		m.threadIndex[threadID] = removeID(m.threadIndex[threadID], checkpoint.ID)
	}
}

// Save implements the CheckpointStore interface. Saving an ID that
// already exists overwrites the stored checkpoint; index entries
// recorded for the previous version are removed first so the indexes
// never accumulate duplicate IDs (the previous implementation appended
// unconditionally, duplicating index entries on every overwrite).
func (m *MemoryCheckpointStore) Save(_ context.Context, checkpoint *store.Checkpoint) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if old, exists := m.checkpoints[checkpoint.ID]; exists {
		m.removeFromIndexesLocked(old)
	}

	// Store checkpoint
	m.checkpoints[checkpoint.ID] = checkpoint

	// Update execution_id index
	if execID, ok := checkpoint.Metadata["execution_id"].(string); ok && execID != "" {
		m.executionIndex[execID] = append(m.executionIndex[execID], checkpoint.ID)
	}

	// Update thread_id index
	if threadID, ok := checkpoint.Metadata["thread_id"].(string); ok && threadID != "" {
		m.threadIndex[threadID] = append(m.threadIndex[threadID], checkpoint.ID)
	}

	return nil
}

// Load implements the CheckpointStore interface. It returns an error
// when the checkpoint ID is unknown.
func (m *MemoryCheckpointStore) Load(_ context.Context, checkpointID string) (*store.Checkpoint, error) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	checkpoint, exists := m.checkpoints[checkpointID]
	if !exists {
		return nil, fmt.Errorf("checkpoint not found: %s", checkpointID)
	}
	return checkpoint, nil
}

// List implements the CheckpointStore interface. It returns every
// checkpoint whose execution_id, thread_id, session_id or workflow_id
// metadata equals executionID, sorted by ascending version.
func (m *MemoryCheckpointStore) List(_ context.Context, executionID string) ([]*store.Checkpoint, error) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	var checkpoints []*store.Checkpoint
	for _, checkpoint := range m.checkpoints {
		if matchesExecution(checkpoint, executionID) {
			checkpoints = append(checkpoints, checkpoint)
		}
	}
	sortByVersion(checkpoints)
	return checkpoints, nil
}

// ListByThread returns all checkpoints for a specific thread_id,
// sorted by ascending version, using the thread index.
func (m *MemoryCheckpointStore) ListByThread(_ context.Context, threadID string) ([]*store.Checkpoint, error) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	ids, exists := m.threadIndex[threadID]
	if !exists {
		return []*store.Checkpoint{}, nil
	}

	checkpoints := make([]*store.Checkpoint, 0, len(ids))
	for _, id := range ids {
		if cp, ok := m.checkpoints[id]; ok {
			checkpoints = append(checkpoints, cp)
		}
	}
	sortByVersion(checkpoints)
	return checkpoints, nil
}

// GetLatestByThread returns the checkpoint with the highest version
// for a thread_id, or an error if the thread has no checkpoints.
func (m *MemoryCheckpointStore) GetLatestByThread(_ context.Context, threadID string) (*store.Checkpoint, error) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	ids, exists := m.threadIndex[threadID]
	if !exists || len(ids) == 0 {
		return nil, fmt.Errorf("no checkpoints found for thread: %s", threadID)
	}

	var latest *store.Checkpoint
	for _, id := range ids {
		cp := m.checkpoints[id]
		if latest == nil || cp.Version > latest.Version {
			latest = cp
		}
	}
	return latest, nil
}

// Delete implements the CheckpointStore interface. Deleting an unknown
// ID is a no-op (returns nil), matching the original behavior.
func (m *MemoryCheckpointStore) Delete(_ context.Context, checkpointID string) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	checkpoint, exists := m.checkpoints[checkpointID]
	if !exists {
		return nil
	}
	m.removeFromIndexesLocked(checkpoint)
	delete(m.checkpoints, checkpointID)
	return nil
}

// Clear implements the CheckpointStore interface. It removes every
// checkpoint matching executionID (by any grouping metadata key) and
// cleans up the corresponding index entries.
func (m *MemoryCheckpointStore) Clear(_ context.Context, executionID string) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	// Deleting map entries while ranging is safe in Go.
	for id, checkpoint := range m.checkpoints {
		if matchesExecution(checkpoint, executionID) {
			m.removeFromIndexesLocked(checkpoint)
			delete(m.checkpoints, id)
		}
	}
	return nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/memory/memory_test.go
store/memory/memory_test.go
package memory

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/smallnest/langgraphgo/store"
)

// TestMemoryCheckpointStore_New verifies construction and that the
// returned value satisfies store.CheckpointStore.
func TestMemoryCheckpointStore_New(t *testing.T) {
	t.Parallel()
	ms := NewMemoryCheckpointStore()
	if ms == nil {
		t.Fatal("Store should not be nil")
	}
	// Verify it implements the interface
	var _ store.CheckpointStore = ms
}

// TestMemoryCheckpointStore_BasicOperations covers Save/Load
// round-trips, the not-found error path, and overwriting an existing ID.
func TestMemoryCheckpointStore_BasicOperations(t *testing.T) {
	t.Parallel()

	t.Run("save and load", func(t *testing.T) {
		t.Parallel()
		ms := NewMemoryCheckpointStore()
		ctx := context.Background()
		cp := &store.Checkpoint{
			ID:        "user-session-123",
			NodeName:  "auth-handler",
			State:     "waiting_for_2fa",
			Timestamp: time.Now(),
			Version:   1,
			Metadata: map[string]any{
				"user_id":    "alice@example.com",
				"session_id": "sess-abc-123",
				"ip_address": "10.0.0.45",
			},
		}
		// Save it
		err := ms.Save(ctx, cp)
		if err != nil {
			t.Fatalf("Failed to save: %v", err)
		}
		// Load it back
		loaded, err := ms.Load(ctx, cp.ID)
		if err != nil {
			t.Fatalf("Failed to load: %v", err)
		}
		// Verify everything matches
		if loaded.ID != cp.ID {
			t.Errorf("ID mismatch: got %s, want %s", loaded.ID, cp.ID)
		}
		if loaded.NodeName != cp.NodeName {
			t.Errorf("NodeName mismatch: got %s, want %s", loaded.NodeName, cp.NodeName)
		}
		if loaded.State != cp.State {
			t.Errorf("State mismatch: got %s, want %s", loaded.State, cp.State)
		}
		if loaded.Version != cp.Version {
			t.Errorf("Version mismatch: got %d, want %d", loaded.Version, cp.Version)
		}
		// Check some metadata
		if userID, ok := loaded.Metadata["user_id"].(string); !ok || userID != "alice@example.com" {
			t.Error("User ID not preserved correctly")
		}
	})

	t.Run("load missing returns error", func(t *testing.T) {
		t.Parallel()
		ms := NewMemoryCheckpointStore()
		ctx := context.Background()
		_, err := ms.Load(ctx, "does-not-exist")
		if err == nil {
			t.Error("Expected error for missing checkpoint")
		}
	})

	t.Run("overwrite works", func(t *testing.T) {
		t.Parallel()
		ms := NewMemoryCheckpointStore()
		ctx := context.Background()
		// Save first version
		cp1 := &store.Checkpoint{
			ID:        "overwrite-test",
			NodeName:  "processor-v1",
			State:     "initial",
			Timestamp: time.Now(),
			Version:   1,
		}
		err := ms.Save(ctx, cp1)
		if err != nil {
			t.Fatalf("Failed to save v1: %v", err)
		}
		// Save second version with same ID
		cp2 := &store.Checkpoint{
			ID:        "overwrite-test",
			NodeName:  "processor-v2",
			State:     "updated",
			Timestamp: time.Now(),
			Version:   2,
		}
		err = ms.Save(ctx, cp2)
		if err != nil {
			t.Fatalf("Failed to save v2: %v", err)
		}
		// Load and verify we get v2
		loaded, err := ms.Load(ctx, "overwrite-test")
		if err != nil {
			t.Fatalf("Failed to load: %v", err)
		}
		if loaded.NodeName != "processor-v2" {
			t.Errorf("Expected v2 processor, got %s", loaded.NodeName)
		}
		if loaded.State != "updated" {
			t.Errorf("Expected updated state, got %s", loaded.State)
		}
		if loaded.Version != 2 {
			t.Errorf("Expected version 2, got %d", loaded.Version)
		}
	})
}

// TestMemoryCheckpointStore_List covers List filtering on the
// session_id/thread_id metadata keys and version-sorted results.
func TestMemoryCheckpointStore_List(t *testing.T) {
	t.Parallel()

	t.Run("filters by session_id", func(t *testing.T) {
		t.Parallel()
		ms := NewMemoryCheckpointStore()
		ctx := context.Background()
		userSession := "web-user-12345"
		checkpoints := []struct {
			id      string
			node    string
			version int
		}{
			{"homepage-visit", "page-renderer", 1},
			{"login-attempt", "auth-handler", 2},
			{"dashboard-view", "dashboard-renderer", 3},
		}
		for _, cp := range checkpoints {
			fullCP := &store.Checkpoint{
				ID:        cp.id,
				NodeName:  cp.node,
				State:     "success",
				Timestamp: time.Now().Add(time.Duration(cp.version) * time.Minute),
				Version:   cp.version,
				Metadata: map[string]any{
					"session_id": userSession,
				},
			}
			err := ms.Save(ctx, fullCP)
			if err != nil {
				t.Fatalf("Failed to save %s: %v", cp.id, err)
			}
		}
		results, err := ms.List(ctx, userSession)
		if err != nil {
			t.Fatalf("Failed to list: %v", err)
		}
		if len(results) != 3 {
			t.Fatalf("Expected 3 checkpoints for user session, got %d", len(results))
		}
		// Verify they're sorted by version
		for i := 1; i < len(results); i++ {
			if results[i-1].Version > results[i].Version {
				t.Error("Checkpoints not sorted by version")
				break
			}
		}
	})

	t.Run("filters by thread_id", func(t *testing.T) {
		t.Parallel()
		ms := NewMemoryCheckpointStore()
		ctx := context.Background()
		botSession := "api-bot-67890"
		cp := &store.Checkpoint{
			ID:        "api-call",
			NodeName:  "request-handler",
			State:     "success",
			Timestamp: time.Now(),
			Version:   1,
			Metadata: map[string]any{
				"thread_id": botSession,
			},
		}
		err := ms.Save(ctx, cp)
		if err != nil {
			t.Fatalf("Failed to save: %v", err)
		}
		results, err := ms.List(ctx, botSession)
		if err != nil {
			t.Fatalf("Failed to list: %v", err)
		}
		if len(results) != 1 {
			t.Fatalf("Expected 1 checkpoint for bot session, got %d", len(results))
		}
		if results[0].ID != "api-call" {
			t.Errorf("Expected api-call, got %s", results[0].ID)
		}
	})

	t.Run("empty for unknown session", func(t *testing.T) {
		t.Parallel()
		ms := NewMemoryCheckpointStore()
		ctx := context.Background()
		results, err := ms.List(ctx, "ghost-session")
		if err != nil {
			t.Fatalf("Failed to list: %v", err)
		}
		if len(results) != 0 {
			t.Errorf("Expected 0 checkpoints, got %d", len(results))
		}
	})

	t.Run("mixed session/thread filters", func(t *testing.T) {
		t.Parallel()
		ms := NewMemoryCheckpointStore()
		ctx := context.Background()
		userSession := "web-user-12345"
		adminThread := "admin-ops-thread-1"
		// Add a checkpoint that has both
		mixedCP := &store.Checkpoint{
			ID:        "mixed-metadata",
			NodeName:  "hybrid-handler",
			State:     "processing",
			Timestamp: time.Now(),
			Version:   1,
			Metadata: map[string]any{
				"session_id": userSession,
				"thread_id":  adminThread,
			},
		}
		err := ms.Save(ctx, mixedCP)
		if err != nil {
			t.Fatalf("Failed to save mixed: %v", err)
		}
		// Should appear in both session and thread lists
		sessionList, _ := ms.List(ctx, userSession)
		threadList, _ := ms.List(ctx, adminThread)
		if len(sessionList) != 1 {
			t.Errorf("Expected 1 in session list, got %d", len(sessionList))
		}
		if len(threadList) != 1 {
			t.Errorf("Expected 1 in thread list, got %d", len(threadList))
		}
	})
}

// TestMemoryCheckpointStore_Delete covers removal of an existing
// checkpoint and the no-op behavior for a missing ID.
func TestMemoryCheckpointStore_Delete(t *testing.T) {
	t.Parallel()

	t.Run("delete existing", func(t *testing.T) {
		t.Parallel()
		ms := NewMemoryCheckpointStore()
		ctx := context.Background()
		// Save a few checkpoints
		ids := []string{"keep-1", "delete-me", "keep-2"}
		for _, id := range ids {
			cp := &store.Checkpoint{
				ID:        id,
				NodeName:  "test-node",
				State:     "test",
				Timestamp: time.Now(),
				Version:   1,
			}
			err := ms.Save(ctx, cp)
			if err != nil {
				t.Fatalf("Failed to save %s: %v", id, err)
			}
		}
		err := ms.Delete(ctx, "delete-me")
		if err != nil {
			t.Errorf("Delete failed: %v", err)
		}
		// Verify it's gone
		_, err = ms.Load(ctx, "delete-me")
		if err == nil {
			t.Error("Deleted checkpoint should not load")
		}
		// Verify others are still there
		_, err = ms.Load(ctx, "keep-1")
		if err != nil {
			t.Error("keep-1 should still exist")
		}
		_, err = ms.Load(ctx, "keep-2")
		if err != nil {
			t.Error("keep-2 should still exist")
		}
	})

	t.Run("delete missing is no-op", func(t *testing.T) {
		t.Parallel()
		ms := NewMemoryCheckpointStore()
		ctx := context.Background()
		err := ms.Delete(ctx, "never-existed")
		if err != nil {
			t.Errorf("Should not error for missing checkpoint: %v", err)
		}
	})
}

// TestMemoryCheckpointStore_Clear verifies Clear removes only the
// checkpoints belonging to the targeted workflow.
func TestMemoryCheckpointStore_Clear(t *testing.T) {
	t.Parallel()
	ms := NewMemoryCheckpointStore()
	ctx := context.Background()
	// Create checkpoints for two different workflows
	workflowA := "data-pipeline-2024"
	workflowB := "ml-training-job-999"
	setupData := []struct {
		id       string
		workflow string
		version  int
	}{
		{"extract-step", workflowA, 1},
		{"transform-step", workflowA, 2},
		{"load-step", workflowA, 3},
		{"model-init", workflowB, 1},
		{"training-start", workflowB, 2},
	}
	for _, d := range setupData {
		cp := &store.Checkpoint{
			ID:        d.id,
			NodeName:  "processor",
			State:     "running",
			Timestamp: time.Now(),
			Version:   d.version,
			Metadata: map[string]any{
				"workflow_id": d.workflow,
			},
		}
		err := ms.Save(ctx, cp)
		if err != nil {
			t.Fatalf("Failed to save %s: %v", d.id, err)
		}
	}
	// Verify initial state
	aList, _ := ms.List(ctx, workflowA)
	bList, _ := ms.List(ctx, workflowB)
	if len(aList) != 3 || len(bList) != 2 {
		t.Fatalf("Initial setup wrong: a=%d, b=%d", len(aList), len(bList))
	}
	err := ms.Clear(ctx, workflowA)
	if err != nil {
		t.Fatalf("Clear failed: %v", err)
	}
	// Workflow A should be empty
	aList, _ = ms.List(ctx, workflowA)
	if len(aList) != 0 {
		t.Errorf("Workflow A should be empty, has %d", len(aList))
	}
	// Workflow B should be untouched
	bList, _ = ms.List(ctx, workflowB)
	if len(bList) != 2 {
		t.Errorf("Workflow B should still have 2, has %d", len(bList))
	}
	// Verify individual checkpoints
	_, err = ms.Load(ctx, "extract-step")
	if err == nil {
		t.Error("extract-step should be cleared")
	}
	_, err = ms.Load(ctx, "model-init")
	if err != nil {
		t.Error("model-init should still exist")
	}
}

// TestMemoryCheckpointStore_ThreadSafety hammers Save/Load from many
// goroutines concurrently; run with -race to surface data races.
// Note: uses Go 1.22+ range-over-int syntax.
func TestMemoryCheckpointStore_ThreadSafety(t *testing.T) {
	t.Parallel()
	ms := NewMemoryCheckpointStore()
	ctx := context.Background()
	// Simulate multiple API endpoints writing checkpoints concurrently
	numGoroutines := 10
	checkpointsPerGoroutine := 5
	done := make(chan bool, numGoroutines)
	errs := make(chan error, numGoroutines)
	// Start multiple "workers"
	for i := range numGoroutines {
		go func(workerID int) {
			defer func() { done <- true }()
			for j := range checkpointsPerGoroutine {
				cp := &store.Checkpoint{
					ID:       fmt.Sprintf("worker-%d-step-%d", workerID, j),
					NodeName: fmt.Sprintf("handler-%d", workerID),
					State:    fmt.Sprintf("processing-step-%d", j),
					Metadata: map[string]any{
						"worker_id":   workerID,
						"step_number": j,
						"timestamp":   time.Now().UnixNano(),
					},
					Timestamp: time.Now(),
					Version:   j + 1,
				}
				// Concurrent save
				if err := ms.Save(ctx, cp); err != nil {
					errs <- fmt.Errorf("worker %d save step %d failed: %v", workerID, j, err)
					return
				}
				// Concurrent load to verify it saved
				loaded, err := ms.Load(ctx, cp.ID)
				if err != nil {
					errs <- fmt.Errorf("worker %d load step %d failed: %v", workerID, j, err)
					return
				}
				if loaded.ID != cp.ID {
					errs <- fmt.Errorf("worker %d step %d ID mismatch", workerID, j)
					return
				}
			}
		}(i)
	}
	// Wait for all workers
	for range numGoroutines {
		select {
		case <-done:
			// Worker finished
		case err := <-errs:
			t.Errorf("Worker error: %v", err)
		case <-time.After(10 * time.Second):
			t.Fatal("Test timed out")
		}
	}
	// Verify all checkpoints are there
	for i := range numGoroutines {
		for j := range checkpointsPerGoroutine {
			id := fmt.Sprintf("worker-%d-step-%d", i, j)
			_, err := ms.Load(ctx, id)
			if err != nil {
				t.Errorf("Checkpoint %s missing", id)
			}
		}
	}
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/file/file.go
store/file/file.go
package file

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"slices"
	"sort"
	"sync"

	"github.com/smallnest/langgraphgo/store"
)

// FileCheckpointStore provides file-based checkpoint storage.
// Each checkpoint is stored as <path>/<ID>.json; a per-thread index of
// checkpoint IDs is kept under <path>/by_thread/<thread_id>.json.
// A RWMutex serializes access within this process; concurrent access
// from multiple processes is not coordinated.
type FileCheckpointStore struct {
	path  string
	mutex sync.RWMutex
}

// threadIndex is the on-disk format of a thread index file:
// thread_id -> []checkpoint IDs.
type threadIndex struct {
	Threads map[string][]string
}

// NewFileCheckpointStore creates a new file-based checkpoint store
// rooted at path, creating the checkpoint and index directories if
// they do not exist yet.
func NewFileCheckpointStore(path string) (store.CheckpointStore, error) {
	// Ensure directory exists
	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, fmt.Errorf("failed to create checkpoint directory: %w", err)
	}
	// Ensure index directory exists
	indexDir := filepath.Join(path, "by_thread")
	if err := os.MkdirAll(indexDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create index directory: %w", err)
	}
	return &FileCheckpointStore{path: path}, nil
}

// checkpointFile returns the JSON file path for a checkpoint ID.
// NOTE(review): an ID containing path separators would escape the
// store directory — confirm callers only use flat IDs.
func (f *FileCheckpointStore) checkpointFile(id string) string {
	return filepath.Join(f.path, fmt.Sprintf("%s.json", id))
}

// readCheckpointFile reads and unmarshals one checkpoint file.
// It returns nil when the file is unreadable or contains invalid JSON,
// so directory scans can silently skip bad entries.
func readCheckpointFile(name string) *store.Checkpoint {
	data, err := os.ReadFile(name)
	if err != nil {
		return nil
	}
	var cp store.Checkpoint
	if err := json.Unmarshal(data, &cp); err != nil {
		return nil
	}
	return &cp
}

// matchesExecution reports whether the checkpoint belongs to the given
// grouping ID via any of the recognized metadata keys. A missing key
// yields "", matching the original field-by-field comparison.
func matchesExecution(checkpoint *store.Checkpoint, executionID string) bool {
	for _, key := range []string{"execution_id", "thread_id", "session_id", "workflow_id"} {
		if id, _ := checkpoint.Metadata[key].(string); id == executionID {
			return true
		}
	}
	return false
}

// sortByVersion orders checkpoints by ascending Version so the latest
// checkpoint is last.
func sortByVersion(cps []*store.Checkpoint) {
	sort.Slice(cps, func(i, j int) bool {
		return cps[i].Version < cps[j].Version
	})
}

// Save implements CheckpointStore for file storage. The checkpoint is
// marshaled to <path>/<ID>.json; if it carries a thread_id it is also
// recorded in the thread index. Index maintenance is best-effort: its
// errors are deliberately ignored so a save never fails on the index.
func (f *FileCheckpointStore) Save(_ context.Context, checkpoint *store.Checkpoint) error {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	data, err := json.Marshal(checkpoint)
	if err != nil {
		return fmt.Errorf("failed to marshal checkpoint: %w", err)
	}
	if err := os.WriteFile(f.checkpointFile(checkpoint.ID), data, 0600); err != nil {
		return fmt.Errorf("failed to write checkpoint file: %w", err)
	}

	// Update thread_id index (best effort; the original built-and-
	// discarded an fmt.Errorf here, which was dead code).
	if threadID, ok := checkpoint.Metadata["thread_id"].(string); ok && threadID != "" {
		_ = f.addToThreadIndex(threadID, checkpoint.ID)
	}
	return nil
}

// Load implements CheckpointStore for file storage. A missing file is
// reported as "checkpoint not found".
func (f *FileCheckpointStore) Load(_ context.Context, checkpointID string) (*store.Checkpoint, error) {
	f.mutex.RLock()
	defer f.mutex.RUnlock()

	data, err := os.ReadFile(f.checkpointFile(checkpointID))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, fmt.Errorf("checkpoint not found: %s", checkpointID)
		}
		return nil, fmt.Errorf("failed to read checkpoint file: %w", err)
	}
	var checkpoint store.Checkpoint
	if err := json.Unmarshal(data, &checkpoint); err != nil {
		return nil, fmt.Errorf("failed to unmarshal checkpoint: %w", err)
	}
	return &checkpoint, nil
}

// List implements CheckpointStore for file storage. It scans every
// checkpoint file under the store directory and keeps those whose
// execution_id, thread_id, session_id or workflow_id metadata matches
// executionID, sorted by ascending version.
func (f *FileCheckpointStore) List(_ context.Context, executionID string) ([]*store.Checkpoint, error) {
	f.mutex.RLock()
	defer f.mutex.RUnlock()

	files, err := os.ReadDir(f.path)
	if err != nil {
		return nil, fmt.Errorf("failed to read checkpoint directory: %w", err)
	}
	var checkpoints []*store.Checkpoint
	for _, file := range files {
		// Skip directories (including by_thread) and non-JSON files.
		if file.IsDir() || filepath.Ext(file.Name()) != ".json" {
			continue
		}
		cp := readCheckpointFile(filepath.Join(f.path, file.Name()))
		if cp == nil {
			continue // unreadable or invalid file
		}
		if matchesExecution(cp, executionID) {
			checkpoints = append(checkpoints, cp)
		}
	}
	sortByVersion(checkpoints)
	return checkpoints, nil
}

// ListByThread returns all checkpoints for a specific thread_id using
// the on-disk index, falling back to a full directory scan when the
// index file cannot be read or parsed.
func (f *FileCheckpointStore) ListByThread(_ context.Context, threadID string) ([]*store.Checkpoint, error) {
	f.mutex.RLock()
	defer f.mutex.RUnlock()

	checkpointIDs, err := f.loadThreadIndex(threadID)
	if err != nil {
		// Index is unreadable/corrupt: fall back to scanning all files.
		return f.listByThreadScan(threadID)
	}
	if len(checkpointIDs) == 0 {
		return []*store.Checkpoint{}, nil
	}

	var checkpoints []*store.Checkpoint
	for _, id := range checkpointIDs {
		if cp := readCheckpointFile(f.checkpointFile(id)); cp != nil {
			checkpoints = append(checkpoints, cp)
		}
	}
	sortByVersion(checkpoints)
	return checkpoints, nil
}

// GetLatestByThread returns the checkpoint with the highest version
// for a thread_id, or an error if the thread has no checkpoints.
func (f *FileCheckpointStore) GetLatestByThread(ctx context.Context, threadID string) (*store.Checkpoint, error) {
	checkpoints, err := f.ListByThread(ctx, threadID)
	if err != nil {
		return nil, err
	}
	if len(checkpoints) == 0 {
		return nil, fmt.Errorf("no checkpoints found for thread: %s", threadID)
	}
	// ListByThread sorts ascending by version, so the last is latest.
	return checkpoints[len(checkpoints)-1], nil
}

// Delete implements CheckpointStore for file storage. The checkpoint
// is read first to recover its thread_id so the index can be cleaned
// up. Deleting a missing checkpoint is a no-op.
func (f *FileCheckpointStore) Delete(_ context.Context, checkpointID string) error {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	filename := f.checkpointFile(checkpointID)
	data, err := os.ReadFile(filename)
	if err != nil {
		if os.IsNotExist(err) {
			return nil // already deleted
		}
		return fmt.Errorf("failed to read checkpoint file: %w", err)
	}
	var checkpoint store.Checkpoint
	if err := json.Unmarshal(data, &checkpoint); err != nil {
		return fmt.Errorf("failed to unmarshal checkpoint: %w", err)
	}
	if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to delete checkpoint file: %w", err)
	}
	// Best effort: a stale index entry just means future reads skip it.
	if threadID, ok := checkpoint.Metadata["thread_id"].(string); ok && threadID != "" {
		_ = f.removeFromThreadIndex(threadID, checkpointID)
	}
	return nil
}

// Clear implements CheckpointStore for file storage. It deletes every
// checkpoint matching executionID, collecting (rather than aborting
// on) individual delete failures.
func (f *FileCheckpointStore) Clear(ctx context.Context, executionID string) error {
	checkpoints, err := f.List(ctx, executionID)
	if err != nil {
		return err
	}
	var errs []error
	for _, cp := range checkpoints {
		if err := f.Delete(ctx, cp.ID); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("failed to clear some checkpoints: %v", errs)
	}
	return nil
}

// getThreadIndexPath returns the index file path for a thread_id.
func (f *FileCheckpointStore) getThreadIndexPath(threadID string) string {
	return filepath.Join(f.path, "by_thread", fmt.Sprintf("%s.json", threadID))
}

// loadThreadIndex returns the checkpoint IDs recorded for threadID.
// A missing index file yields an empty slice, not an error.
func (f *FileCheckpointStore) loadThreadIndex(threadID string) ([]string, error) {
	indexPath := f.getThreadIndexPath(threadID)
	data, err := os.ReadFile(indexPath)
	if err != nil {
		if os.IsNotExist(err) {
			return []string{}, nil
		}
		return nil, err
	}
	var index threadIndex
	if err := json.Unmarshal(data, &index); err != nil {
		return nil, err
	}
	if index.Threads == nil {
		return []string{}, nil
	}
	ids, ok := index.Threads[threadID]
	if !ok {
		return []string{}, nil
	}
	return ids, nil
}

// addToThreadIndex records checkpointID in the thread's index file.
// The ID is added only once: the previous implementation appended
// unconditionally, so overwriting a checkpoint duplicated its index
// entry and ListByThread returned it twice.
func (f *FileCheckpointStore) addToThreadIndex(threadID, checkpointID string) error {
	indexPath := f.getThreadIndexPath(threadID)

	// Load existing index; ignore read/parse errors and start fresh.
	var index threadIndex
	if data, err := os.ReadFile(indexPath); err == nil {
		_ = json.Unmarshal(data, &index)
	}
	if index.Threads == nil {
		index.Threads = make(map[string][]string)
	}
	if slices.Contains(index.Threads[threadID], checkpointID) {
		return nil // already indexed; avoid duplicates on overwrite
	}
	index.Threads[threadID] = append(index.Threads[threadID], checkpointID)

	data, err := json.Marshal(index)
	if err != nil {
		return err
	}
	return os.WriteFile(indexPath, data, 0600)
}

// removeFromThreadIndex deletes checkpointID from the thread's index
// file. A missing index file is a no-op.
func (f *FileCheckpointStore) removeFromThreadIndex(threadID, checkpointID string) error {
	indexPath := f.getThreadIndexPath(threadID)

	var index threadIndex
	data, err := os.ReadFile(indexPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	if err := json.Unmarshal(data, &index); err != nil {
		return err
	}
	if index.Threads == nil {
		return nil
	}
	ids, ok := index.Threads[threadID]
	if !ok {
		return nil
	}
	if i := slices.Index(ids, checkpointID); i >= 0 {
		index.Threads[threadID] = append(ids[:i], ids[i+1:]...)
	}

	// Write the (possibly unchanged) index back, as the original did.
	data, err = json.Marshal(index)
	if err != nil {
		return err
	}
	return os.WriteFile(indexPath, data, 0600)
}

// listByThreadScan is the fallback that scans every checkpoint file
// and filters on the thread_id metadata key. Directories (including
// by_thread) are skipped by the IsDir check.
func (f *FileCheckpointStore) listByThreadScan(threadID string) ([]*store.Checkpoint, error) {
	files, err := os.ReadDir(f.path)
	if err != nil {
		return nil, fmt.Errorf("failed to read checkpoint directory: %w", err)
	}
	var checkpoints []*store.Checkpoint
	for _, file := range files {
		if file.IsDir() || filepath.Ext(file.Name()) != ".json" {
			continue
		}
		cp := readCheckpointFile(filepath.Join(f.path, file.Name()))
		if cp == nil {
			continue
		}
		if cpThreadID, ok := cp.Metadata["thread_id"].(string); ok && cpThreadID == threadID {
			checkpoints = append(checkpoints, cp)
		}
	}
	sortByVersion(checkpoints)
	return checkpoints, nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/file/doc.go
store/file/doc.go
// Package file provides file-based checkpoint storage implementation.
//
// Each checkpoint is persisted as a standalone JSON file under the
// store directory, with per-thread index files maintained in a
// "by_thread" subdirectory (see file.go).
package file
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/file/file_test.go
store/file/file_test.go
package file import ( "context" "fmt" "os" "path/filepath" "testing" "time" "github.com/smallnest/langgraphgo/store" ) func TestFileCheckpointStore_New(t *testing.T) { t.Parallel() t.Run("creates directory if missing", func(t *testing.T) { t.Parallel() tempDir := t.TempDir() checkpointPath := filepath.Join(tempDir, "checkpoints") store, err := NewFileCheckpointStore(checkpointPath) if err != nil { t.Fatalf("Failed to create store: %v", err) } if store == nil { t.Fatal("Store should not be nil") } // Verify directory exists if _, err := os.Stat(checkpointPath); os.IsNotExist(err) { t.Error("Directory should have been created") } }) t.Run("works with existing directory", func(t *testing.T) { t.Parallel() tempDir := t.TempDir() // Create directory first err := os.MkdirAll(tempDir, 0755) if err != nil { t.Fatalf("Failed to create test directory: %v", err) } store, err := NewFileCheckpointStore(tempDir) if err != nil { t.Fatalf("Failed to create store: %v", err) } if store == nil { t.Fatal("Store should not be nil") } }) } func TestFileCheckpointStore_SaveAndLoad(t *testing.T) { t.Parallel() ctx := context.Background() now := time.Now() t.Run("save creates file", func(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } cp := &store.Checkpoint{ ID: "user-session-123", NodeName: "login-handler", State: "authenticated", Timestamp: now, Version: 1, Metadata: map[string]any{ "user_id": "john.doe@example.com", "ip": "192.168.1.100", }, } err = fs.Save(ctx, cp) if err != nil { t.Fatalf("Failed to save: %v", err) } // Check file exists filename := filepath.Join(fs.(*FileCheckpointStore).path, cp.ID+".json") if _, err := os.Stat(filename); os.IsNotExist(err) { t.Error("Checkpoint file should exist") } }) t.Run("load returns saved checkpoint", func(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } cp := 
&store.Checkpoint{ ID: "user-session-123", NodeName: "login-handler", State: "authenticated", Timestamp: now, Version: 1, Metadata: map[string]any{ "user_id": "john.doe@example.com", "ip": "192.168.1.100", }, } err = fs.Save(ctx, cp) if err != nil { t.Fatalf("Failed to save: %v", err) } loaded, err := fs.Load(ctx, cp.ID) if err != nil { t.Fatalf("Failed to load: %v", err) } if loaded.ID != cp.ID { t.Errorf("Expected ID %s, got %s", cp.ID, loaded.ID) } if loaded.NodeName != cp.NodeName { t.Errorf("Expected NodeName %s, got %s", cp.NodeName, loaded.NodeName) } if loaded.State != cp.State { t.Errorf("Expected State %s, got %s", cp.State, loaded.State) } if loaded.Version != cp.Version { t.Errorf("Expected Version %d, got %d", cp.Version, loaded.Version) } // Check metadata if userID, ok := loaded.Metadata["user_id"].(string); !ok || userID != "john.doe@example.com" { t.Error("User ID metadata mismatch") } }) t.Run("save complex state", func(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } complexCP := &store.Checkpoint{ ID: "order-flow-456", NodeName: "payment-processor", State: map[string]any{ "order_id": 789, "items": []string{"widget", "gadget"}, "total_amount": 99.99, "currency": "USD", }, Timestamp: now, Version: 3, Metadata: map[string]any{ "session_id": "sess-xyz-789", }, } err = fs.Save(ctx, complexCP) if err != nil { t.Fatalf("Failed to save complex checkpoint: %v", err) } loaded, err := fs.Load(ctx, complexCP.ID) if err != nil { t.Fatalf("Failed to load complex checkpoint: %v", err) } // Verify complex state state, ok := loaded.State.(map[string]any) if !ok { t.Fatal("State should be a map") } if state["order_id"] != float64(789) { // JSON numbers are float64 t.Errorf("Expected order_id 789, got %v", state["order_id"]) } }) t.Run("load missing checkpoint", func(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to 
create store: %v", err) } _, err = fs.Load(ctx, "does-not-exist") if err == nil { t.Error("Should return error for missing checkpoint") } }) } func TestFileCheckpointStore_List(t *testing.T) { t.Parallel() t.Run("filters by session_id", func(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } ctx := context.Background() sessionID := "web-session-2024" // Add checkpoints for this session checkpoints := []struct { id string node string version int }{ {"page-visit-1", "home-page", 1}, {"page-visit-2", "product-page", 2}, } for _, cp := range checkpoints { fullCP := &store.Checkpoint{ ID: cp.id, NodeName: cp.node, State: "processing", Timestamp: time.Now(), Version: cp.version, Metadata: map[string]any{ "session_id": sessionID, }, } err := fs.Save(ctx, fullCP) if err != nil { t.Fatalf("Failed to save checkpoint %s: %v", cp.id, err) } } results, err := fs.List(ctx, sessionID) if err != nil { t.Fatalf("Failed to list: %v", err) } if len(results) != 2 { t.Errorf("Expected 2 checkpoints for session, got %d", len(results)) } // Check they're sorted by version if results[0].Version > results[1].Version { t.Error("Results should be sorted by version ascending") } }) t.Run("filters by thread_id", func(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } ctx := context.Background() threadID := "user-john-thread-1" cp := &store.Checkpoint{ ID: "cart-action-1", NodeName: "add-to-cart", State: "processing", Timestamp: time.Now(), Version: 1, Metadata: map[string]any{ "thread_id": threadID, }, } err = fs.Save(ctx, cp) if err != nil { t.Fatalf("Failed to save checkpoint: %v", err) } results, err := fs.List(ctx, threadID) if err != nil { t.Fatalf("Failed to list: %v", err) } if len(results) != 1 { t.Errorf("Expected 1 checkpoint for thread, got %d", len(results)) } if results[0].ID != "cart-action-1" { 
t.Errorf("Expected cart-action-1, got %s", results[0].ID) } }) t.Run("empty result for unknown session", func(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } ctx := context.Background() results, err := fs.List(ctx, "unknown-session") if err != nil { t.Fatalf("Failed to list: %v", err) } if len(results) != 0 { t.Errorf("Expected 0 checkpoints, got %d", len(results)) } }) } func TestFileCheckpointStore_Delete(t *testing.T) { t.Parallel() t.Run("deletes existing checkpoint", func(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } ctx := context.Background() storePath := fs.(*FileCheckpointStore).path cp := &store.Checkpoint{ ID: "temp-checkpoint", NodeName: "test-node", State: "test-state", Timestamp: time.Now(), Version: 1, } err = fs.Save(ctx, cp) if err != nil { t.Fatalf("Failed to save checkpoint: %v", err) } // Verify file exists filename := filepath.Join(storePath, cp.ID+".json") if _, err := os.Stat(filename); os.IsNotExist(err) { t.Fatal("Checkpoint file should exist") } err = fs.Delete(ctx, cp.ID) if err != nil { t.Fatalf("Failed to delete: %v", err) } // File should be gone if _, err := os.Stat(filename); !os.IsNotExist(err) { t.Error("Checkpoint file should be deleted") } // Should not be loadable _, err = fs.Load(ctx, cp.ID) if err == nil { t.Error("Should not be able to load deleted checkpoint") } }) t.Run("deleting non-existing is no-op", func(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } ctx := context.Background() err = fs.Delete(ctx, "never-existed") if err != nil { t.Errorf("Delete should not error for non-existing checkpoint: %v", err) } }) } func TestFileCheckpointStore_Clear(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed 
to create store: %v", err) } ctx := context.Background() // Create checkpoints for two different sessions session1 := "user-session-alpha" session2 := "user-session-beta" checkpoints := []struct { id string session string version int }{ {"alpha-1", session1, 1}, {"alpha-2", session1, 2}, {"beta-1", session2, 1}, } for _, cp := range checkpoints { fullCP := &store.Checkpoint{ ID: cp.id, NodeName: "processor", State: "running", Timestamp: time.Now(), Version: cp.version, Metadata: map[string]any{ "session_id": cp.session, }, } err := fs.Save(ctx, fullCP) if err != nil { t.Fatalf("Failed to save checkpoint %s: %v", cp.id, err) } } // Verify we have checkpoints alphaList, _ := fs.List(ctx, session1) if len(alphaList) != 2 { t.Fatalf("Expected 2 alpha checkpoints, got %d", len(alphaList)) } err = fs.Clear(ctx, session1) if err != nil { t.Fatalf("Failed to clear session: %v", err) } // Alpha session should be empty alphaList, _ = fs.List(ctx, session1) if len(alphaList) != 0 { t.Errorf("Expected 0 alpha checkpoints after clear, got %d", len(alphaList)) } // Beta session should still have its checkpoint betaList, _ := fs.List(ctx, session2) if len(betaList) != 1 { t.Errorf("Expected 1 beta checkpoint, got %d", len(betaList)) } } func TestFileCheckpointStore_Permissions(t *testing.T) { t.Parallel() if os.Getenv("CI") != "" { t.Skip("Skipping permission test in CI") } fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } ctx := context.Background() storePath := fs.(*FileCheckpointStore).path cp := &store.Checkpoint{ ID: "secret-checkpoint", NodeName: "auth-handler", State: "authenticated", Timestamp: time.Now(), Version: 1, } err = fs.Save(ctx, cp) if err != nil { t.Fatalf("Failed to save checkpoint: %v", err) } // Check file permissions filename := filepath.Join(storePath, cp.ID+".json") fileInfo, err := os.Stat(filename) if err != nil { t.Fatalf("Failed to stat file: %v", err) } // On Unix, files should be 
readable/writable only by owner if os.Getenv("GOOS") != "windows" { perm := fileInfo.Mode().Perm() if perm != 0600 { // Allow for more permissive umask settings (like 0022 => 0644) if perm != 0644 { t.Logf("File permissions: %o (expected 0600 or 0644 due to umask)", perm) } } } } func TestFileCheckpointStore_Concurrent(t *testing.T) { t.Parallel() fs, err := NewFileCheckpointStore(t.TempDir()) if err != nil { t.Fatalf("Failed to create store: %v", err) } ctx := context.Background() numWorkers := 5 checkpointsPerWorker := 3 done := make(chan bool, numWorkers) errs := make(chan error, numWorkers) // Launch workers for i := range numWorkers { go func(workerID int) { defer func() { done <- true }() for j := range checkpointsPerWorker { cp := &store.Checkpoint{ ID: fmt.Sprintf("worker-%d-checkpoint-%d", workerID, j), NodeName: fmt.Sprintf("worker-%d-processor", workerID), State: fmt.Sprintf("state-%d", j), Metadata: map[string]any{ "worker_id": workerID, "step": j, }, Timestamp: time.Now(), Version: j + 1, } // Save if err := fs.Save(ctx, cp); err != nil { errs <- fmt.Errorf("worker %d save failed: %v", workerID, err) return } // Load loaded, err := fs.Load(ctx, cp.ID) if err != nil { errs <- fmt.Errorf("worker %d load failed: %v", workerID, err) return } if loaded.ID != cp.ID { errs <- fmt.Errorf("worker %d ID mismatch", workerID) return } } }(i) } // Wait for workers for range numWorkers { select { case <-done: // Worker completed case err := <-errs: t.Errorf("Worker error: %v", err) case <-time.After(5 * time.Second): t.Fatal("Test timed out") } } // Verify all checkpoints exist expectedTotal := numWorkers * checkpointsPerWorker files, err := os.ReadDir(fs.(*FileCheckpointStore).path) if err != nil { t.Fatalf("Failed to read directory: %v", err) } jsonCount := 0 for _, file := range files { if !file.IsDir() && filepath.Ext(file.Name()) == ".json" { jsonCount++ } } if jsonCount != expectedTotal { t.Errorf("Expected %d checkpoint files, got %d", expectedTotal, 
jsonCount) } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/postgres/postgres.go
store/postgres/postgres.go
package postgres import ( "context" "encoding/json" "fmt" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgconn" "github.com/jackc/pgx/v5/pgxpool" "github.com/smallnest/langgraphgo/graph" ) // DBPool defines the interface for database connection pool type DBPool interface { Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row Close() } // PostgresCheckpointStore implements graph.CheckpointStore using PostgreSQL type PostgresCheckpointStore struct { pool DBPool tableName string } // PostgresOptions configuration for Postgres connection type PostgresOptions struct { ConnString string TableName string // Default "checkpoints" } // NewPostgresCheckpointStore creates a new Postgres checkpoint store func NewPostgresCheckpointStore(ctx context.Context, opts PostgresOptions) (*PostgresCheckpointStore, error) { pool, err := pgxpool.New(ctx, opts.ConnString) if err != nil { return nil, fmt.Errorf("unable to create connection pool: %w", err) } tableName := opts.TableName if tableName == "" { tableName = "checkpoints" } return &PostgresCheckpointStore{ pool: pool, tableName: tableName, }, nil } // NewPostgresCheckpointStoreWithPool creates a new Postgres checkpoint store with an existing pool // Useful for testing with mocks func NewPostgresCheckpointStoreWithPool(pool DBPool, tableName string) *PostgresCheckpointStore { if tableName == "" { tableName = "checkpoints" } return &PostgresCheckpointStore{ pool: pool, tableName: tableName, } } // InitSchema creates the necessary table if it doesn't exist func (s *PostgresCheckpointStore) InitSchema(ctx context.Context) error { query := fmt.Sprintf(` CREATE TABLE IF NOT EXISTS %s ( id TEXT PRIMARY KEY, execution_id TEXT NOT NULL, thread_id TEXT, node_name TEXT NOT NULL, state JSONB NOT NULL, metadata JSONB, timestamp TIMESTAMPTZ NOT NULL, version INTEGER NOT NULL ); 
CREATE INDEX IF NOT EXISTS idx_%s_execution_id ON %s (execution_id); CREATE INDEX IF NOT EXISTS idx_%s_thread_id ON %s (thread_id); CREATE INDEX IF NOT EXISTS idx_%s_execution_thread ON %s (execution_id, thread_id); `, s.tableName, s.tableName, s.tableName, s.tableName, s.tableName, s.tableName, s.tableName) _, err := s.pool.Exec(ctx, query) if err != nil { return fmt.Errorf("failed to create schema: %w", err) } return nil } // MigrateSchema adds the thread_id column if it doesn't exist (for existing installations) func (s *PostgresCheckpointStore) MigrateSchema(ctx context.Context) error { // Add thread_id column if it doesn't exist migrationQuery := fmt.Sprintf(` DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.columns WHERE table_name = '%s' AND column_name = 'thread_id' ) THEN ALTER TABLE %s ADD COLUMN thread_id TEXT; END IF; IF NOT EXISTS ( SELECT 1 FROM pg_indexes WHERE indexname = 'idx_%s_thread_id' ) THEN CREATE INDEX idx_%s_thread_id ON %s (thread_id); END IF; IF NOT EXISTS ( SELECT 1 FROM pg_indexes WHERE indexname = 'idx_%s_execution_thread' ) THEN CREATE INDEX idx_%s_execution_thread ON %s (execution_id, thread_id); END IF; END $$; `, s.tableName, s.tableName, s.tableName, s.tableName, s.tableName, s.tableName, s.tableName, s.tableName) _, err := s.pool.Exec(ctx, migrationQuery) if err != nil { return fmt.Errorf("failed to migrate schema: %w", err) } return nil } // Close closes the connection pool func (s *PostgresCheckpointStore) Close() { s.pool.Close() } // Save stores a checkpoint func (s *PostgresCheckpointStore) Save(ctx context.Context, checkpoint *graph.Checkpoint) error { stateJSON, err := json.Marshal(checkpoint.State) if err != nil { return fmt.Errorf("failed to marshal state: %w", err) } metadataJSON, err := json.Marshal(checkpoint.Metadata) if err != nil { return fmt.Errorf("failed to marshal metadata: %w", err) } executionID := "" if id, ok := checkpoint.Metadata["execution_id"].(string); ok { executionID = id } threadID := "" 
if id, ok := checkpoint.Metadata["thread_id"].(string); ok { threadID = id } query := fmt.Sprintf(` INSERT INTO %s (id, execution_id, thread_id, node_name, state, metadata, timestamp, version) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (id) DO UPDATE SET execution_id = EXCLUDED.execution_id, thread_id = EXCLUDED.thread_id, node_name = EXCLUDED.node_name, state = EXCLUDED.state, metadata = EXCLUDED.metadata, timestamp = EXCLUDED.timestamp, version = EXCLUDED.version `, s.tableName) _, err = s.pool.Exec(ctx, query, checkpoint.ID, executionID, threadID, checkpoint.NodeName, stateJSON, metadataJSON, checkpoint.Timestamp, checkpoint.Version, ) if err != nil { return fmt.Errorf("failed to save checkpoint: %w", err) } return nil } // Load retrieves a checkpoint by ID func (s *PostgresCheckpointStore) Load(ctx context.Context, checkpointID string) (*graph.Checkpoint, error) { query := fmt.Sprintf(` SELECT id, node_name, state, metadata, timestamp, version FROM %s WHERE id = $1 `, s.tableName) var cp graph.Checkpoint var stateJSON []byte var metadataJSON []byte err := s.pool.QueryRow(ctx, query, checkpointID).Scan( &cp.ID, &cp.NodeName, &stateJSON, &metadataJSON, &cp.Timestamp, &cp.Version, ) if err != nil { if err == pgx.ErrNoRows { return nil, fmt.Errorf("checkpoint not found: %s", checkpointID) } return nil, fmt.Errorf("failed to load checkpoint: %w", err) } if err := json.Unmarshal(stateJSON, &cp.State); err != nil { return nil, fmt.Errorf("failed to unmarshal state: %w", err) } if len(metadataJSON) > 0 { if err := json.Unmarshal(metadataJSON, &cp.Metadata); err != nil { return nil, fmt.Errorf("failed to unmarshal metadata: %w", err) } } return &cp, nil } // List returns all checkpoints for a given execution func (s *PostgresCheckpointStore) List(ctx context.Context, executionID string) ([]*graph.Checkpoint, error) { query := fmt.Sprintf(` SELECT id, node_name, state, metadata, timestamp, version FROM %s WHERE execution_id = $1 ORDER BY timestamp ASC `, 
s.tableName) rows, err := s.pool.Query(ctx, query, executionID) if err != nil { return nil, fmt.Errorf("failed to list checkpoints: %w", err) } defer rows.Close() var checkpoints []*graph.Checkpoint for rows.Next() { var cp graph.Checkpoint var stateJSON []byte var metadataJSON []byte err := rows.Scan( &cp.ID, &cp.NodeName, &stateJSON, &metadataJSON, &cp.Timestamp, &cp.Version, ) if err != nil { return nil, fmt.Errorf("failed to scan checkpoint row: %w", err) } if err := json.Unmarshal(stateJSON, &cp.State); err != nil { return nil, fmt.Errorf("failed to unmarshal state: %w", err) } if len(metadataJSON) > 0 { if err := json.Unmarshal(metadataJSON, &cp.Metadata); err != nil { return nil, fmt.Errorf("failed to unmarshal metadata: %w", err) } } checkpoints = append(checkpoints, &cp) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("error iterating checkpoint rows: %w", err) } return checkpoints, nil } // ListByThread returns all checkpoints for a specific thread_id func (s *PostgresCheckpointStore) ListByThread(ctx context.Context, threadID string) ([]*graph.Checkpoint, error) { query := fmt.Sprintf(` SELECT id, node_name, state, metadata, timestamp, version FROM %s WHERE thread_id = $1 ORDER BY timestamp ASC `, s.tableName) rows, err := s.pool.Query(ctx, query, threadID) if err != nil { return nil, fmt.Errorf("failed to list checkpoints by thread: %w", err) } defer rows.Close() var checkpoints []*graph.Checkpoint for rows.Next() { var cp graph.Checkpoint var stateJSON []byte var metadataJSON []byte err := rows.Scan( &cp.ID, &cp.NodeName, &stateJSON, &metadataJSON, &cp.Timestamp, &cp.Version, ) if err != nil { return nil, fmt.Errorf("failed to scan checkpoint row: %w", err) } if err := json.Unmarshal(stateJSON, &cp.State); err != nil { return nil, fmt.Errorf("failed to unmarshal state: %w", err) } if len(metadataJSON) > 0 { if err := json.Unmarshal(metadataJSON, &cp.Metadata); err != nil { return nil, fmt.Errorf("failed to unmarshal metadata: %w", err) } } 
checkpoints = append(checkpoints, &cp) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("error iterating checkpoint rows: %w", err) } return checkpoints, nil } // GetLatestByThread returns the latest checkpoint for a thread_id func (s *PostgresCheckpointStore) GetLatestByThread(ctx context.Context, threadID string) (*graph.Checkpoint, error) { query := fmt.Sprintf(` SELECT id, node_name, state, metadata, timestamp, version FROM %s WHERE thread_id = $1 ORDER BY version DESC LIMIT 1 `, s.tableName) var cp graph.Checkpoint var stateJSON []byte var metadataJSON []byte err := s.pool.QueryRow(ctx, query, threadID).Scan( &cp.ID, &cp.NodeName, &stateJSON, &metadataJSON, &cp.Timestamp, &cp.Version, ) if err != nil { if err == pgx.ErrNoRows { return nil, fmt.Errorf("no checkpoints found for thread: %s", threadID) } return nil, fmt.Errorf("failed to get latest checkpoint by thread: %w", err) } if err := json.Unmarshal(stateJSON, &cp.State); err != nil { return nil, fmt.Errorf("failed to unmarshal state: %w", err) } if len(metadataJSON) > 0 { if err := json.Unmarshal(metadataJSON, &cp.Metadata); err != nil { return nil, fmt.Errorf("failed to unmarshal metadata: %w", err) } } return &cp, nil } // Delete removes a checkpoint func (s *PostgresCheckpointStore) Delete(ctx context.Context, checkpointID string) error { query := fmt.Sprintf("DELETE FROM %s WHERE id = $1", s.tableName) _, err := s.pool.Exec(ctx, query, checkpointID) if err != nil { return fmt.Errorf("failed to delete checkpoint: %w", err) } return nil } // Clear removes all checkpoints for an execution func (s *PostgresCheckpointStore) Clear(ctx context.Context, executionID string) error { query := fmt.Sprintf("DELETE FROM %s WHERE execution_id = $1", s.tableName) _, err := s.pool.Exec(ctx, query, executionID) if err != nil { return fmt.Errorf("failed to clear checkpoints: %w", err) } return nil }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/postgres/doc.go
store/postgres/doc.go
// Package postgres provides PostgreSQL-backed storage for LangGraph Go checkpoints and state. // // This package implements durable checkpoint storage using PostgreSQL, allowing graph // executions to be persisted and resumed across different runs and processes. It's // designed for production use with robust error handling, connection pooling, and // migration support. // // # Key Features // // - Persistent storage of graph checkpoints // - Thread-safe operations // - Connection pooling for performance // - Automatic schema initialization and migrations // - Support for custom table names // - Efficient serialization of complex state objects // - Transaction support for data consistency // - TTL (time-to-live) support for automatic cleanup // // # Basic Usage // // import ( // "context" // "github.com/smallnest/langgraphgo/store/postgres" // ) // // // Create a PostgreSQL checkpoint store // store, err := postgres.NewPostgresCheckpointStore(ctx, postgres.PostgresOptions{ // ConnString: "postgres://user:password@localhost/langgraph?sslmode=disable", // TableName: "workflow_checkpoints", // Optional, defaults to "checkpoints" // }) // if err != nil { // return err // } // defer store.Close() // // // Initialize the database schema // if err := store.InitSchema(ctx); err != nil { // return err // } // // // Use with a graph // g := graph.NewStateGraph() // // ... configure graph ... // // // Enable checkpointing // compileConfig := graph.CompileConfig{ // CheckpointConfig: graph.CheckpointConfig{ // Store: store, // }, // } // // runnable, err := g.CompileWithOptions(compileConfig) // // # Configuration // // ## Connection String // // The connection string follows PostgreSQL format: // // postgres://[user[:password]@][host][:port][/dbname][?param1=value1&...] 
// // Examples: // // // Local PostgreSQL // "postgres://postgres:password@localhost:5432/langgraph?sslmode=disable" // // // With SSL // "postgres://user:pass@host:5432/dbname?sslmode=require" // // // Unix socket // "postgres:///dbname?host=/var/run/postgresql" // // ## Connection Pool // // For more control over connection pooling: // // pool, err := postgres.NewConnectionPool(ctx, postgres.PoolConfig{ // ConnString: connString, // MaxConns: 20, // Maximum connections // MinConns: 5, // Minimum connections // MaxConnLifetime: time.Hour, // Connection lifetime // MaxConnIdleTime: 30 * time.Minute, // HealthCheckPeriod: time.Minute, // }) // // store, err := postgres.NewCheckpointStoreFromPool(pool, "checkpoints") // // # Advanced Features // // ## Custom Table Configuration // // // Configure with custom table options // store, err := postgres.NewPostgresCheckpointStore(ctx, postgres.PostgresOptions{ // ConnString: connString, // TableName: "custom_checkpoints", // }) // // // The store will create the table with the following schema: // // CREATE TABLE IF NOT EXISTS custom_checkpoints ( // // id UUID PRIMARY KEY DEFAULT gen_random_uuid(), // // thread_id VARCHAR(255) NOT NULL, // // checkpoint_id VARCHAR(255) NOT NULL, // // checkpoint_data BYTEA, // // metadata JSONB, // // created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), // // expires_at TIMESTAMP WITH TIME ZONE, // // ); // // ## TTL Support // // // Create checkpoints that expire after 24 hours // checkpoint := &graph.Checkpoint{ // ID: "checkpoint-123", // ThreadID: "thread-456", // State: state, // Metadata: map[string]any{ // "expires_at": time.Now().Add(24 * time.Hour), // }, // } // // // The store will automatically clean up expired checkpoints // if err := store.Put(ctx, checkpoint); err != nil { // return err // } // // // Manually trigger cleanup // deleted, err := store.CleanupExpired(ctx) // fmt.Printf("Deleted %d expired checkpoints\n", deleted) // // ## Batch Operations // // // Save multiple 
checkpoints in a transaction // checkpoints := []*graph.Checkpoint{checkpoint1, checkpoint2, checkpoint3} // if err := store.PutBatch(ctx, checkpoints); err != nil { // return err // } // // // List checkpoints with pagination // listOptions := postgres.ListOptions{ // ThreadID: "thread-456", // Limit: 10, // Offset: 20, // OrderBy: "created_at DESC", // } // // checkpoints, total, err := store.ListWithOptions(ctx, listOptions) // // # Migration and Schema Management // // ## Automatic Migration // // // The store can automatically handle schema migrations // store, err := postgres.NewPostgresCheckpointStore(ctx, postgres.PostgresOptions{ // ConnString: connString, // AutoMigrate: true, // Automatically create/update schema // }) // // ## Manual Schema Control // // // Get current schema version // version, err := store.GetSchemaVersion(ctx) // if err != nil { // return err // } // // // Run migrations manually // if err := store.MigrateTo(ctx, targetVersion); err != nil { // return err // } // // # Performance Optimization // // ## Indexing Strategy // // // Create additional indexes for better performance // _, err := pool.Exec(ctx, ` // CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_checkpoints_thread_created // ON checkpoints (thread_id, created_at DESC); // // CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_checkpoints_expires // ON checkpoints (expires_at) WHERE expires_at IS NOT NULL; // `) // // ## Connection Tuning // // // Optimize for high-throughput scenarios // config := &pgxpool.Config{ // ConnString: connString, // MaxConns: 50, // More connections // MinConns: 10, // Maintain minimum // MaxConnLifetime: 30 * time.Minute, // MaxConnIdleTime: 5 * time.Minute, // HealthCheckPeriod: 30 * time.Second, // // Additional performance tuning // BeforeAcquire: func(ctx context.Context, conn *pgx.Conn) bool { // // Validate connection before use // return conn.Ping(ctx) == nil // }, // } // // # Monitoring and Metrics // // // Get connection pool statistics // stats 
:= store.Pool().Stat() // fmt.Printf("Total connections: %d\n", stats.TotalConns()) // fmt.Printf("Idle connections: %d\n", stats.IdleConns()) // fmt.Printf("Acquired connections: %d\n", stats.AcquiredConns()) // // // Monitor checkpoint operations // metrics := store.GetMetrics() // fmt.Printf("Puts: %d, Gets: %d, Lists: %d\n", // metrics.Puts, metrics.Gets, metrics.Lists) // // # Error Handling // // // Handle specific PostgreSQL errors // if err := store.Put(ctx, checkpoint); err != nil { // var pgErr *pgconn.PgError // if errors.As(err, &pgErr) { // switch pgErr.Code { // case "23505": // Unique violation // // Handle duplicate checkpoint // case "23503": // Foreign key violation // // Handle missing reference // case "23514": // Check constraint violation // // Handle constraint failure // default: // // Handle other PostgreSQL errors // } // } // } // // # Integration Examples // // ## With Supervisor Agent // // store, _ := postgres.NewPostgresCheckpointStore(ctx, postgres.PostgresOptions{ // ConnString: "postgres://...", // }) // // members := map[string]*graph.StateRunnableUntyped // "worker1": worker1, // "worker2": worker2, // } // // supervisor, _ := prebuilt.CreateSupervisor( // llm, // members, // "router", // graph.WithCheckpointing(graph.CheckpointConfig{ // Store: store, // }), // ) // // // Execute with automatic checkpointing // result, _ := supervisor.Invoke(ctx, input, // graph.WithExecutionID("supervisor-run-123")) // // // Resume from checkpoint // resumed, _ := supervisor.Resume(ctx, "supervisor-run-123", "checkpoint-456") // // ## With Streaming Execution // // // Store streaming checkpoints // streaming := graph.NewStreamingStateGraph(g, graph.StreamConfig{ // BufferSize: 100, // }) // // streaming.WithCheckpointing(graph.CheckpointConfig{ // Store: store, // CheckpointInterval: 5 * time.Second, // Checkpoint every 5 seconds // }) // // # Best Practices // // 1. Use connection pooling for production // 2. 
Set appropriate timeouts on operations // 3. Handle transient errors with retries // 4. Use transactions for multi-step operations // 5. Monitor connection pool health // 6. Implement proper cleanup for expired checkpoints // 7. Use SSL/TLS for connections in production // 8. Create indexes based on query patterns // 9. Set up proper backup strategies // 10. Test schema migrations in staging // // # Security Considerations // // - Use environment variables for credentials // - Enable SSL/TLS for all connections // - Implement proper user permissions // - Use connection limiting // - Audit checkpoint access // - Encrypt sensitive data before storage // - Use prepared statements to prevent SQL injection // // # Docker Integration // // Use with Docker Compose: // // ```yaml // version: '3.8' // services: // // langgraph: // image: your-app // environment: // - DB_URL=postgres://postgres:password@postgres:5432/langgraph // depends_on: // - postgres // // postgres: // image: postgres:15 // environment: // - POSTGRES_DB=langgraph // - POSTGRES_USER=postgres // - POSTGRES_PASSWORD=password // volumes: // - postgres_data:/var/lib/postgresql/data // ports: // - "5432:5432" // // volumes: // // postgres_data: // // ``` package postgres
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/postgres/postgres_test.go
store/postgres/postgres_test.go
package postgres import ( "context" "encoding/json" "errors" "regexp" "testing" "time" "github.com/jackc/pgx/v5" "github.com/pashagolub/pgxmock/v3" "github.com/smallnest/langgraphgo/graph" "github.com/stretchr/testify/assert" ) func TestPostgresCheckpointStore_Save(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cp := &graph.Checkpoint{ ID: "cp-1", NodeName: "node-a", State: map[string]any{"foo": "bar"}, Timestamp: time.Now(), Version: 1, Metadata: map[string]any{ "execution_id": "exec-1", }, } stateJSON, _ := json.Marshal(cp.State) metadataJSON, _ := json.Marshal(cp.Metadata) // Expect INSERT mock.ExpectExec(regexp.QuoteMeta("INSERT INTO checkpoints")). WithArgs( cp.ID, "exec-1", "", // thread_id cp.NodeName, stateJSON, metadataJSON, cp.Timestamp, cp.Version, ). WillReturnResult(pgxmock.NewResult("INSERT", 1)) err = store.Save(context.Background(), cp) assert.NoError(t, err) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Load(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cpID := "cp-1" timestamp := time.Now() state := map[string]any{"foo": "bar"} metadata := map[string]any{"execution_id": "exec-1"} stateJSON, _ := json.Marshal(state) metadataJSON, _ := json.Marshal(metadata) rows := pgxmock.NewRows([]string{"id", "node_name", "state", "metadata", "timestamp", "version"}). AddRow(cpID, "node-a", stateJSON, metadataJSON, timestamp, 1) mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE id = $1")). WithArgs(cpID). 
WillReturnRows(rows) loaded, err := store.Load(context.Background(), cpID) assert.NoError(t, err) assert.Equal(t, cpID, loaded.ID) assert.Equal(t, "node-a", loaded.NodeName) assert.Equal(t, 1, loaded.Version) // Check state loadedState, ok := loaded.State.(map[string]any) assert.True(t, ok) assert.Equal(t, "bar", loadedState["foo"]) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Save_WithoutExecutionID(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cp := &graph.Checkpoint{ ID: "cp-1", NodeName: "node-a", State: map[string]any{"foo": "bar"}, Timestamp: time.Now(), Version: 1, Metadata: map[string]any{}, // No execution_id } stateJSON, _ := json.Marshal(cp.State) metadataJSON, _ := json.Marshal(cp.Metadata) // Expect INSERT with empty execution_id mock.ExpectExec(regexp.QuoteMeta("INSERT INTO checkpoints")). WithArgs( cp.ID, "", // empty execution_id "", // empty thread_id cp.NodeName, stateJSON, metadataJSON, cp.Timestamp, cp.Version, ). 
WillReturnResult(pgxmock.NewResult("INSERT", 1)) err = store.Save(context.Background(), cp) assert.NoError(t, err) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Save_MarshalError(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") // Create invalid state that cannot be marshaled cp := &graph.Checkpoint{ ID: "cp-1", NodeName: "node-a", State: make(chan int), // channels cannot be marshaled to JSON Timestamp: time.Now(), Version: 1, } err = store.Save(context.Background(), cp) assert.Error(t, err) assert.Contains(t, err.Error(), "failed to marshal state") } func TestPostgresCheckpoint_Load_NotFound(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cpID := "non-existent" mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE id = $1")). WithArgs(cpID). WillReturnError(pgx.ErrNoRows) loaded, err := store.Load(context.Background(), cpID) assert.Error(t, err) assert.Nil(t, loaded) assert.Contains(t, err.Error(), "checkpoint not found") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpoint_Load_DatabaseError(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cpID := "cp-1" dbError := errors.New("database connection failed") mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE id = $1")). WithArgs(cpID). 
WillReturnError(dbError) loaded, err := store.Load(context.Background(), cpID) assert.Error(t, err) assert.Nil(t, loaded) assert.Contains(t, err.Error(), "failed to load checkpoint") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpoint_Load_InvalidStateJSON(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cpID := "cp-1" timestamp := time.Now() // Create row with invalid JSON rows := pgxmock.NewRows([]string{"id", "node_name", "state", "metadata", "timestamp", "version"}). AddRow(cpID, "node-a", []byte("{invalid json"), []byte("{}"), timestamp, 1) mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE id = $1")). WithArgs(cpID). WillReturnRows(rows) loaded, err := store.Load(context.Background(), cpID) assert.Error(t, err) assert.Nil(t, loaded) assert.Contains(t, err.Error(), "failed to unmarshal state") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpoint_Load_InvalidMetadataJSON(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cpID := "cp-1" timestamp := time.Now() state := map[string]any{"foo": "bar"} stateJSON, _ := json.Marshal(state) // Create row with invalid metadata JSON rows := pgxmock.NewRows([]string{"id", "node_name", "state", "metadata", "timestamp", "version"}). AddRow(cpID, "node-a", stateJSON, []byte("{invalid metadata json"), timestamp, 1) mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE id = $1")). WithArgs(cpID). 
WillReturnRows(rows) loaded, err := store.Load(context.Background(), cpID) assert.Error(t, err) assert.Nil(t, loaded) assert.Contains(t, err.Error(), "failed to unmarshal metadata") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpoint_Load_NilMetadata(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cpID := "cp-1" timestamp := time.Now() state := map[string]any{"foo": "bar"} stateJSON, _ := json.Marshal(state) // Create row with nil metadata rows := pgxmock.NewRows([]string{"id", "node_name", "state", "metadata", "timestamp", "version"}). AddRow(cpID, "node-a", stateJSON, nil, timestamp, 1) mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE id = $1")). WithArgs(cpID). WillReturnRows(rows) loaded, err := store.Load(context.Background(), cpID) assert.NoError(t, err) assert.Equal(t, cpID, loaded.ID) assert.Equal(t, "node-a", loaded.NodeName) assert.Equal(t, 1, loaded.Version) assert.NotNil(t, loaded.State) // Metadata should be nil when not present in DB (not initialized) assert.Nil(t, loaded.Metadata) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_List(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") executionID := "exec-1" timestamp := time.Now() // Create checkpoint data checkpoints := []struct { id string nodeName string state map[string]any metadata map[string]any version int }{ { id: "cp-1", nodeName: "node-a", state: map[string]any{"step": 1}, metadata: map[string]any{"execution_id": executionID}, version: 1, }, { id: "cp-2", nodeName: "node-b", state: map[string]any{"step": 2}, metadata: map[string]any{"execution_id": executionID}, version: 2, }, } rows := pgxmock.NewRows([]string{"id", "node_name", "state", "metadata", "timestamp", 
"version"}) for _, cp := range checkpoints { stateJSON, _ := json.Marshal(cp.state) metadataJSON, _ := json.Marshal(cp.metadata) rows.AddRow(cp.id, cp.nodeName, stateJSON, metadataJSON, timestamp, cp.version) } mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE execution_id = $1 ORDER BY timestamp ASC")). WithArgs(executionID). WillReturnRows(rows) loaded, err := store.List(context.Background(), executionID) assert.NoError(t, err) assert.Equal(t, 2, len(loaded)) // Check first checkpoint assert.Equal(t, "cp-1", loaded[0].ID) assert.Equal(t, "node-a", loaded[0].NodeName) assert.Equal(t, 1, loaded[0].Version) // Check second checkpoint assert.Equal(t, "cp-2", loaded[1].ID) assert.Equal(t, "node-b", loaded[1].NodeName) assert.Equal(t, 2, loaded[1].Version) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_List_EmptyResult(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") executionID := "exec-empty" rows := pgxmock.NewRows([]string{"id", "node_name", "state", "metadata", "timestamp", "version"}) mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE execution_id = $1 ORDER BY timestamp ASC")). WithArgs(executionID). 
WillReturnRows(rows) loaded, err := store.List(context.Background(), executionID) assert.NoError(t, err) assert.Equal(t, 0, len(loaded)) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_List_DatabaseError(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") executionID := "exec-1" dbError := errors.New("database connection failed") mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE execution_id = $1 ORDER BY timestamp ASC")). WithArgs(executionID). WillReturnError(dbError) loaded, err := store.List(context.Background(), executionID) assert.Error(t, err) assert.Nil(t, loaded) assert.Contains(t, err.Error(), "failed to list checkpoints") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_List_ScanError(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") executionID := "exec-1" rows := pgxmock.NewRows([]string{"id", "node_name", "state", "metadata", "timestamp", "version"}). AddRow("cp-1", "node-a", []byte("{invalid"), []byte("{}"), time.Now(), 1). AddRow("cp-2", "node-b", []byte("{}"), []byte("{}"), time.Now(), 2) mock.ExpectQuery(regexp.QuoteMeta("SELECT id, node_name, state, metadata, timestamp, version FROM checkpoints WHERE execution_id = $1 ORDER BY timestamp ASC")). WithArgs(executionID). 
WillReturnRows(rows) loaded, err := store.List(context.Background(), executionID) assert.Error(t, err) assert.Nil(t, loaded) // The error occurs during JSON unmarshaling, not scanning assert.Contains(t, err.Error(), "failed to unmarshal state") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Delete(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") checkpointID := "cp-1" mock.ExpectExec(regexp.QuoteMeta("DELETE FROM checkpoints WHERE id = $1")). WithArgs(checkpointID). WillReturnResult(pgxmock.NewResult("DELETE", 1)) err = store.Delete(context.Background(), checkpointID) assert.NoError(t, err) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Delete_DatabaseError(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") checkpointID := "cp-1" dbError := errors.New("database connection failed") mock.ExpectExec(regexp.QuoteMeta("DELETE FROM checkpoints WHERE id = $1")). WithArgs(checkpointID). WillReturnError(dbError) err = store.Delete(context.Background(), checkpointID) assert.Error(t, err) assert.Contains(t, err.Error(), "failed to delete checkpoint") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Clear(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") executionID := "exec-1" mock.ExpectExec(regexp.QuoteMeta("DELETE FROM checkpoints WHERE execution_id = $1")). WithArgs(executionID). 
WillReturnResult(pgxmock.NewResult("DELETE", 5)) // 5 rows deleted err = store.Clear(context.Background(), executionID) assert.NoError(t, err) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Clear_DatabaseError(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") executionID := "exec-1" dbError := errors.New("database connection failed") mock.ExpectExec(regexp.QuoteMeta("DELETE FROM checkpoints WHERE execution_id = $1")). WithArgs(executionID). WillReturnError(dbError) err = store.Clear(context.Background(), executionID) assert.Error(t, err) assert.Contains(t, err.Error(), "failed to clear checkpoints") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_InitSchema(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") mock.ExpectExec(regexp.QuoteMeta(` CREATE TABLE IF NOT EXISTS checkpoints ( id TEXT PRIMARY KEY, execution_id TEXT NOT NULL, thread_id TEXT, node_name TEXT NOT NULL, state JSONB NOT NULL, metadata JSONB, timestamp TIMESTAMPTZ NOT NULL, version INTEGER NOT NULL ); CREATE INDEX IF NOT EXISTS idx_checkpoints_execution_id ON checkpoints (execution_id); CREATE INDEX IF NOT EXISTS idx_checkpoints_thread_id ON checkpoints (thread_id); CREATE INDEX IF NOT EXISTS idx_checkpoints_execution_thread ON checkpoints (execution_id, thread_id); `)). 
WillReturnResult(pgxmock.NewResult("CREATE", 0)) err = store.InitSchema(context.Background()) assert.NoError(t, err) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_InitSchema_CustomTable(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() tableName := "custom_checkpoints" store := NewPostgresCheckpointStoreWithPool(mock, tableName) mock.ExpectExec(regexp.QuoteMeta(` CREATE TABLE IF NOT EXISTS custom_checkpoints ( id TEXT PRIMARY KEY, execution_id TEXT NOT NULL, thread_id TEXT, node_name TEXT NOT NULL, state JSONB NOT NULL, metadata JSONB, timestamp TIMESTAMPTZ NOT NULL, version INTEGER NOT NULL ); CREATE INDEX IF NOT EXISTS idx_custom_checkpoints_execution_id ON custom_checkpoints (execution_id); CREATE INDEX IF NOT EXISTS idx_custom_checkpoints_thread_id ON custom_checkpoints (thread_id); CREATE INDEX IF NOT EXISTS idx_custom_checkpoints_execution_thread ON custom_checkpoints (execution_id, thread_id); `)). WillReturnResult(pgxmock.NewResult("CREATE", 0)) err = store.InitSchema(context.Background()) assert.NoError(t, err) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_InitSchema_DatabaseError(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") dbError := errors.New("database connection failed") mock.ExpectExec(regexp.QuoteMeta("CREATE TABLE IF NOT EXISTS checkpoints")). 
WillReturnError(dbError) err = store.InitSchema(context.Background()) assert.Error(t, err) assert.Contains(t, err.Error(), "failed to create schema") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Close(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") // This should not panic assert.NotPanics(t, func() { store.Close() }) } func TestNewPostgresCheckpointStoreWithPool_DefaultTableName(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() // Pass empty table name, should default to "checkpoints" store := NewPostgresCheckpointStoreWithPool(mock, "") assert.NotNil(t, store) assert.Equal(t, "checkpoints", store.tableName) assert.Equal(t, mock, store.pool) } func TestPostgresCheckpointStore_Save_Conflict(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cp := &graph.Checkpoint{ ID: "cp-1", NodeName: "node-a", State: map[string]any{"foo": "bar"}, Timestamp: time.Now(), Version: 2, // Different version Metadata: map[string]any{ "execution_id": "exec-1", }, } stateJSON, _ := json.Marshal(cp.State) metadataJSON, _ := json.Marshal(cp.Metadata) // Expect UPDATE due to conflict mock.ExpectExec(regexp.QuoteMeta("INSERT INTO checkpoints")). WithArgs( cp.ID, "exec-1", "", // thread_id cp.NodeName, stateJSON, metadataJSON, cp.Timestamp, cp.Version, ). 
WillReturnResult(pgxmock.NewResult("UPDATE", 1)) err = store.Save(context.Background(), cp) assert.NoError(t, err) assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Save_DatabaseError(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cp := &graph.Checkpoint{ ID: "cp-1", NodeName: "node-a", State: map[string]any{"foo": "bar"}, Timestamp: time.Now(), Version: 1, Metadata: map[string]any{ "execution_id": "exec-1", }, } stateJSON, _ := json.Marshal(cp.State) metadataJSON, _ := json.Marshal(cp.Metadata) dbError := errors.New("database connection failed") mock.ExpectExec(regexp.QuoteMeta("INSERT INTO checkpoints")). WithArgs( cp.ID, "exec-1", "", // thread_id cp.NodeName, stateJSON, metadataJSON, cp.Timestamp, cp.Version, ). WillReturnError(dbError) err = store.Save(context.Background(), cp) assert.Error(t, err) assert.Contains(t, err.Error(), "failed to save checkpoint") assert.NoError(t, mock.ExpectationsWereMet()) } func TestPostgresCheckpointStore_Save_MarshalMetadataError(t *testing.T) { mock, err := pgxmock.NewPool() assert.NoError(t, err) defer mock.Close() store := NewPostgresCheckpointStoreWithPool(mock, "checkpoints") cp := &graph.Checkpoint{ ID: "cp-1", NodeName: "node-a", State: map[string]any{"foo": "bar"}, Timestamp: time.Now(), Version: 1, Metadata: map[string]any{ "invalid": make(chan int), // channels cannot be marshaled }, } err = store.Save(context.Background(), cp) assert.Error(t, err) assert.Contains(t, err.Error(), "failed to marshal metadata") } func TestNewPostgresCheckpointStore_InvalidConnection(t *testing.T) { ctx := context.Background() opts := PostgresOptions{ ConnString: "invalid://connection-string", TableName: "test_checkpoints", } // This should return an error due to invalid connection string _, err := NewPostgresCheckpointStore(ctx, opts) assert.Error(t, err) assert.Contains(t, err.Error(), "unable to 
create connection pool") }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/redis/redis_test.go
store/redis/redis_test.go
package redis import ( "context" "testing" "time" "github.com/alicebob/miniredis/v2" "github.com/smallnest/langgraphgo/graph" "github.com/stretchr/testify/assert" ) func TestRedisCheckpointStore(t *testing.T) { // Start miniredis mr, err := miniredis.Run() assert.NoError(t, err) defer mr.Close() // Create store store := NewRedisCheckpointStore(RedisOptions{ Addr: mr.Addr(), }) ctx := context.Background() execID := "exec-123" // Create checkpoint cp := &graph.Checkpoint{ ID: "cp-1", NodeName: "node-a", State: map[string]any{"foo": "bar"}, Timestamp: time.Now(), Version: 1, Metadata: map[string]any{ "execution_id": execID, }, } // Test Save err = store.Save(ctx, cp) assert.NoError(t, err) // Test Load loaded, err := store.Load(ctx, "cp-1") assert.NoError(t, err) assert.Equal(t, cp.ID, loaded.ID) assert.Equal(t, cp.NodeName, loaded.NodeName) // JSON unmarshal converts numbers to float64, so exact map comparison might fail on types if not careful // But here we used string, so it should be fine. state, ok := loaded.State.(map[string]any) assert.True(t, ok) assert.Equal(t, "bar", state["foo"]) // Test List list, err := store.List(ctx, execID) assert.NoError(t, err) assert.Len(t, list, 1) assert.Equal(t, cp.ID, list[0].ID) // Test Delete err = store.Delete(ctx, "cp-1") assert.NoError(t, err) _, err = store.Load(ctx, "cp-1") assert.Error(t, err) list, err = store.List(ctx, execID) assert.NoError(t, err) assert.Len(t, list, 0) // Test Clear // Add multiple checkpoints cp2 := &graph.Checkpoint{ID: "cp-2", Metadata: map[string]any{"execution_id": execID}} cp3 := &graph.Checkpoint{ID: "cp-3", Metadata: map[string]any{"execution_id": execID}} store.Save(ctx, cp2) store.Save(ctx, cp3) list, err = store.List(ctx, execID) assert.NoError(t, err) assert.Len(t, list, 2) err = store.Clear(ctx, execID) assert.NoError(t, err) list, err = store.List(ctx, execID) assert.NoError(t, err) assert.Len(t, list, 0) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/redis/doc.go
store/redis/doc.go
// Package redis provides Redis-backed storage for LangGraph Go checkpoints and state. // // This package implements fast, in-memory checkpoint storage using Redis, ideal for // scenarios requiring low-latency access to checkpoints and supporting distributed // graph executions across multiple processes or servers. // // # Key Features // // - High-performance checkpoint storage with Redis // - Support for TTL (time-to-live) automatic expiration // - Atomic operations for consistency // - Distributed locking support // - Configurable key prefixes for multi-tenancy // - JSON serialization of complex state objects // - Connection pooling and clustering support // - Pub/Sub notifications for checkpoint changes // // # Basic Usage // // import ( // "context" // "github.com/smallnest/langgraphgo/store/redis" // ) // // // Create a Redis checkpoint store // store := redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "localhost:6379", // Password: "yourpassword", // DB: 0, // Redis database number // Prefix: "langgraph:", // Optional key prefix // TTL: 24 * time.Hour, // Optional TTL for checkpoints // }) // // // Use with a graph // g := graph.NewStateGraph() // // ... configure graph ... 
// // // Enable checkpointing // compileConfig := graph.CompileConfig{ // CheckpointConfig: graph.CheckpointConfig{ // Store: store, // }, // } // // runnable, err := g.CompileWithOptions(compileConfig) // // # Configuration // // ## Connection Options // // // Single Redis instance // store := redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "localhost:6379", // Password: "", // DB: 0, // }) // // // With authentication // store := redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "redis.example.com:6379", // Password: "your-redis-password", // DB: 1, // }) // // // With Unix socket // store := redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "/var/run/redis/redis.sock", // Password: "", // DB: 0, // }) // // ## TTL Configuration // // // Set default TTL for all checkpoints (24 hours) // store := redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "localhost:6379", // TTL: 24 * time.Hour, // }) // // // No expiration (persistent) // store := redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "localhost:6379", // TTL: 0, // }) // // # Advanced Features // // ## Custom Redis Client // // // Use a custom Redis client for more control // rdb := redis.NewClient(&redis.Options{ // Addr: "localhost:6379", // Password: "", // DB: 0, // MaxRetries: 3, // PoolSize: 10, // MinIdleConns: 5, // DialTimeout: 5 * time.Second, // ReadTimeout: 3 * time.Second, // WriteTimeout: 3 * time.Second, // PoolTimeout: 4 * time.Second, // }) // // store := redis.NewCheckpointStoreFromClient(rdb, "langgraph:", time.Hour) // // ## Clustering Support // // // Redis Cluster configuration // rdb := redis.NewClusterClient(&redis.ClusterOptions{ // Addrs: []string{ // "redis-node-1:6379", // "redis-node-2:6379", // "redis-node-3:6379", // }, // Password: "cluster-password", // }) // // store := redis.NewCheckpointStoreFromCluster(rdb, "langgraph:", time.Hour) // // ## Sentinel Support // // // Redis Sentinel for high availability // rdb := 
redis.NewFailoverClient(&redis.FailoverOptions{ // MasterName: "mymaster", // SentinelAddrs: []string{ // "sentinel-1:26379", // "sentinel-2:26379", // "sentinel-3:26379", // }, // Password: "sentinel-password", // }) // // store := redis.NewCheckpointStoreFromClient(rdb, "langgraph:", time.Hour) // // # Key Management // // // Checkpoints are stored with structured keys // // Format: {prefix}checkpoint:{checkpoint_id} // // Example: "langgraph:checkpoint:abc123" // // // Thread-specific checkpoints // // Format: {prefix}thread:{thread_id}:checkpoint:{checkpoint_id} // // Example: "langgraph:thread:xyz789:checkpoint:def456" // // // List all checkpoints for a thread // keys, err := store.client.Keys(ctx, "langgraph:thread:xyz789:checkpoint:*") // // ## Custom TTL per Checkpoint // // // Override default TTL for specific checkpoint // checkpoint := &graph.Checkpoint{ // ID: "checkpoint-123", // ThreadID: "thread-456", // State: state, // Metadata: map[string]any{ // "ttl": 2 * time.Hour, // Custom TTL // }, // } // // if err := store.PutWithTTL(ctx, checkpoint, 2*time.Hour); err != nil { // return err // } // // # Pub/Sub Notifications // // // Subscribe to checkpoint changes // pubsub := store.client.Subscribe(ctx, "langgraph:checkpoint:changes") // // go func() { // for msg := range pubsub.Channel() { // var event struct { // Action string `json:"action"` // CheckpointID string `json:"checkpoint_id"` // ThreadID string `json:"thread_id"` // Timestamp time.Time `json:"timestamp"` // } // json.Unmarshal([]byte(msg.Payload), &event) // // fmt.Printf("Checkpoint %s: %s\n", event.Action, event.CheckpointID) // } // }() // // # Performance Optimization // // ## Pipeline Operations // // // Batch operations with pipelining // pipe := store.client.Pipeline() // // checkpoints := []*graph.Checkpoint{cp1, cp2, cp3} // for _, cp := range checkpoints { // data, _ := json.Marshal(cp) // pipe.Set(ctx, store.checkpointKey(cp.ID), data, store.ttl) // } // // // Execute all 
operations atomically // _, err := pipe.Exec(ctx) // // ## Lua Scripts // // // Atomic update with Lua // updateScript := redis.NewScript(` // local key = KEYS[1] // local checkpoint_id = ARGV[1] // local new_data = ARGV[2] // // local old = redis.call('GET', key) // if old then // redis.call('SET', key, new_data, 'EX', ARGV[3]) // return old // end // return nil // `) // // result, err := updateScript.Run(ctx, store.client, // []string{store.checkpointKey(checkpointID)}, // checkpointID, newData, int(ttl.Seconds()), // ).Result() // // # Monitoring and Metrics // // // Get Redis information // info, err := store.client.Info(ctx).Result() // if err == nil { // fmt.Printf("Redis Info: %s\n", info) // } // // // Monitor memory usage // memStats, err := store.client.MemoryUsage(ctx, store.checkpointKey("*")).Result() // if err == nil { // fmt.Printf("Memory usage: %v\n", memStats) // } // // // Track operations // monitor := &RedisMonitor{ // client: store.client, // metrics: make(map[string]int64), // } // // store.SetMonitor(monitor) // // type RedisMonitor struct { // client *redis.Client // metrics map[string]int64 // mutex sync.RWMutex // } // // func (m *RedisMonitor) OnOperation(op string, duration time.Duration) { // m.mutex.Lock() // defer m.mutex.Unlock() // m.metrics[op]++ // } // // # Error Handling // // // Handle Redis-specific errors // if err := store.Put(ctx, checkpoint); err != nil { // if redis.IsNil(err) { // // Handle not found // } else if redis.IsPoolTimeout(err) { // // Handle connection pool timeout // } else if redis.IsConnectionError(err) { // // Handle connection error // } // } // // // Retry logic // store := redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "localhost:6379", // }) // // // With retry policy // ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) // defer cancel() // // for i := 0; i < 3; i++ { // err := store.Put(ctx, checkpoint) // if err == nil { // break // } // if i == 2 { // return err 
// } // time.Sleep(time.Second * time.Duration(i+1)) // } // // # Integration Examples // // ## With Distributed Execution // // // Multiple processes sharing the same Redis store // store := redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "redis-cluster:6379", // Prefix: "distributed-langgraph:", // TTL: 6 * time.Hour, // Checkpoints persist for 6 hours // }) // // // Process 1 // runnable1, _ := g1.Compile() // go func() { // result, _ := runnable1.Invoke(ctx, input, // graph.WithExecutionID("shared-execution-123")) // }() // // // Process 2 can resume from the same execution // runnable2, _ := g2.Compile() // result, _ := runnable2.Resume(ctx, "shared-execution-123", "checkpoint-456") // // ## With Session Affinity // // // Store checkpoints per user session // func getUserStore(userID string) graph.CheckpointStore { // return redis.NewRedisCheckpointStore(redis.RedisOptions{ // Addr: "localhost:6379", // Prefix: fmt.Sprintf("user:%s:langgraph:", userID), // TTL: 2 * time.Hour, // Session timeout // }) // } // // userStore := getUserStore("user-123") // // # Best Practices // // 1. Use meaningful key prefixes for organization // 2. Set appropriate TTL to prevent memory bloat // 3. Use connection pooling in production // 4. Implement proper error handling with retries // 5. Monitor Redis memory usage // 6. Use Redis Cluster for high availability // 7. Enable persistence for critical data // 8. Use pipelining for batch operations // 9. Consider compression for large state objects // 10. 
Test failover scenarios // // # Security Considerations // // - Enable Redis AUTH in production // - Use TLS/SSL for network connections // - Implement proper network isolation // - Use Redis ACLs for fine-grained access control // - Encrypt sensitive data before storage // - Disable dangerous commands (CONFIG, FLUSHDB) // - Set up proper firewalls // - Monitor for suspicious activity // // # Docker Integration // // Use with Docker Compose: // // ```yaml // version: '3.8' // services: // // langgraph: // image: your-app // environment: // - REDIS_ADDR=redis:6379 // - REDIS_PASSWORD=yourpassword // depends_on: // - redis // // redis: // image: redis:7-alpine // command: redis-server --requirepass yourpassword // ports: // - "6379:6379" // volumes: // - redis_data:/data // // redis-commander: // image: rediscommander/redis-commander:latest // environment: // - REDIS_HOSTS=local:redis:6379:0:yourpassword // ports: // - "8081:8081" // // volumes: // // redis_data: // // ``` // // # Comparison with Other Stores // // | Feature | Redis Store | PostgreSQL Store | SQLite Store | // |---------------------|-------------|------------------|-------------| // | Performance | Very High | High | Medium | // | Persistence | Optional | Yes | Yes | // | Memory Usage | High | Low | Low | // | Scaling | Horizontal | Vertical | Single | // | Query Capabilities | Basic | Advanced | Basic | // | Transactions | Limited | Full | Full | // | TTL Support | Native | Manual | Manual | // | Best For | High-speed | Complex queries | Simple apps | package redis
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/store/redis/redis.go
store/redis/redis.go
package redis

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
	"github.com/smallnest/langgraphgo/graph"
)

// RedisCheckpointStore implements graph.CheckpointStore using Redis.
//
// Checkpoints are stored as JSON blobs under "{prefix}checkpoint:{id}" and,
// when the checkpoint metadata carries "execution_id" / "thread_id", indexed
// in per-execution and per-thread sorted sets scored by checkpoint version.
type RedisCheckpointStore struct {
	client *redis.Client // underlying Redis connection
	prefix string        // key prefix, e.g. "langgraph:"
	ttl    time.Duration // expiration applied to all keys; 0 means no expiration
}

// RedisOptions configuration for Redis connection
type RedisOptions struct {
	Addr     string
	Password string
	DB       int
	Prefix   string        // Key prefix, default "langgraph:"
	TTL      time.Duration // Expiration for checkpoints, default 0 (no expiration)
}

// NewRedisCheckpointStore creates a new Redis checkpoint store.
func NewRedisCheckpointStore(opts RedisOptions) *RedisCheckpointStore {
	client := redis.NewClient(&redis.Options{
		Addr:     opts.Addr,
		Password: opts.Password,
		DB:       opts.DB,
	})

	prefix := opts.Prefix
	if prefix == "" {
		prefix = "langgraph:"
	}

	return &RedisCheckpointStore{
		client: client,
		prefix: prefix,
		ttl:    opts.TTL,
	}
}

// checkpointKey returns the Redis key holding one checkpoint blob.
func (s *RedisCheckpointStore) checkpointKey(id string) string {
	return fmt.Sprintf("%scheckpoint:%s", s.prefix, id)
}

// executionKey returns the sorted-set key indexing an execution's checkpoints.
func (s *RedisCheckpointStore) executionKey(id string) string {
	return fmt.Sprintf("%sexecution:%s:checkpoints", s.prefix, id)
}

// threadKey returns the sorted-set key indexing a thread's checkpoints.
func (s *RedisCheckpointStore) threadKey(id string) string {
	return fmt.Sprintf("%sthread:%s:checkpoints", s.prefix, id)
}

// Save stores a checkpoint and updates the execution/thread indexes in a
// single pipelined round trip.
func (s *RedisCheckpointStore) Save(ctx context.Context, checkpoint *graph.Checkpoint) error {
	data, err := json.Marshal(checkpoint)
	if err != nil {
		return fmt.Errorf("failed to marshal checkpoint: %w", err)
	}

	pipe := s.client.Pipeline()
	pipe.Set(ctx, s.checkpointKey(checkpoint.ID), data, s.ttl)

	// Index by execution_id if present
	if execID, ok := checkpoint.Metadata["execution_id"].(string); ok && execID != "" {
		execKey := s.executionKey(execID)
		pipe.ZAdd(ctx, execKey, redis.Z{Score: float64(checkpoint.Version), Member: checkpoint.ID})
		if s.ttl > 0 {
			pipe.Expire(ctx, execKey, s.ttl)
		}
	}

	// Index by thread_id if present
	if threadID, ok := checkpoint.Metadata["thread_id"].(string); ok && threadID != "" {
		threadKey := s.threadKey(threadID)
		pipe.ZAdd(ctx, threadKey, redis.Z{Score: float64(checkpoint.Version), Member: checkpoint.ID})
		if s.ttl > 0 {
			pipe.Expire(ctx, threadKey, s.ttl)
		}
	}

	if _, err = pipe.Exec(ctx); err != nil {
		return fmt.Errorf("failed to save checkpoint to redis: %w", err)
	}
	return nil
}

// Load retrieves a checkpoint by ID.
func (s *RedisCheckpointStore) Load(ctx context.Context, checkpointID string) (*graph.Checkpoint, error) {
	data, err := s.client.Get(ctx, s.checkpointKey(checkpointID)).Bytes()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			return nil, fmt.Errorf("checkpoint not found: %s", checkpointID)
		}
		return nil, fmt.Errorf("failed to load checkpoint from redis: %w", err)
	}

	var checkpoint graph.Checkpoint
	if err := json.Unmarshal(data, &checkpoint); err != nil {
		return nil, fmt.Errorf("failed to unmarshal checkpoint: %w", err)
	}
	return &checkpoint, nil
}

// fetchCheckpoints MGets the given checkpoint IDs and unmarshals the results.
// Entries that have expired (MGet returns nil for missing keys) or fail to
// decode are skipped rather than failing the whole call.
func (s *RedisCheckpointStore) fetchCheckpoints(ctx context.Context, checkpointIDs []string) ([]*graph.Checkpoint, error) {
	keys := make([]string, 0, len(checkpointIDs))
	for _, id := range checkpointIDs {
		keys = append(keys, s.checkpointKey(id))
	}

	results, err := s.client.MGet(ctx, keys...).Result()
	if err != nil {
		return nil, fmt.Errorf("failed to fetch checkpoints: %w", err)
	}

	checkpoints := make([]*graph.Checkpoint, 0, len(results))
	for _, result := range results {
		strData, ok := result.(string)
		if !ok {
			// nil slot: key expired between ZRange and MGet, or unexpected type.
			continue
		}
		var checkpoint graph.Checkpoint
		if err := json.Unmarshal([]byte(strData), &checkpoint); err != nil {
			// Skip undecodable entries rather than aborting the listing.
			continue
		}
		checkpoints = append(checkpoints, &checkpoint)
	}
	return checkpoints, nil
}

// List returns all checkpoints for a given execution, ordered by version
// (ascending score in the execution index).
func (s *RedisCheckpointStore) List(ctx context.Context, executionID string) ([]*graph.Checkpoint, error) {
	checkpointIDs, err := s.client.ZRange(ctx, s.executionKey(executionID), 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("failed to list checkpoints for execution %s: %w", executionID, err)
	}
	if len(checkpointIDs) == 0 {
		return []*graph.Checkpoint{}, nil
	}
	return s.fetchCheckpoints(ctx, checkpointIDs)
}

// ListByThread returns all checkpoints for a specific thread_id, ordered by
// version (ascending score in the thread index).
func (s *RedisCheckpointStore) ListByThread(ctx context.Context, threadID string) ([]*graph.Checkpoint, error) {
	checkpointIDs, err := s.client.ZRange(ctx, s.threadKey(threadID), 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("failed to list checkpoints for thread %s: %w", threadID, err)
	}
	if len(checkpointIDs) == 0 {
		return []*graph.Checkpoint{}, nil
	}
	return s.fetchCheckpoints(ctx, checkpointIDs)
}

// GetLatestByThread returns the checkpoint with the highest version for a thread_id.
func (s *RedisCheckpointStore) GetLatestByThread(ctx context.Context, threadID string) (*graph.Checkpoint, error) {
	// ZREVRANGE 0 0 yields the member with the highest score, i.e. the latest version.
	results, err := s.client.ZRevRangeWithScores(ctx, s.threadKey(threadID), 0, 0).Result()
	if err != nil {
		return nil, fmt.Errorf("failed to get latest checkpoint for thread %s: %w", threadID, err)
	}
	if len(results) == 0 {
		return nil, fmt.Errorf("no checkpoints found for thread: %s", threadID)
	}

	// Guard the type assertion: a corrupted index must not panic.
	latestCheckpointID, ok := results[0].Member.(string)
	if !ok {
		return nil, fmt.Errorf("unexpected index member type for thread %s", threadID)
	}

	data, err := s.client.Get(ctx, s.checkpointKey(latestCheckpointID)).Result()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			return nil, fmt.Errorf("checkpoint not found: %s", latestCheckpointID)
		}
		return nil, fmt.Errorf("failed to load checkpoint %s: %w", latestCheckpointID, err)
	}

	var checkpoint graph.Checkpoint
	if err := json.Unmarshal([]byte(data), &checkpoint); err != nil {
		return nil, fmt.Errorf("failed to unmarshal checkpoint: %w", err)
	}
	return &checkpoint, nil
}

// Delete removes a checkpoint and its execution/thread index entries.
// It returns an error if the checkpoint does not exist.
func (s *RedisCheckpointStore) Delete(ctx context.Context, checkpointID string) error {
	// Load first so the index entries referenced by the metadata can be cleaned up.
	checkpoint, err := s.Load(ctx, checkpointID)
	if err != nil {
		return err
	}

	pipe := s.client.Pipeline()
	pipe.Del(ctx, s.checkpointKey(checkpointID))

	if execID, ok := checkpoint.Metadata["execution_id"].(string); ok && execID != "" {
		pipe.ZRem(ctx, s.executionKey(execID), checkpointID)
	}
	if threadID, ok := checkpoint.Metadata["thread_id"].(string); ok && threadID != "" {
		pipe.ZRem(ctx, s.threadKey(threadID), checkpointID)
	}

	if _, err = pipe.Exec(ctx); err != nil {
		return fmt.Errorf("failed to delete checkpoint: %w", err)
	}
	return nil
}

// Clear removes all checkpoints for an execution along with the execution index.
func (s *RedisCheckpointStore) Clear(ctx context.Context, executionID string) error {
	execKey := s.executionKey(executionID)

	checkpointIDs, err := s.client.ZRange(ctx, execKey, 0, -1).Result()
	if err != nil {
		return fmt.Errorf("failed to get checkpoints for clearing: %w", err)
	}
	if len(checkpointIDs) == 0 {
		return nil
	}

	pipe := s.client.Pipeline()
	// Delete all checkpoint blobs, then the index itself.
	for _, id := range checkpointIDs {
		pipe.Del(ctx, s.checkpointKey(id))
	}
	pipe.Del(ctx, execKey)

	if _, err = pipe.Exec(ctx); err != nil {
		return fmt.Errorf("failed to clear checkpoints: %w", err)
	}
	return nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/planning_agent_test.go
prebuilt/planning_agent_test.go
package prebuilt

import (
	"context"
	"testing"

	"github.com/smallnest/langgraphgo/graph"
	"github.com/stretchr/testify/assert"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/tools"
)

// MockPlanningLLM is a mock LLM that returns a workflow plan on its first
// call and scripted responses afterwards, recording every request it sees.
type MockPlanningLLM struct {
	planJSON      string
	responses     []llms.ContentResponse
	callCount     int
	capturedCalls [][]llms.MessageContent
}

// GenerateContent replays the plan (call #1) or the next scripted response.
func (m *MockPlanningLLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) {
	m.capturedCalls = append(m.capturedCalls, messages)

	// The very first invocation is always the planning request.
	if m.callCount == 0 {
		m.callCount++
		return &llms.ContentResponse{
			Choices: []*llms.ContentChoice{{Content: m.planJSON}},
		}, nil
	}

	// Later invocations walk through the scripted responses in order.
	if idx := m.callCount - 1; idx < len(m.responses) {
		m.callCount++
		scripted := m.responses[idx]
		return &scripted, nil
	}

	// Script exhausted: answer with a fixed fallback.
	return &llms.ContentResponse{
		Choices: []*llms.ContentChoice{{Content: "No more responses"}},
	}, nil
}

// Call satisfies the llms.Model interface; unused by these tests.
func (m *MockPlanningLLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
	return "", nil
}

func TestCreatePlanningAgentMap_SimpleWorkflow(t *testing.T) {
	// appendAIText appends an AI message with the given text to the state's
	// message history and returns the updated state.
	appendAIText := func(state map[string]any, text string) (map[string]any, error) {
		history := state["messages"].([]llms.MessageContent)
		reply := llms.MessageContent{
			Role:  llms.ChatMessageTypeAI,
			Parts: []llms.ContentPart{llms.TextPart(text)},
		}
		return map[string]any{"messages": append(history, reply)}, nil
	}

	// The two nodes the planner may wire together.
	workflowNodes := []graph.TypedNode[map[string]any]{
		{
			Name:        "research",
			Description: "Research and gather information",
			Function: func(ctx context.Context, state map[string]any) (map[string]any, error) {
				return appendAIText(state, "Research completed")
			},
		},
		{
			Name:        "analyze",
			Description: "Analyze the gathered information",
			Function: func(ctx context.Context, state map[string]any) (map[string]any, error) {
				return appendAIText(state, "Analysis completed")
			},
		},
	}

	// Plan the mock LLM hands back on its first (planning) call.
	workflowPlan := `{
		"nodes": [
			{"name": "research", "type": "process"},
			{"name": "analyze", "type": "process"}
		],
		"edges": [
			{"from": "START", "to": "research"},
			{"from": "research", "to": "analyze"},
			{"from": "analyze", "to": "END"}
		]
	}`

	planner := &MockPlanningLLM{
		planJSON:  workflowPlan,
		responses: []llms.ContentResponse{},
	}

	agent, err := CreatePlanningAgentMap(planner, workflowNodes, []tools.Tool{})
	assert.NoError(t, err)
	assert.NotNil(t, agent)

	startState := map[string]any{
		"messages": []llms.MessageContent{
			llms.TextParts(llms.ChatMessageTypeHuman, "Please research and analyze"),
		},
	}

	finalState, err := agent.Invoke(context.Background(), startState)
	assert.NoError(t, err)
	assert.NotNil(t, finalState)
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/create_agent_test.go
prebuilt/create_agent_test.go
package prebuilt

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/tools"
)

// TestCreateAgentMap exercises the map[string]any-state agent factory:
// construction with various options and basic Invoke round trips.
func TestCreateAgentMap(t *testing.T) {
	mockLLM := &MockLLM{}
	inputTools := []tools.Tool{}
	systemMessage := "You are a helpful assistant."

	t.Run("Basic Agent Creation", func(t *testing.T) {
		agent, err := CreateAgentMap(mockLLM, inputTools, 0, WithSystemMessage(systemMessage))
		assert.NoError(t, err)
		assert.NotNil(t, agent)
	})

	t.Run("Agent with State Modifier", func(t *testing.T) {
		// Capturing mock so we can inspect the messages the LLM received.
		mockLLM := &MockLLMWithInputCapture{}
		modifier := func(messages []llms.MessageContent) []llms.MessageContent {
			return append(messages, llms.TextParts(llms.ChatMessageTypeHuman, "Modified"))
		}
		agent, err := CreateAgentMap(mockLLM, inputTools, 0, WithStateModifier(modifier))
		assert.NoError(t, err)
		_, err = agent.Invoke(context.Background(), map[string]any{"messages": []llms.MessageContent{}})
		assert.NoError(t, err)
		// Verify modifier was called (last message should be "Modified")
		assert.True(t, len(mockLLM.lastMessages) > 0)
		lastMsg := mockLLM.lastMessages[len(mockLLM.lastMessages)-1]
		assert.Equal(t, "Modified", lastMsg.Parts[0].(llms.TextContent).Text)
	})

	t.Run("Agent with System Message", func(t *testing.T) {
		mockLLM := &MockLLMWithInputCapture{}
		systemMsg := "You are a specialized bot."
		agent, err := CreateAgentMap(mockLLM, inputTools, 0, WithSystemMessage(systemMsg))
		assert.NoError(t, err)
		_, err = agent.Invoke(context.Background(), map[string]any{"messages": []llms.MessageContent{}})
		assert.NoError(t, err)
		// Verify system message was prepended
		assert.True(t, len(mockLLM.lastMessages) > 0)
		firstMsg := mockLLM.lastMessages[0]
		assert.Equal(t, llms.ChatMessageTypeSystem, firstMsg.Role)
		assert.Equal(t, systemMsg, firstMsg.Parts[0].(llms.TextContent).Text)
	})

	t.Run("Agent with Verbose option", func(t *testing.T) {
		// Test that WithVerbose option is properly set
		agent, err := CreateAgentMap(mockLLM, inputTools, 0, WithVerbose(true))
		assert.NoError(t, err)
		assert.NotNil(t, agent)
	})

	t.Run("Agent with tools", func(t *testing.T) {
		mockTool := &MockToolWithResponse{name: "test_tool", description: "A test tool", response: "Tool response"}
		agent, err := CreateAgentMap(mockLLM, []tools.Tool{mockTool}, 0)
		assert.NoError(t, err)
		assert.NotNil(t, agent)
	})

	t.Run("Agent Invoke with messages", func(t *testing.T) {
		mockLLM := &MockLLMWithInputCapture{}
		agent, err := CreateAgentMap(mockLLM, inputTools, 0)
		assert.NoError(t, err)
		messages := []llms.MessageContent{
			llms.TextParts(llms.ChatMessageTypeHuman, "Hello"),
		}
		result, err := agent.Invoke(context.Background(), map[string]any{"messages": messages})
		assert.NoError(t, err)
		assert.NotNil(t, result)
	})
}

// TestCreateAgentGeneric exercises the generic CreateAgent[S] factory using
// AgentState, wiring the message/tool getter and setter callbacks explicitly.
func TestCreateAgentGeneric(t *testing.T) {
	mockLLM := &MockLLM{}

	t.Run("Create generic AgentState agent", func(t *testing.T) {
		inputTools := []tools.Tool{}
		agent, err := CreateAgent[AgentState](
			mockLLM,
			inputTools,
			func(s AgentState) []llms.MessageContent { return s.Messages },
			func(s AgentState, msgs []llms.MessageContent) AgentState {
				s.Messages = msgs
				return s
			},
			func(s AgentState) []tools.Tool { return s.ExtraTools },
			func(s AgentState, tools []tools.Tool) AgentState {
				s.ExtraTools = tools
				return s
			},
		)
		assert.NoError(t, err)
		assert.NotNil(t, agent)
	})

	t.Run("Create generic agent with system message", func(t *testing.T) {
		inputTools := []tools.Tool{}
		systemMsg := "You are a helpful assistant."
		agent, err := CreateAgent[AgentState](
			mockLLM,
			inputTools,
			func(s AgentState) []llms.MessageContent { return s.Messages },
			func(s AgentState, msgs []llms.MessageContent) AgentState {
				s.Messages = msgs
				return s
			},
			func(s AgentState) []tools.Tool { return s.ExtraTools },
			func(s AgentState, tools []tools.Tool) AgentState {
				s.ExtraTools = tools
				return s
			},
			WithSystemMessage(systemMsg),
		)
		assert.NoError(t, err)
		assert.NotNil(t, agent)
	})

	t.Run("Create generic agent with state modifier", func(t *testing.T) {
		inputTools := []tools.Tool{}
		modifier := func(messages []llms.MessageContent) []llms.MessageContent {
			return append(messages, llms.TextParts(llms.ChatMessageTypeSystem, "Modified"))
		}
		agent, err := CreateAgent[AgentState](
			mockLLM,
			inputTools,
			func(s AgentState) []llms.MessageContent { return s.Messages },
			func(s AgentState, msgs []llms.MessageContent) AgentState {
				s.Messages = msgs
				return s
			},
			func(s AgentState) []tools.Tool { return s.ExtraTools },
			func(s AgentState, tools []tools.Tool) AgentState {
				s.ExtraTools = tools
				return s
			},
			WithStateModifier(modifier),
		)
		assert.NoError(t, err)
		assert.NotNil(t, agent)
	})

	t.Run("Generic agent invoke", func(t *testing.T) {
		inputTools := []tools.Tool{}
		agent, err := CreateAgent[AgentState](
			mockLLM,
			inputTools,
			func(s AgentState) []llms.MessageContent { return s.Messages },
			func(s AgentState, msgs []llms.MessageContent) AgentState {
				s.Messages = msgs
				return s
			},
			func(s AgentState) []tools.Tool { return s.ExtraTools },
			func(s AgentState, tools []tools.Tool) AgentState {
				s.ExtraTools = tools
				return s
			},
		)
		assert.NoError(t, err)
		state := AgentState{
			Messages: []llms.MessageContent{
				llms.TextParts(llms.ChatMessageTypeHuman, "Hello"),
			},
		}
		result, err := agent.Invoke(context.Background(), state)
		assert.NoError(t, err)
		assert.NotNil(t, result)
	})

	t.Run("Generic agent with extra tools", func(t *testing.T) {
		// Base tools come from the factory; extra tools ride along in the state.
		inputTools := []tools.Tool{
			&MockToolWithResponse{name: "base_tool", description: "Base tool", response: "base response"},
		}
		agent, err := CreateAgent[AgentState](
			mockLLM,
			inputTools,
			func(s AgentState) []llms.MessageContent { return s.Messages },
			func(s AgentState, msgs []llms.MessageContent) AgentState {
				s.Messages = msgs
				return s
			},
			func(s AgentState) []tools.Tool { return s.ExtraTools },
			func(s AgentState, tools []tools.Tool) AgentState {
				s.ExtraTools = tools
				return s
			},
		)
		assert.NoError(t, err)
		state := AgentState{
			Messages: []llms.MessageContent{
				llms.TextParts(llms.ChatMessageTypeHuman, "Hello"),
			},
			ExtraTools: []tools.Tool{
				&MockToolWithResponse{name: "extra_tool", description: "Extra tool", response: "extra response"},
			},
		}
		result, err := agent.Invoke(context.Background(), state)
		assert.NoError(t, err)
		assert.NotNil(t, result)
	})
}

// TestCreateAgentWithToolCalls drives the tool-call loop: the mock LLM asks
// for a tool on the first call and produces a final answer on the second.
func TestCreateAgentWithToolCalls(t *testing.T) {
	t.Run("AgentMap with tool call response", func(t *testing.T) {
		mockLLM := &MockLLMWithToolCalls{}
		mockTool := &MockToolWithResponse{
			name:        "test_tool",
			description: "A test tool",
			response:    "Tool executed successfully",
		}
		agent, err := CreateAgentMap(mockLLM, []tools.Tool{mockTool}, 0)
		assert.NoError(t, err)
		messages := []llms.MessageContent{
			llms.TextParts(llms.ChatMessageTypeHuman, "Use the test tool"),
		}
		result, err := agent.Invoke(context.Background(), map[string]any{"messages": messages})
		assert.NoError(t, err)
		assert.NotNil(t, result)
	})

	t.Run("Generic agent with tool calls", func(t *testing.T) {
		mockLLM := &MockLLMWithToolCalls{}
		mockTool := &MockToolWithResponse{
			name:        "test_tool",
			description: "A test tool",
			response:    "Tool executed successfully",
		}
		agent, err := CreateAgent[AgentState](
			mockLLM,
			[]tools.Tool{mockTool},
			func(s AgentState) []llms.MessageContent { return s.Messages },
			func(s AgentState, msgs []llms.MessageContent) AgentState {
				s.Messages = msgs
				return s
			},
			func(s AgentState) []tools.Tool { return s.ExtraTools },
			func(s AgentState, tools []tools.Tool) AgentState {
				s.ExtraTools = tools
				return s
			},
		)
		assert.NoError(t, err)
		state := AgentState{
			Messages: []llms.MessageContent{
				llms.TextParts(llms.ChatMessageTypeHuman, "Use the test tool"),
			},
		}
		result, err := agent.Invoke(context.Background(), state)
		assert.NoError(t, err)
		assert.NotNil(t, result)
	})
}

// Mock structures for testing

// MockLLM always answers with a fixed greeting.
type MockLLM struct {
	llms.Model
}

func (m *MockLLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) {
	return &llms.ContentResponse{
		Choices: []*llms.ContentChoice{
			{
				Content: "Hello! I'm a mock AI.",
			},
		},
	}, nil
}

// MockLLMWithInputCapture records the last message slice it was asked to
// complete so tests can inspect what the agent sent to the model.
type MockLLMWithInputCapture struct {
	llms.Model
	lastMessages []llms.MessageContent
}

func (m *MockLLMWithInputCapture) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) {
	m.lastMessages = messages
	return &llms.ContentResponse{
		Choices: []*llms.ContentChoice{
			{
				Content: "Response",
			},
		},
	}, nil
}

// MockLLMWithToolCalls issues a tool call on its first invocation and a
// terminal response on every subsequent one.
type MockLLMWithToolCalls struct {
	llms.Model
	callCount int
}

func (m *MockLLMWithToolCalls) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) {
	m.callCount++
	if m.callCount == 1 {
		// First call returns a tool call
		return &llms.ContentResponse{
			Choices: []*llms.ContentChoice{
				{
					Content: "I'll use the tool for you.",
					ToolCalls: []llms.ToolCall{
						{
							ID:   "call_123",
							Type: "function",
							FunctionCall: &llms.FunctionCall{
								Name:      "test_tool",
								Arguments: `{"input":"test input"}`,
							},
						},
					},
					StopReason: "tool_calls",
				},
			},
		}, nil
	}
	// Second call returns final response
	return &llms.ContentResponse{
		Choices: []*llms.ContentChoice{
			{
				Content:    "Tool execution complete. Result: Tool executed successfully",
				StopReason: "stop",
			},
		},
	}, nil
}

// MockToolWithResponse is a configurable tools.Tool returning a canned string.
type MockToolWithResponse struct {
	name        string
	description string
	response    string
}

func (t *MockToolWithResponse) Name() string {
	return t.name
}

func (t *MockToolWithResponse) Description() string {
	return t.description
}

func (t *MockToolWithResponse) Call(ctx context.Context, input string) (string, error) {
	if t.response != "" {
		return t.response, nil
	}
	// Fallback when no canned response was configured.
	return "Mock tool response", nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/supervisor.go
prebuilt/supervisor.go
package prebuilt

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/smallnest/langgraphgo/graph"
	"github.com/tmc/langchaingo/llms"
)

// routeNext asks the supervisor model which member should act next by forcing
// a call to a "route" tool whose single enum parameter lists all members plus
// "FINISH". It returns the selected member name (or "FINISH").
func routeNext(ctx context.Context, model llms.Model, memberNames []string, messages []llms.MessageContent) (string, error) {
	// Copy before appending so "FINISH" can never clobber the caller's
	// memberNames backing array via slice aliasing.
	options := make([]string, 0, len(memberNames)+1)
	options = append(options, memberNames...)
	options = append(options, "FINISH")

	routeTool := llms.Tool{
		Type: "function",
		Function: &llms.FunctionDefinition{
			Name:        "route",
			Description: "Select the next role.",
			Parameters: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"next": map[string]any{
						"type": "string",
						"enum": options,
					},
				},
				"required": []string{"next"},
			},
		},
	}

	systemPrompt := fmt.Sprintf(
		"You are a supervisor tasked with managing a conversation between: %s. Respond with the worker to act next or FINISH. Use the 'route' tool.",
		strings.Join(memberNames, ", "),
	)
	inputMessages := append([]llms.MessageContent{llms.TextParts(llms.ChatMessageTypeSystem, systemPrompt)}, messages...)

	// Force the model to answer via the route tool.
	toolChoice := llms.ToolChoice{Type: "function", Function: &llms.FunctionReference{Name: "route"}}
	resp, err := model.GenerateContent(ctx, inputMessages, llms.WithTools([]llms.Tool{routeTool}), llms.WithToolChoice(toolChoice))
	if err != nil {
		return "", err
	}

	// Guard against empty or malformed responses instead of panicking.
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("supervisor did not select a next step")
	}
	choice := resp.Choices[0]
	if len(choice.ToolCalls) == 0 || choice.ToolCalls[0].FunctionCall == nil {
		return "", fmt.Errorf("supervisor did not select a next step")
	}

	var args struct {
		Next string `json:"next"`
	}
	if err := json.Unmarshal([]byte(choice.ToolCalls[0].FunctionCall.Arguments), &args); err != nil {
		return "", fmt.Errorf("failed to parse route arguments: %w", err)
	}
	return args.Next, nil
}

// CreateSupervisorMap creates a supervisor graph with map[string]any state.
// The supervisor node routes between the member agents until it selects
// FINISH, at which point the graph ends.
func CreateSupervisorMap(model llms.Model, members map[string]*graph.StateRunnable[map[string]any]) (*graph.StateRunnable[map[string]any], error) {
	workflow := graph.NewStateGraph[map[string]any]()

	// Accumulate messages across nodes instead of replacing them.
	schema := graph.NewMapSchema()
	schema.RegisterReducer("messages", graph.AppendReducer)
	workflow.SetSchema(schema)

	var memberNames []string
	for name := range members {
		memberNames = append(memberNames, name)
	}

	workflow.AddNode("supervisor", "Supervisor orchestration node", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		messages, ok := state["messages"].([]llms.MessageContent)
		if !ok {
			return nil, fmt.Errorf("messages key not found or invalid type")
		}
		next, err := routeNext(ctx, model, memberNames, messages)
		if err != nil {
			return nil, err
		}
		return map[string]any{"next": next}, nil
	})

	// One node per member agent; capture loop variables for the closures.
	for name, agent := range members {
		agentName := name
		agentRunnable := agent
		workflow.AddNode(agentName, "Agent: "+agentName, func(ctx context.Context, state map[string]any) (map[string]any, error) {
			return agentRunnable.Invoke(ctx, state)
		})
	}

	workflow.SetEntryPoint("supervisor")
	// Route to the selected member, or end when FINISH (or nothing) is chosen.
	workflow.AddConditionalEdge("supervisor", func(ctx context.Context, state map[string]any) string {
		next, _ := state["next"].(string)
		if next == "FINISH" || next == "" {
			return graph.END
		}
		return next
	})
	// Every member hands control back to the supervisor.
	for _, name := range memberNames {
		workflow.AddEdge(name, "supervisor")
	}

	return workflow.Compile()
}

// CreateSupervisor creates a generic supervisor graph over an arbitrary state
// type S. The caller supplies accessors for the message history and for the
// "next" routing field.
func CreateSupervisor[S any](
	model llms.Model,
	members map[string]*graph.StateRunnable[S],
	getMessages func(S) []llms.MessageContent,
	getNext func(S) string,
	setNext func(S, string) S,
) (*graph.StateRunnable[S], error) {
	workflow := graph.NewStateGraph[S]()

	var memberNames []string
	for name := range members {
		memberNames = append(memberNames, name)
	}

	workflow.AddNode("supervisor", "Supervisor orchestration node", func(ctx context.Context, state S) (S, error) {
		next, err := routeNext(ctx, model, memberNames, getMessages(state))
		if err != nil {
			return state, err
		}
		return setNext(state, next), nil
	})

	// One node per member agent; capture loop variables for the closures.
	for name, runnable := range members {
		agentName := name
		agentRunnable := runnable
		workflow.AddNode(agentName, "Agent: "+agentName, func(ctx context.Context, state S) (S, error) {
			return agentRunnable.Invoke(ctx, state)
		})
	}

	workflow.SetEntryPoint("supervisor")
	// Route to the selected member, or end when FINISH (or nothing) is chosen.
	workflow.AddConditionalEdge("supervisor", func(ctx context.Context, state S) string {
		next := getNext(state)
		if next == "FINISH" || next == "" {
			return graph.END
		}
		return next
	})
	// Every member hands control back to the supervisor.
	for name := range members {
		workflow.AddEdge(name, "supervisor")
	}

	return workflow.Compile()
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/tree_of_thoughts_test.go
prebuilt/tree_of_thoughts_test.go
package prebuilt import ( "context" "testing" "github.com/stretchr/testify/assert" ) // Mock implementations for testing type MockThoughtState struct { hash string isValid bool isGoal bool desc string } func (m *MockThoughtState) IsValid() bool { return m.isValid } func (m *MockThoughtState) IsGoal() bool { return m.isGoal } func (m *MockThoughtState) GetDescription() string { return m.desc } func (m *MockThoughtState) Hash() string { return m.hash } type MockThoughtGenerator struct { generateFunc func(ctx context.Context, current ThoughtState) ([]ThoughtState, error) } func (m *MockThoughtGenerator) Generate(ctx context.Context, current ThoughtState) ([]ThoughtState, error) { if m.generateFunc != nil { return m.generateFunc(ctx, current) } return []ThoughtState{ &MockThoughtState{hash: "state1", isValid: true, isGoal: false, desc: "Thought 1"}, &MockThoughtState{hash: "state2", isValid: true, isGoal: false, desc: "Thought 2"}, }, nil } type MockThoughtEvaluator struct { evaluateFunc func(ctx context.Context, state ThoughtState, pathLength int) (float64, error) } func (m *MockThoughtEvaluator) Evaluate(ctx context.Context, state ThoughtState, pathLength int) (float64, error) { if m.evaluateFunc != nil { return m.evaluateFunc(ctx, state, pathLength) } return 0.5, nil } type MockFailingGenerator struct{} func (m *MockFailingGenerator) Generate(ctx context.Context, current ThoughtState) ([]ThoughtState, error) { return nil, nil // Return empty slice to test edge cases } type MockInvalidStateGenerator struct{} func (m *MockInvalidStateGenerator) Generate(ctx context.Context, current ThoughtState) ([]ThoughtState, error) { return []ThoughtState{ &MockThoughtState{hash: "invalid", isValid: false, isGoal: false, desc: "Invalid"}, }, nil } func TestCreateTreeOfThoughtsAgentMap(t *testing.T) { t.Run("Create agent with valid config", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: 
&MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, MaxDepth: 3, MaxPaths: 2, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.NoError(t, err) assert.NotNil(t, agent) }) t.Run("Create agent with goal as initial state", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "goal", isValid: true, isGoal: true, desc: "Goal"}, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.NoError(t, err) assert.NotNil(t, agent) }) t.Run("Create agent with default MaxDepth and MaxPaths", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, // MaxDepth and MaxPaths are 0, should use defaults } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.NoError(t, err) assert.NotNil(t, agent) }) t.Run("Create agent with missing generator", func(t *testing.T) { config := TreeOfThoughtsConfig{ Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.Error(t, err) assert.Nil(t, agent) assert.Contains(t, err.Error(), "generator") }) t.Run("Create agent with missing evaluator", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.Error(t, err) assert.Nil(t, agent) assert.Contains(t, err.Error(), "evaluator") }) t.Run("Create agent with missing initial state", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.Error(t, err) 
assert.Nil(t, agent) assert.Contains(t, err.Error(), "initial state") }) t.Run("Create agent with verbose option", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, Verbose: true, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.NoError(t, err) assert.NotNil(t, agent) }) } func TestTreeOfThoughtsAgentMap_Execution(t *testing.T) { t.Run("Execute agent - finds goal immediately", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "goal", isValid: true, isGoal: true, desc: "Goal reached"}, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.NoError(t, err) ctx := context.Background() result, err := agent.Invoke(ctx, map[string]any{}) assert.NoError(t, err) assert.NotNil(t, result) }) t.Run("Execute agent - expands and evaluates", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Start"}, MaxDepth: 2, MaxPaths: 3, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.NoError(t, err) ctx := context.Background() result, err := agent.Invoke(ctx, map[string]any{}) assert.NoError(t, err) assert.NotNil(t, result) }) t.Run("Execute agent with empty generator response", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockFailingGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Start"}, MaxDepth: 2, MaxPaths: 3, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.NoError(t, err) ctx := context.Background() result, err := agent.Invoke(ctx, map[string]any{}) assert.NoError(t, err) assert.NotNil(t, result) }) 
t.Run("Execute agent with invalid states", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockInvalidStateGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Start"}, MaxDepth: 2, MaxPaths: 3, } agent, err := CreateTreeOfThoughtsAgentMap(config) assert.NoError(t, err) ctx := context.Background() result, err := agent.Invoke(ctx, map[string]any{}) assert.NoError(t, err) assert.NotNil(t, result) }) } func TestCreateTreeOfThoughtsAgent_Generic(t *testing.T) { type TOTState struct { ActivePaths map[string]*SearchPath `json:"active_paths"` Solution string `json:"solution"` Visited map[string]bool `json:"visited"` Iteration int `json:"iteration"` } t.Run("Create generic agent with valid config", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, MaxDepth: 3, MaxPaths: 2, } agent, err := CreateTreeOfThoughtsAgent[TOTState]( config, func(s TOTState) map[string]*SearchPath { return s.ActivePaths }, func(s TOTState, p map[string]*SearchPath) TOTState { s.ActivePaths = p; return s }, func(s TOTState) string { return s.Solution }, func(s TOTState, sol string) TOTState { s.Solution = sol; return s }, func(s TOTState) map[string]bool { return s.Visited }, func(s TOTState, v map[string]bool) TOTState { s.Visited = v; return s }, func(s TOTState) int { return s.Iteration }, func(s TOTState, i int) TOTState { s.Iteration = i; return s }, ) assert.NoError(t, err) assert.NotNil(t, agent) }) t.Run("Create generic agent with goal state", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "goal", isValid: true, isGoal: true, desc: "Goal"}, } agent, err := CreateTreeOfThoughtsAgent[TOTState]( config, func(s TOTState) 
map[string]*SearchPath { return s.ActivePaths }, func(s TOTState, p map[string]*SearchPath) TOTState { s.ActivePaths = p; return s }, func(s TOTState) string { return s.Solution }, func(s TOTState, sol string) TOTState { s.Solution = sol; return s }, func(s TOTState) map[string]bool { return s.Visited }, func(s TOTState, v map[string]bool) TOTState { s.Visited = v; return s }, func(s TOTState) int { return s.Iteration }, func(s TOTState, i int) TOTState { s.Iteration = i; return s }, ) assert.NoError(t, err) assert.NotNil(t, agent) }) t.Run("Create generic agent with default values", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, } agent, err := CreateTreeOfThoughtsAgent[TOTState]( config, func(s TOTState) map[string]*SearchPath { return s.ActivePaths }, func(s TOTState, p map[string]*SearchPath) TOTState { s.ActivePaths = p; return s }, func(s TOTState) string { return s.Solution }, func(s TOTState, sol string) TOTState { s.Solution = sol; return s }, func(s TOTState) map[string]bool { return s.Visited }, func(s TOTState, v map[string]bool) TOTState { s.Visited = v; return s }, func(s TOTState) int { return s.Iteration }, func(s TOTState, i int) TOTState { s.Iteration = i; return s }, ) assert.NoError(t, err) assert.NotNil(t, agent) }) t.Run("Create generic agent with missing config", func(t *testing.T) { tests := []struct { name string config TreeOfThoughtsConfig errMsg string }{ { name: "missing generator", config: TreeOfThoughtsConfig{ Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, }, errMsg: "generator", }, { name: "missing evaluator", config: TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, }, errMsg: 
"evaluator", }, { name: "missing initial state", config: TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, }, errMsg: "initial state", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { agent, err := CreateTreeOfThoughtsAgent[TOTState]( tt.config, func(s TOTState) map[string]*SearchPath { return s.ActivePaths }, func(s TOTState, p map[string]*SearchPath) TOTState { s.ActivePaths = p; return s }, func(s TOTState) string { return s.Solution }, func(s TOTState, sol string) TOTState { s.Solution = sol; return s }, func(s TOTState) map[string]bool { return s.Visited }, func(s TOTState, v map[string]bool) TOTState { s.Visited = v; return s }, func(s TOTState) int { return s.Iteration }, func(s TOTState, i int) TOTState { s.Iteration = i; return s }, ) assert.Error(t, err) assert.Nil(t, agent) assert.Contains(t, err.Error(), tt.errMsg) }) } }) } func TestTreeOfThoughtsAgent_Generic_Execution(t *testing.T) { type TOTState struct { ActivePaths map[string]*SearchPath `json:"active_paths"` Solution string `json:"solution"` Visited map[string]bool `json:"visited"` Iteration int `json:"iteration"` } t.Run("Execute generic agent", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Start"}, MaxDepth: 2, MaxPaths: 3, } agent, err := CreateTreeOfThoughtsAgent[TOTState]( config, func(s TOTState) map[string]*SearchPath { return s.ActivePaths }, func(s TOTState, p map[string]*SearchPath) TOTState { s.ActivePaths = p; return s }, func(s TOTState) string { return s.Solution }, func(s TOTState, sol string) TOTState { s.Solution = sol; return s }, func(s TOTState) map[string]bool { return s.Visited }, func(s TOTState, v map[string]bool) TOTState { s.Visited = v; return s }, func(s TOTState) int { return s.Iteration }, func(s TOTState, i int) TOTState { s.Iteration = i; 
return s }, ) assert.NoError(t, err) ctx := context.Background() state := TOTState{} result, err := agent.Invoke(ctx, state) assert.NoError(t, err) assert.NotNil(t, result) }) t.Run("Execute generic agent with goal reached", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, InitialState: &MockThoughtState{hash: "goal", isValid: true, isGoal: true, desc: "Goal"}, } agent, err := CreateTreeOfThoughtsAgent[TOTState]( config, func(s TOTState) map[string]*SearchPath { return s.ActivePaths }, func(s TOTState, p map[string]*SearchPath) TOTState { s.ActivePaths = p; return s }, func(s TOTState) string { return s.Solution }, func(s TOTState, sol string) TOTState { s.Solution = sol; return s }, func(s TOTState) map[string]bool { return s.Visited }, func(s TOTState, v map[string]bool) TOTState { s.Visited = v; return s }, func(s TOTState) int { return s.Iteration }, func(s TOTState, i int) TOTState { s.Iteration = i; return s }, ) assert.NoError(t, err) ctx := context.Background() state := TOTState{} result, err := agent.Invoke(ctx, state) assert.NoError(t, err) assert.NotNil(t, result) assert.NotEmpty(t, result.Solution) }) } func TestSearchPath(t *testing.T) { t.Run("Create search path", func(t *testing.T) { states := []ThoughtState{ &MockThoughtState{hash: "s1", isValid: true, isGoal: false, desc: "State 1"}, &MockThoughtState{hash: "s2", isValid: true, isGoal: false, desc: "State 2"}, } path := SearchPath{States: states, Score: 0.8} assert.NotNil(t, path.States) assert.Len(t, path.States, 2) assert.Equal(t, 0.8, path.Score) }) t.Run("Search path with empty states", func(t *testing.T) { path := SearchPath{States: []ThoughtState{}, Score: 0} assert.NotNil(t, path.States) assert.Empty(t, path.States) }) } func TestThoughtGenerator(t *testing.T) { t.Run("Custom generator with function", func(t *testing.T) { generator := &MockThoughtGenerator{ generateFunc: func(ctx context.Context, current 
ThoughtState) ([]ThoughtState, error) { return []ThoughtState{ &MockThoughtState{hash: "custom1", isValid: true, isGoal: false, desc: "Custom 1"}, }, nil }, } ctx := context.Background() current := &MockThoughtState{hash: "current", isValid: true, isGoal: false, desc: "Current"} states, err := generator.Generate(ctx, current) assert.NoError(t, err) assert.Len(t, states, 1) assert.Equal(t, "custom1", states[0].Hash()) }) } func TestThoughtEvaluator(t *testing.T) { t.Run("Custom evaluator with function", func(t *testing.T) { evaluator := &MockThoughtEvaluator{ evaluateFunc: func(ctx context.Context, state ThoughtState, pathLength int) (float64, error) { return 0.95, nil }, } ctx := context.Background() state := &MockThoughtState{hash: "test", isValid: true, isGoal: false, desc: "Test"} score, err := evaluator.Evaluate(ctx, state, 3) assert.NoError(t, err) assert.Equal(t, 0.95, score) }) t.Run("Default evaluator", func(t *testing.T) { evaluator := &MockThoughtEvaluator{} ctx := context.Background() state := &MockThoughtState{hash: "test", isValid: true, isGoal: false, desc: "Test"} score, err := evaluator.Evaluate(ctx, state, 1) assert.NoError(t, err) assert.Equal(t, 0.5, score) }) } func TestThoughtState(t *testing.T) { t.Run("MockThoughtState methods", func(t *testing.T) { state := &MockThoughtState{ hash: "test-hash", isValid: true, isGoal: false, desc: "Test description", } assert.True(t, state.IsValid()) assert.False(t, state.IsGoal()) assert.Equal(t, "Test description", state.GetDescription()) assert.Equal(t, "test-hash", state.Hash()) }) t.Run("Invalid state", func(t *testing.T) { state := &MockThoughtState{ hash: "invalid", isValid: false, isGoal: false, desc: "Invalid state", } assert.False(t, state.IsValid()) assert.False(t, state.IsGoal()) }) t.Run("Goal state", func(t *testing.T) { state := &MockThoughtState{ hash: "goal", isValid: true, isGoal: true, desc: "Goal reached", } assert.True(t, state.IsValid()) assert.True(t, state.IsGoal()) }) } func 
TestTreeOfThoughtsConfig(t *testing.T) { t.Run("Config with all fields", func(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &MockThoughtGenerator{}, Evaluator: &MockThoughtEvaluator{}, MaxDepth: 5, MaxPaths: 10, Verbose: true, InitialState: &MockThoughtState{hash: "init", isValid: true, isGoal: false, desc: "Initial"}, } assert.NotNil(t, config.Generator) assert.NotNil(t, config.Evaluator) assert.Equal(t, 5, config.MaxDepth) assert.Equal(t, 10, config.MaxPaths) assert.True(t, config.Verbose) assert.NotNil(t, config.InitialState) }) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/react_agent_test.go
prebuilt/react_agent_test.go
package prebuilt import ( "context" "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/tools" ) // WeatherTool implements tools.Tool for testing type WeatherTool struct { currentTemp int } func NewWeatherTool(temp int) *WeatherTool { return &WeatherTool{currentTemp: temp} } func (t *WeatherTool) Name() string { return "get_weather" } func (t *WeatherTool) Description() string { return "Get weather" } func (t *WeatherTool) Call(ctx context.Context, input string) (string, error) { return fmt.Sprintf("Weather: %d°C", t.currentTemp), nil } // ReactMockLLM implements llms.Model for testing type ReactMockLLM struct { responses []llms.ContentResponse callCount int } func (m *ReactMockLLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { if m.callCount >= len(m.responses) { return &llms.ContentResponse{ Choices: []*llms.ContentChoice{ {Content: "No more responses"}, }, }, nil } resp := m.responses[m.callCount] m.callCount++ return &resp, nil } func (m *ReactMockLLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", nil } func TestReactAgentWithWeatherTool(t *testing.T) { weatherTool := NewWeatherTool(25) mockLLM := &ReactMockLLM{ responses: []llms.ContentResponse{ {Choices: []*llms.ContentChoice{{ToolCalls: []llms.ToolCall{{ID: "call-1", Type: "function", FunctionCall: &llms.FunctionCall{Name: "get_weather", Arguments: `{"input": "beijing"}`}}}}}}, {Choices: []*llms.ContentChoice{{Content: "Beijing is 25°C."}}}, }, } agent, err := CreateReactAgentMap(mockLLM, []tools.Tool{weatherTool}, 5) assert.NoError(t, err) res, err := agent.Invoke(context.Background(), map[string]any{"messages": []llms.MessageContent{llms.TextParts(llms.ChatMessageTypeHuman, "Weather in Beijing?")}}) assert.NoError(t, err) messages := res["messages"].([]llms.MessageContent) assert.True(t, len(messages) >= 2) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/pev_agent_test.go
prebuilt/pev_agent_test.go
package prebuilt import ( "testing" "github.com/tmc/langchaingo/tools" ) func TestCreatePEVAgentMap(t *testing.T) { mockLLM := &PEVMockLLM{ responses: []string{ "1. Step", `{"is_successful": true, "reasoning": "Ok"}`, "Final", }, } config := PEVAgentConfig{ Model: mockLLM, Tools: []tools.Tool{PEVMockTool{name: "calculator"}}, MaxRetries: 3, } agent, err := CreatePEVAgentMap(config) if err != nil { t.Fatalf("Failed: %v", err) } if agent == nil { t.Fatal("Agent is nil") } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/generic_agents_test.go
prebuilt/generic_agents_test.go
package prebuilt import ( "context" "testing" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/tools" ) // Mock Reflection LLM type MockReflectionLLM struct { responses []string callCount int } func (m *MockReflectionLLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { response := m.responses[m.callCount%len(m.responses)] m.callCount++ return &llms.ContentResponse{Choices: []*llms.ContentChoice{{Content: response}}}, nil } func (m *MockReflectionLLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", nil } // PEV Mock Tool type PEVMockTool struct { name string description string response string } func (m PEVMockTool) Name() string { return m.name } func (m PEVMockTool) Description() string { return m.description } func (m PEVMockTool) Call(ctx context.Context, input string) (string, error) { return m.response, nil } // PEV Mock LLM type PEVMockLLM struct { responses []string callCount int } func (m *PEVMockLLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { if m.callCount >= len(m.responses) { return &llms.ContentResponse{Choices: []*llms.ContentChoice{{Content: "Default response"}}}, nil } response := m.responses[m.callCount] m.callCount++ return &llms.ContentResponse{Choices: []*llms.ContentChoice{{Content: response}}}, nil } func (m *PEVMockLLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", nil } // Simple Thought State type SimpleThoughtState struct { isGoal bool isValid bool desc string } func (s *SimpleThoughtState) IsValid() bool { return s.isValid } func (s *SimpleThoughtState) IsGoal() bool { return s.isGoal } func (s *SimpleThoughtState) GetDescription() string { return s.desc } func (s *SimpleThoughtState) Hash() string { return s.desc } type SimpleThoughtGenerator struct{} func (g 
*SimpleThoughtGenerator) Generate(ctx context.Context, state ThoughtState) ([]ThoughtState, error) { return []ThoughtState{}, nil } type SimpleThoughtEvaluator struct{} func (e *SimpleThoughtEvaluator) Evaluate(ctx context.Context, state ThoughtState, depth int) (float64, error) { return 1.0, nil } // TestCreateReflectionAgent tests the ReflectionAgent func TestCreateReflectionAgent(t *testing.T) { mockLLM := &MockReflectionLLM{ responses: []string{ "Initial response", "**Strengths:** Good. **Weaknesses:** None. **Suggestions:** None.", }, } config := ReflectionAgentConfig{Model: mockLLM, MaxIterations: 2} agent, err := CreateReflectionAgent( config, func(s ReflectionAgentState) []llms.MessageContent { return s.Messages }, func(s ReflectionAgentState, m []llms.MessageContent) ReflectionAgentState { s.Messages = m; return s }, func(s ReflectionAgentState) string { return s.Draft }, func(s ReflectionAgentState, d string) ReflectionAgentState { s.Draft = d; return s }, func(s ReflectionAgentState) int { return s.Iteration }, func(s ReflectionAgentState, i int) ReflectionAgentState { s.Iteration = i; return s }, func(s ReflectionAgentState) string { return s.Reflection }, func(s ReflectionAgentState, r string) ReflectionAgentState { s.Reflection = r; return s }, ) if err != nil { t.Fatalf("Failed: %v", err) } _, err = agent.Invoke(context.Background(), ReflectionAgentState{Messages: []llms.MessageContent{llms.TextParts(llms.ChatMessageTypeHuman, "Test")}}) if err != nil { t.Fatalf("Invoke failed: %v", err) } } // TestCreatePEVAgent tests the PEVAgent func TestCreatePEVAgent(t *testing.T) { mockLLM := &PEVMockLLM{ responses: []string{ "1. 
Step", `{"tool": "calculator", "tool_input": "2+2"}`, `{"is_successful": true, "reasoning": "Ok"}`, "Final", }, } config := PEVAgentConfig{Model: mockLLM, Tools: []tools.Tool{PEVMockTool{name: "calculator"}}} agent, err := CreatePEVAgent( config, func(s PEVAgentState) []llms.MessageContent { return s.Messages }, func(s PEVAgentState, m []llms.MessageContent) PEVAgentState { s.Messages = m; return s }, func(s PEVAgentState) []string { return s.Plan }, func(s PEVAgentState, p []string) PEVAgentState { s.Plan = p; return s }, func(s PEVAgentState) int { return s.CurrentStep }, func(s PEVAgentState, i int) PEVAgentState { s.CurrentStep = i; return s }, func(s PEVAgentState) string { return s.LastToolResult }, func(s PEVAgentState, r string) PEVAgentState { s.LastToolResult = r; return s }, func(s PEVAgentState) []string { return s.IntermediateSteps }, func(s PEVAgentState, steps []string) PEVAgentState { s.IntermediateSteps = steps; return s }, func(s PEVAgentState) int { return s.Retries }, func(s PEVAgentState, r int) PEVAgentState { s.Retries = r; return s }, func(s PEVAgentState) string { return s.VerificationResult }, func(s PEVAgentState, r string) PEVAgentState { s.VerificationResult = r; return s }, func(s PEVAgentState) string { return s.FinalAnswer }, func(s PEVAgentState, a string) PEVAgentState { s.FinalAnswer = a; return s }, ) if err != nil { t.Fatalf("Failed: %v", err) } _, err = agent.Invoke(context.Background(), PEVAgentState{Messages: []llms.MessageContent{llms.TextParts(llms.ChatMessageTypeHuman, "Test")}}) if err != nil { t.Fatalf("Invoke failed: %v", err) } } func TestTreeOfThoughtsAgent(t *testing.T) { config := TreeOfThoughtsConfig{ Generator: &SimpleThoughtGenerator{}, Evaluator: &SimpleThoughtEvaluator{}, InitialState: &SimpleThoughtState{isGoal: true, isValid: true, desc: "Goal"}, } agent, err := CreateTreeOfThoughtsAgent( config, func(s TreeOfThoughtsState) map[string]*SearchPath { return s.ActivePaths }, func(s TreeOfThoughtsState, p 
map[string]*SearchPath) TreeOfThoughtsState { s.ActivePaths = p; return s }, func(s TreeOfThoughtsState) string { return s.Solution }, func(s TreeOfThoughtsState, sol string) TreeOfThoughtsState { s.Solution = sol; return s }, func(s TreeOfThoughtsState) map[string]bool { return s.VisitedStates }, func(s TreeOfThoughtsState, v map[string]bool) TreeOfThoughtsState { s.VisitedStates = v; return s }, func(s TreeOfThoughtsState) int { return s.Iteration }, func(s TreeOfThoughtsState, i int) TreeOfThoughtsState { s.Iteration = i; return s }, ) if err != nil { t.Fatalf("Failed: %v", err) } _, err = agent.Invoke(context.Background(), TreeOfThoughtsState{}) if err != nil { t.Fatalf("Invoke failed: %v", err) } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/tree_of_thoughts.go
prebuilt/tree_of_thoughts.go
package prebuilt import ( "context" "fmt" "github.com/smallnest/langgraphgo/graph" ) type ThoughtState interface { IsValid() bool IsGoal() bool GetDescription() string Hash() string } type ThoughtGenerator interface { Generate(ctx context.Context, current ThoughtState) ([]ThoughtState, error) } type ThoughtEvaluator interface { Evaluate(ctx context.Context, state ThoughtState, pathLength int) (float64, error) } type SearchPath struct { States []ThoughtState Score float64 } type TreeOfThoughtsConfig struct { Generator ThoughtGenerator Evaluator ThoughtEvaluator MaxDepth int MaxPaths int Verbose bool InitialState ThoughtState } // CreateTreeOfThoughtsAgentMap creates a ToT agent with map[string]any state func CreateTreeOfThoughtsAgentMap(config TreeOfThoughtsConfig) (*graph.StateRunnable[map[string]any], error) { if config.Generator == nil || config.Evaluator == nil || config.InitialState == nil { return nil, fmt.Errorf("generator, evaluator and initial state are required") } if config.MaxDepth == 0 { config.MaxDepth = 10 } if config.MaxPaths == 0 { config.MaxPaths = 5 } workflow := graph.NewStateGraph[map[string]any]() workflow.AddNode("initialize", "Initialize search", func(ctx context.Context, state map[string]any) (map[string]any, error) { initialPath := SearchPath{States: []ThoughtState{config.InitialState}, Score: 0} visited := map[string]bool{config.InitialState.Hash(): true} return map[string]any{ "active_paths": []SearchPath{initialPath}, "solution": nil, "visited_states": visited, "iteration": 0, }, nil }) workflow.AddNode("expand", "Expand paths", func(ctx context.Context, state map[string]any) (map[string]any, error) { activePaths, _ := state["active_paths"].([]SearchPath) visitedStates, ok := state["visited_states"].(map[string]bool) if !ok || visitedStates == nil { visitedStates = make(map[string]bool) } iteration, _ := state["iteration"].(int) var newPaths []SearchPath for _, path := range activePaths { currentState := path.States[len(path.States)-1] 
if currentState.IsGoal() { return map[string]any{"solution": path}, nil } if len(path.States) >= config.MaxDepth { continue } nextStates, _ := config.Generator.Generate(ctx, currentState) for _, next := range nextStates { if !next.IsValid() || visitedStates[next.Hash()] { continue } newPathStates := append([]ThoughtState{}, path.States...) newPathStates = append(newPathStates, next) newPaths = append(newPaths, SearchPath{States: newPathStates, Score: 0}) visitedStates[next.Hash()] = true } } return map[string]any{"active_paths": newPaths, "visited_states": visitedStates, "iteration": iteration + 1}, nil }) workflow.AddNode("evaluate", "Evaluate paths", func(ctx context.Context, state map[string]any) (map[string]any, error) { activePaths, _ := state["active_paths"].([]SearchPath) for i := range activePaths { last := activePaths[i].States[len(activePaths[i].States)-1] score, _ := config.Evaluator.Evaluate(ctx, last, len(activePaths[i].States)) activePaths[i].Score = score } // Sort and prune (simple implementation) var pruned []SearchPath for _, p := range activePaths { if p.Score >= 0 { pruned = append(pruned, p) } } // Keep top MaxPaths (simplified) if len(pruned) > config.MaxPaths { pruned = pruned[:config.MaxPaths] } return map[string]any{"active_paths": pruned}, nil }) workflow.SetEntryPoint("initialize") workflow.AddEdge("initialize", "expand") workflow.AddConditionalEdge("expand", func(ctx context.Context, state map[string]any) string { if s, ok := state["solution"].(SearchPath); ok && s.States != nil { return graph.END } if p, _ := state["active_paths"].([]SearchPath); len(p) == 0 { return graph.END } if iter, _ := state["iteration"].(int); iter >= config.MaxDepth { return graph.END } return "evaluate" }) workflow.AddConditionalEdge("evaluate", func(ctx context.Context, state map[string]any) string { if p, _ := state["active_paths"].([]SearchPath); len(p) == 0 { return graph.END } return "expand" }) return workflow.Compile() } // CreateTreeOfThoughtsAgent 
creates a generic Tree of Thoughts Agent func CreateTreeOfThoughtsAgent[S any]( config TreeOfThoughtsConfig, getActivePaths func(S) map[string]*SearchPath, setActivePaths func(S, map[string]*SearchPath) S, getSolution func(S) string, setSolution func(S, string) S, getVisited func(S) map[string]bool, setVisited func(S, map[string]bool) S, getIteration func(S) int, setIteration func(S, int) S, ) (*graph.StateRunnable[S], error) { if config.Generator == nil || config.Evaluator == nil || config.InitialState == nil { return nil, fmt.Errorf("generator, evaluator and initial state are required") } if config.MaxDepth == 0 { config.MaxDepth = 10 } if config.MaxPaths == 0 { config.MaxPaths = 5 } workflow := graph.NewStateGraph[S]() workflow.AddNode("initialize", "Initialize search", func(ctx context.Context, state S) (S, error) { initialPath := SearchPath{States: []ThoughtState{config.InitialState}, Score: 0} paths := map[string]*SearchPath{"initial": &initialPath} visited := map[string]bool{config.InitialState.Hash(): true} state = setActivePaths(state, paths) state = setVisited(state, visited) state = setIteration(state, 0) return state, nil }) workflow.AddNode("expand", "Expand paths", func(ctx context.Context, state S) (S, error) { activePaths := getActivePaths(state) visitedStates := getVisited(state) iteration := getIteration(state) newPaths := make(map[string]*SearchPath) for id, path := range activePaths { currentState := path.States[len(path.States)-1] if currentState.IsGoal() { state = setSolution(state, "Goal reached in path: "+id) return state, nil } if len(path.States) >= config.MaxDepth { continue } nextStates, _ := config.Generator.Generate(ctx, currentState) for i, next := range nextStates { if !next.IsValid() || visitedStates[next.Hash()] { continue } newPathStates := append([]ThoughtState{}, path.States...) 
newPathStates = append(newPathStates, next) newPaths[fmt.Sprintf("%s-%d", id, i)] = &SearchPath{States: newPathStates, Score: 0} visitedStates[next.Hash()] = true } } state = setActivePaths(state, newPaths) state = setVisited(state, visitedStates) state = setIteration(state, iteration+1) return state, nil }) workflow.AddNode("evaluate", "Evaluate paths", func(ctx context.Context, state S) (S, error) { activePaths := getActivePaths(state) for _, path := range activePaths { last := path.States[len(path.States)-1] score, _ := config.Evaluator.Evaluate(ctx, last, len(path.States)) path.Score = score } // Simplified pruning and top-k state = setActivePaths(state, activePaths) // Update state return state, nil }) workflow.SetEntryPoint("initialize") workflow.AddEdge("initialize", "expand") workflow.AddConditionalEdge("expand", func(ctx context.Context, state S) string { if getSolution(state) != "" { return graph.END } if len(getActivePaths(state)) == 0 { return graph.END } if getIteration(state) >= config.MaxDepth { return graph.END } return "evaluate" }) workflow.AddConditionalEdge("evaluate", func(ctx context.Context, state S) string { if len(getActivePaths(state)) == 0 { return graph.END } return "expand" }) return workflow.Compile() }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/planning_agent.go
prebuilt/planning_agent.go
package prebuilt import ( "context" "encoding/json" "fmt" "regexp" "strings" "github.com/smallnest/langgraphgo/graph" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/tools" ) // CreatePlanningAgentMap creates a planning agent with map[string]any state func CreatePlanningAgentMap(model llms.Model, availableNodes []graph.TypedNode[map[string]any], inputTools []tools.Tool, opts ...CreateAgentOption) (*graph.StateRunnable[map[string]any], error) { options := &CreateAgentOptions{} for _, opt := range opts { opt(options) } nodeMap := make(map[string]graph.TypedNode[map[string]any]) for _, node := range availableNodes { nodeMap[node.Name] = node } workflow := graph.NewStateGraph[map[string]any]() agentSchema := graph.NewMapSchema() agentSchema.RegisterReducer("messages", graph.AppendReducer) agentSchema.RegisterReducer("workflow_plan", graph.OverwriteReducer) workflow.SetSchema(agentSchema) workflow.AddNode("planner", "Generates workflow plan", func(ctx context.Context, state map[string]any) (map[string]any, error) { messages, ok := state["messages"].([]llms.MessageContent) if !ok { return nil, fmt.Errorf("messages not found") } nodeDescriptions := buildPlanningNodeDescriptions(availableNodes) planningPrompt := buildPlanningPrompt(nodeDescriptions) planningMessages := []llms.MessageContent{ {Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(planningPrompt)}}, } planningMessages = append(planningMessages, messages...) 
resp, err := model.GenerateContent(ctx, planningMessages) if err != nil { return nil, err } planText := resp.Choices[0].Content workflowPlan, err := parseWorkflowPlan(planText) if err != nil { return nil, err } aiMsg := llms.MessageContent{ Role: llms.ChatMessageTypeAI, Parts: []llms.ContentPart{llms.TextPart(fmt.Sprintf("Workflow plan created with %d nodes and %d edges", len(workflowPlan.Nodes), len(workflowPlan.Edges)))}, } return map[string]any{ "messages": []llms.MessageContent{aiMsg}, "workflow_plan": workflowPlan, }, nil }) workflow.AddNode("executor", "Executes the planned workflow", func(ctx context.Context, state map[string]any) (map[string]any, error) { workflowPlan, ok := state["workflow_plan"].(*WorkflowPlan) if !ok { return nil, fmt.Errorf("workflow_plan not found in state") } dynamicWorkflow := graph.NewStateGraph[map[string]any]() dynamicSchema := graph.NewMapSchema() dynamicSchema.RegisterReducer("messages", graph.AppendReducer) dynamicWorkflow.SetSchema(dynamicSchema) for _, planNode := range workflowPlan.Nodes { if planNode.Name == "START" || planNode.Name == "END" { continue } actualNode, exists := nodeMap[planNode.Name] if !exists { return nil, fmt.Errorf("node %s not found", planNode.Name) } dynamicWorkflow.AddNode(actualNode.Name, actualNode.Description, actualNode.Function) } var entryPoint string endNodes := make(map[string]bool) for _, edge := range workflowPlan.Edges { if edge.From == "START" { entryPoint = edge.To continue } if edge.To == "END" { endNodes[edge.From] = true continue } if edge.Condition != "" { dynamicWorkflow.AddConditionalEdge(edge.From, func(ctx context.Context, state map[string]any) string { return edge.To }) } else { dynamicWorkflow.AddEdge(edge.From, edge.To) } } for nodeName := range endNodes { dynamicWorkflow.AddEdge(nodeName, graph.END) } if entryPoint == "" { return nil, fmt.Errorf("no entry point in plan") } dynamicWorkflow.SetEntryPoint(entryPoint) runnable, err := dynamicWorkflow.Compile() if err != nil { 
return nil, err } return runnable.Invoke(ctx, state) }) workflow.SetEntryPoint("planner") workflow.AddEdge("planner", "executor") workflow.AddEdge("executor", graph.END) return workflow.Compile() } // CreatePlanningAgent creates a generic planning agent func CreatePlanningAgent[S any]( model llms.Model, availableNodes []graph.TypedNode[S], getMessages func(S) []llms.MessageContent, setMessages func(S, []llms.MessageContent) S, getPlan func(S) *WorkflowPlan, setPlan func(S, *WorkflowPlan) S, opts ...CreateAgentOption, ) (*graph.StateRunnable[S], error) { options := &CreateAgentOptions{} for _, opt := range opts { opt(options) } nodeMap := make(map[string]graph.TypedNode[S]) for _, node := range availableNodes { nodeMap[node.Name] = node } workflow := graph.NewStateGraph[S]() workflow.AddNode("planner", "Generates workflow plan", func(ctx context.Context, state S) (S, error) { messages := getMessages(state) if len(messages) == 0 { return state, fmt.Errorf("no messages found in state") } nodeDescriptions := buildPlanningNodeDescriptions(availableNodes) planningPrompt := buildPlanningPrompt(nodeDescriptions) planningMessages := []llms.MessageContent{ {Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(planningPrompt)}}, } planningMessages = append(planningMessages, messages...) 
resp, err := model.GenerateContent(ctx, planningMessages) if err != nil { return state, err } planText := resp.Choices[0].Content workflowPlan, err := parseWorkflowPlan(planText) if err != nil { return state, err } aiMsg := llms.MessageContent{ Role: llms.ChatMessageTypeAI, Parts: []llms.ContentPart{llms.TextPart(fmt.Sprintf("Workflow plan created with %d nodes and %d edges", len(workflowPlan.Nodes), len(workflowPlan.Edges)))}, } state = setMessages(state, append(messages, aiMsg)) state = setPlan(state, workflowPlan) return state, nil }) workflow.AddNode("executor", "Executes the planned workflow", func(ctx context.Context, state S) (S, error) { workflowPlan := getPlan(state) if workflowPlan == nil { return state, fmt.Errorf("workflow_plan not found in state") } dynamicWorkflow := graph.NewStateGraph[S]() // Note: We can't easily use Schema here without knowing more about S // So we assume nodes handle their own state merging if needed or S is simple for _, planNode := range workflowPlan.Nodes { if planNode.Name == "START" || planNode.Name == "END" { continue } actualNode, exists := nodeMap[planNode.Name] if !exists { return state, fmt.Errorf("node %s not found", planNode.Name) } dynamicWorkflow.AddNode(actualNode.Name, actualNode.Description, actualNode.Function) } var entryPoint string endNodes := make(map[string]bool) for _, edge := range workflowPlan.Edges { if edge.From == "START" { entryPoint = edge.To continue } if edge.To == "END" { endNodes[edge.From] = true continue } if edge.Condition != "" { dynamicWorkflow.AddConditionalEdge(edge.From, func(ctx context.Context, s S) string { return edge.To }) } else { dynamicWorkflow.AddEdge(edge.From, edge.To) } } for nodeName := range endNodes { dynamicWorkflow.AddEdge(nodeName, graph.END) } if entryPoint == "" { return state, fmt.Errorf("no entry point in plan") } dynamicWorkflow.SetEntryPoint(entryPoint) runnable, err := dynamicWorkflow.Compile() if err != nil { return state, err } return runnable.Invoke(ctx, 
state) }) workflow.SetEntryPoint("planner") workflow.AddEdge("planner", "executor") workflow.AddEdge("executor", graph.END) return workflow.Compile() } func buildPlanningNodeDescriptions[S any](nodes []graph.TypedNode[S]) string { var sb strings.Builder sb.WriteString("Available nodes:\n") for i, node := range nodes { sb.WriteString(fmt.Sprintf("%d. %s: %s\n", i+1, node.Name, node.Description)) } return sb.String() } func buildPlanningPrompt(nodeDescriptions string) string { return fmt.Sprintf(`You are a workflow planning assistant. Based on the user's request, create a workflow plan using the available nodes. %s Generate a workflow plan in the following JSON format: { "nodes": [ {"name": "node_name", "type": "process"} ], "edges": [ {"from": "START", "to": "first_node"}, {"from": "first_node", "to": "second_node"}, {"from": "last_node", "to": "END"} ] } Rules: 1. The workflow must start with an edge from "START" 2. The workflow must end with an edge to "END" 3. Only use nodes from the available nodes list 4. Each node should appear in the nodes array 5. Create a logical flow based on the user's request 6. Return ONLY the JSON object, no additional text`, nodeDescriptions) } func parseWorkflowPlan(planText string) (*WorkflowPlan, error) { jsonText := extractJSON(planText) var plan WorkflowPlan if err := json.Unmarshal([]byte(jsonText), &plan); err != nil { return nil, fmt.Errorf("failed to parse JSON: %w", err) } if len(plan.Nodes) == 0 || len(plan.Edges) == 0 { return nil, fmt.Errorf("invalid plan") } return &plan, nil } func extractJSON(text string) string { codeBlockRegex := regexp.MustCompile("(?s)```(?:json)?\\s*({.*?})\\s*```") matches := codeBlockRegex.FindStringSubmatch(text) if len(matches) > 1 { return matches[1] } jsonRegex := regexp.MustCompile("(?s){.*}") matches = jsonRegex.FindStringSubmatch(text) if len(matches) > 0 { return matches[0] } return text }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/tool_executor_test.go
prebuilt/tool_executor_test.go
package prebuilt import ( "context" "testing" "github.com/stretchr/testify/assert" "github.com/tmc/langchaingo/tools" ) // MockTool implements tools.Tool for testing type MockTool struct { name string } func (t *MockTool) Name() string { return t.name } func (t *MockTool) Description() string { return "A mock tool" } func (t *MockTool) Call(ctx context.Context, input string) (string, error) { return "Executed " + t.name + " with " + input, nil } func TestToolExecutor(t *testing.T) { mockTool := &MockTool{name: "test-tool"} executor := NewToolExecutor([]tools.Tool{mockTool}) // Test single invocation inv := ToolInvocation{ Tool: "test-tool", ToolInput: "input", } res, err := executor.Execute(context.Background(), inv) assert.NoError(t, err) assert.Equal(t, "Executed test-tool with input", res) // Test ToolNode with struct resNode, err := executor.ToolNode(context.Background(), inv) assert.NoError(t, err) assert.Equal(t, "Executed test-tool with input", resNode) // Test ToolNode with map mapState := map[string]any{ "tool": "test-tool", "tool_input": "map-input", } resMap, err := executor.ToolNode(context.Background(), mapState) assert.NoError(t, err) assert.Equal(t, "Executed test-tool with map-input", resMap) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/tool_node_test.go
prebuilt/tool_node_test.go
package prebuilt import ( "context" "testing" "github.com/stretchr/testify/assert" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/tools" ) func TestToolNodeMap(t *testing.T) { mockTool := &MockTool{name: "test-tool"} executor := NewToolExecutor([]tools.Tool{mockTool}) node := ToolNodeMap(executor) // Construct state with AIMessage containing ToolCall toolCall := llms.ToolCall{ ID: "call_1", Type: "function", FunctionCall: &llms.FunctionCall{ Name: "test-tool", Arguments: `{"input": "test-input"}`, }, } aiMsg := llms.MessageContent{ Role: llms.ChatMessageTypeAI, Parts: []llms.ContentPart{ toolCall, }, } state := map[string]any{ "messages": []llms.MessageContent{aiMsg}, } // Invoke ToolNode res, err := node(context.Background(), state) assert.NoError(t, err) msgs, ok := res["messages"].([]llms.MessageContent) assert.True(t, ok) assert.Len(t, msgs, 1) toolMsg := msgs[0] assert.Equal(t, llms.ChatMessageTypeTool, toolMsg.Role) assert.Len(t, toolMsg.Parts, 1) toolResp, ok := toolMsg.Parts[0].(llms.ToolCallResponse) assert.True(t, ok) assert.Equal(t, "call_1", toolResp.ToolCallID) assert.Equal(t, "test-tool", toolResp.Name) assert.Equal(t, "Executed test-tool with test-input", toolResp.Content) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/pev_agent.go
prebuilt/pev_agent.go
package prebuilt import ( "context" "encoding/json" "fmt" "strings" "github.com/smallnest/langgraphgo/graph" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/tools" ) // PEVAgentConfig configures the PEV (Plan, Execute, Verify) agent type PEVAgentConfig struct { Model llms.Model Tools []tools.Tool MaxRetries int SystemMessage string VerificationPrompt string Verbose bool } // VerificationResult represents the result of verification type VerificationResult struct { IsSuccessful bool `json:"is_successful"` Reasoning string `json:"reasoning"` } // CreatePEVAgentMap creates a new PEV Agent with map[string]any state func CreatePEVAgentMap(config PEVAgentConfig) (*graph.StateRunnable[map[string]any], error) { if config.Model == nil { return nil, fmt.Errorf("model is required") } if config.MaxRetries == 0 { config.MaxRetries = 3 } if config.SystemMessage == "" { config.SystemMessage = buildPEVDefaultPlannerPrompt() } if config.VerificationPrompt == "" { config.VerificationPrompt = buildPEVDefaultVerificationPrompt() } toolExecutor := NewToolExecutor(config.Tools) workflow := graph.NewStateGraph[map[string]any]() agentSchema := graph.NewMapSchema() agentSchema.RegisterReducer("messages", graph.AppendReducer) agentSchema.RegisterReducer("intermediate_steps", graph.AppendReducer) workflow.SetSchema(agentSchema) workflow.AddNode("planner", "Create or revise execution plan", func(ctx context.Context, state map[string]any) (map[string]any, error) { retries, _ := state["retries"].(int) messages, ok := state["messages"].([]llms.MessageContent) if !ok || len(messages) == 0 { return nil, fmt.Errorf("no messages found") } var promptMessages []llms.MessageContent if retries == 0 { promptMessages = append([]llms.MessageContent{{Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.SystemMessage)}}}, messages...) 
} else { lastResult, _ := state["last_tool_result"].(string) vResult, _ := state["verification_result"].(string) replanPrompt := fmt.Sprintf("Previous failed verification. New plan needed.\nRequest: %s\nLast result: %s\nFeedback: %s", getPEVOriginalRequest(messages), lastResult, vResult) promptMessages = []llms.MessageContent{ {Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.SystemMessage)}}, {Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(replanPrompt)}}, } } resp, err := config.Model.GenerateContent(ctx, promptMessages) if err != nil { return nil, err } steps := parsePEVPlanSteps(resp.Choices[0].Content) return map[string]any{"plan": steps, "current_step": 0}, nil }) workflow.AddNode("executor", "Execute step", func(ctx context.Context, state map[string]any) (map[string]any, error) { plan, _ := state["plan"].([]string) currentStep, _ := state["current_step"].(int) if currentStep >= len(plan) { return nil, fmt.Errorf("step out of bounds") } stepDesc := plan[currentStep] result, err := executePEVStep(ctx, stepDesc, toolExecutor, config.Model) if err != nil { result = fmt.Sprintf("Error: %v", err) } return map[string]any{ "last_tool_result": result, "intermediate_steps": []string{fmt.Sprintf("Step %d: %s -> %s", currentStep+1, stepDesc, result)}, }, nil }) workflow.AddNode("verifier", "Verify result", func(ctx context.Context, state map[string]any) (map[string]any, error) { lastResult, _ := state["last_tool_result"].(string) plan, _ := state["plan"].([]string) currentStep, _ := state["current_step"].(int) stepDesc := plan[currentStep] verifyPrompt := fmt.Sprintf("Verify: Action: %s\nResult: %s", stepDesc, lastResult) promptMessages := []llms.MessageContent{ {Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.VerificationPrompt)}}, {Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(verifyPrompt)}}, } resp, err := config.Model.GenerateContent(ctx, 
promptMessages) if err != nil { return nil, err } var vResult VerificationResult _ = json.Unmarshal([]byte(extractPEVJSON(resp.Choices[0].Content)), &vResult) return map[string]any{"verification_result": vResult.Reasoning, "is_successful": vResult.IsSuccessful}, nil }) workflow.AddNode("synthesizer", "Synthesize final answer", func(ctx context.Context, state map[string]any) (map[string]any, error) { messages, _ := state["messages"].([]llms.MessageContent) steps, _ := state["intermediate_steps"].([]string) prompt := fmt.Sprintf("Synthesize: Request: %s\nSteps: %s", getPEVOriginalRequest(messages), strings.Join(steps, "\n")) resp, err := config.Model.GenerateContent(ctx, []llms.MessageContent{{Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(prompt)}}}) if err != nil { return nil, err } answer := resp.Choices[0].Content return map[string]any{ "messages": []llms.MessageContent{{Role: llms.ChatMessageTypeAI, Parts: []llms.ContentPart{llms.TextPart(answer)}}}, "final_answer": answer, }, nil }) workflow.SetEntryPoint("planner") workflow.AddConditionalEdge("planner", func(ctx context.Context, state map[string]any) string { if p, ok := state["plan"].([]string); ok && len(p) > 0 { return "executor" } return graph.END }) workflow.AddEdge("executor", "verifier") workflow.AddConditionalEdge("verifier", func(ctx context.Context, state map[string]any) string { success, _ := state["is_successful"].(bool) currentStep, _ := state["current_step"].(int) plan, _ := state["plan"].([]string) if success { if currentStep+1 >= len(plan) { return "synthesizer" } state["current_step"] = currentStep + 1 return "executor" } retries, _ := state["retries"].(int) if retries >= config.MaxRetries { return "synthesizer" } state["retries"] = retries + 1 return "planner" }) workflow.AddEdge("synthesizer", graph.END) return workflow.Compile() } // CreatePEVAgent creates a generic PEV Agent func CreatePEVAgent[S any]( config PEVAgentConfig, getMessages func(S) 
[]llms.MessageContent, setMessages func(S, []llms.MessageContent) S, getPlan func(S) []string, setPlan func(S, []string) S, getCurrentStep func(S) int, setCurrentStep func(S, int) S, getLastToolResult func(S) string, setLastToolResult func(S, string) S, getIntermediateSteps func(S) []string, setIntermediateSteps func(S, []string) S, getRetries func(S) int, setRetries func(S, int) S, getVerificationResult func(S) string, setVerificationResult func(S, string) S, getFinalAnswer func(S) string, setFinalAnswer func(S, string) S, ) (*graph.StateRunnable[S], error) { if config.Model == nil { return nil, fmt.Errorf("model is required") } if config.MaxRetries == 0 { config.MaxRetries = 3 } if config.SystemMessage == "" { config.SystemMessage = buildPEVDefaultPlannerPrompt() } if config.VerificationPrompt == "" { config.VerificationPrompt = buildPEVDefaultVerificationPrompt() } toolExecutor := NewToolExecutor(config.Tools) workflow := graph.NewStateGraph[S]() workflow.AddNode("planner", "Create or revise execution plan", func(ctx context.Context, state S) (S, error) { retries := getRetries(state) messages := getMessages(state) if len(messages) == 0 { return state, fmt.Errorf("no messages") } var promptMessages []llms.MessageContent if retries == 0 { promptMessages = append([]llms.MessageContent{{Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.SystemMessage)}}}, messages...) 
} else { replanPrompt := fmt.Sprintf("Re-plan: Request: %s\nLast result: %s\nFeedback: %s", getPEVOriginalRequest(messages), getLastToolResult(state), getVerificationResult(state)) promptMessages = []llms.MessageContent{ {Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.SystemMessage)}}, {Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(replanPrompt)}}, } } resp, err := config.Model.GenerateContent(ctx, promptMessages) if err != nil { return state, err } state = setPlan(state, parsePEVPlanSteps(resp.Choices[0].Content)) state = setCurrentStep(state, 0) return state, nil }) workflow.AddNode("executor", "Execute step", func(ctx context.Context, state S) (S, error) { plan := getPlan(state) currentStep := getCurrentStep(state) if currentStep >= len(plan) { return state, fmt.Errorf("out of bounds") } result, err := executePEVStep(ctx, plan[currentStep], toolExecutor, config.Model) if err != nil { result = "Error: " + err.Error() } state = setLastToolResult(state, result) state = setIntermediateSteps(state, append(getIntermediateSteps(state), fmt.Sprintf("Step %d: %s -> %s", currentStep+1, plan[currentStep], result))) return state, nil }) workflow.AddNode("verifier", "Verify result", func(ctx context.Context, state S) (S, error) { prompt := fmt.Sprintf("Verify: Action: %s\nResult: %s", getPlan(state)[getCurrentStep(state)], getLastToolResult(state)) resp, err := config.Model.GenerateContent(ctx, []llms.MessageContent{ {Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.VerificationPrompt)}}, {Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(prompt)}}, }) if err != nil { return state, err } var vResult VerificationResult _ = json.Unmarshal([]byte(extractPEVJSON(resp.Choices[0].Content)), &vResult) // We need a way to pass isSuccessful to the router. For generic S, we can't easily add a field. 
// So we encode it in VerificationResult string or assume the state can hold it. if vResult.IsSuccessful { state = setVerificationResult(state, "SUCCESS: "+vResult.Reasoning) } else { state = setVerificationResult(state, "FAILED: "+vResult.Reasoning) } return state, nil }) workflow.AddNode("synthesizer", "Synthesize final answer", func(ctx context.Context, state S) (S, error) { prompt := fmt.Sprintf("Synthesize: Request: %s\nSteps: %s", getPEVOriginalRequest(getMessages(state)), strings.Join(getIntermediateSteps(state), "\n")) resp, err := config.Model.GenerateContent(ctx, []llms.MessageContent{{Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(prompt)}}}) if err != nil { return state, err } answer := resp.Choices[0].Content state = setMessages(state, append(getMessages(state), llms.MessageContent{Role: llms.ChatMessageTypeAI, Parts: []llms.ContentPart{llms.TextPart(answer)}})) state = setFinalAnswer(state, answer) return state, nil }) workflow.SetEntryPoint("planner") workflow.AddConditionalEdge("planner", func(ctx context.Context, state S) string { if len(getPlan(state)) > 0 { return "executor" } return graph.END }) workflow.AddEdge("executor", "verifier") workflow.AddConditionalEdge("verifier", func(ctx context.Context, state S) string { vResult := getVerificationResult(state) success := strings.HasPrefix(vResult, "SUCCESS:") currentStep := getCurrentStep(state) plan := getPlan(state) if success { if currentStep+1 >= len(plan) { return "synthesizer" } setCurrentStep(state, currentStep+1) return "executor" } retries := getRetries(state) if retries >= config.MaxRetries { return "synthesizer" } setRetries(state, retries+1) return "planner" }) workflow.AddEdge("synthesizer", graph.END) return workflow.Compile() } func parsePEVPlanSteps(planText string) []string { var steps []string for line := range strings.SplitSeq(planText, "\n") { if line = strings.TrimSpace(line); line != "" { steps = append(steps, line) } } return steps } func 
executePEVStep(ctx context.Context, step string, te *ToolExecutor, model llms.Model) (string, error) { if te == nil || len(te.Tools) == 0 { return "Error: No tools", nil } var toolsInfo strings.Builder for name, tool := range te.Tools { toolsInfo.WriteString(fmt.Sprintf("- %s: %s\n", name, tool.Description())) } prompt := fmt.Sprintf("Select tool for: %s\nTools:\n%s\nReturn JSON: {\"tool\": \"name\", \"tool_input\": \"input\"}", step, toolsInfo.String()) resp, err := model.GenerateContent(ctx, []llms.MessageContent{{Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(prompt)}}}) if err != nil { return "", err } var inv ToolInvocation if err := json.Unmarshal([]byte(extractPEVJSON(resp.Choices[0].Content)), &inv); err != nil { return "", err } return te.Execute(ctx, inv) } func extractPEVJSON(text string) string { start := strings.Index(text, "{") end := strings.LastIndex(text, "}") if start != -1 && end != -1 { return text[start : end+1] } return text } func getPEVOriginalRequest(messages []llms.MessageContent) string { for _, m := range messages { if m.Role == llms.ChatMessageTypeHuman { for _, p := range m.Parts { if t, ok := p.(llms.TextContent); ok { return t.Text } } } } return "" } func buildPEVDefaultPlannerPrompt() string { return "Expert planner. Break request into numbered steps." } func buildPEVDefaultVerificationPrompt() string { return "Verification specialist. Determine success/failure. Return JSON with is_successful and reasoning." }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/doc.go
prebuilt/doc.go
// Package prebuilt provides ready-to-use agent implementations for common AI patterns. // // This package offers a collection of pre-built agents that implement various reasoning // and execution patterns, from simple tool-using agents to complex multi-agent systems. // Each agent is implemented using the core graph package and can be easily customized // or extended for specific use cases. // // # Available Agents // // ## ReAct Agent (Reason + Act) // The ReAct agent combines reasoning and acting by having the model think about what to do, // choose tools to use, and act on the results. It's suitable for general-purpose tasks. // // import ( // "github.com/smallnest/langgraphgo/prebuilt" // "github.com/tmc/langchaingo/llms" // "github.com/tmc/langchaingo/tools" // ) // // // Create a ReAct agent with tools // agent, err := prebuilt.CreateReactAgent( // llm, // Language model // []tools.Tool{ // Available tools // &tools.CalculatorTool{}, // weatherTool, // }, // 10, // Max iterations // ) // // // Execute agent // result, err := agent.Invoke(ctx, map[string]any{ // "messages": []llms.MessageContent{ // { // Role: llms.ChatMessageTypeHuman, // Parts: []llms.ContentPart{ // llms.TextPart("What's the weather in London and calculate 15% of 100?"), // }, // }, // }, // }) // // ## Typed ReAct Agent // A type-safe version of the ReAct agent using Go generics: // // type AgentState struct { // Messages []llms.MessageContent `json:"messages"` // IterationCount int `json:"iteration_count"` // } // // agent, err := prebuilt.CreateReactAgentTyped[AgentState]( // llm, // tools, // 10, // func() AgentState { return AgentState{} }, // ) // // ## Supervisor Agent // Orchestrates multiple specialized agents, routing tasks to the appropriate agent: // // // Create specialized agents // weatherAgent, _ := prebuilt.CreateReactAgent(llm, weatherTools, 5) // calcAgent, _ := prebuilt.CreateReactAgent(llm, calcTools, 5) // searchAgent, _ := prebuilt.CreateReactAgent(llm, searchTools, 
5) // // Create supervisor // members := map[string]*graph.StateRunnableUntyped{ // "weather": weatherAgent, // "calculator": calcAgent, // "search": searchAgent, // } // // supervisor, err := prebuilt.CreateSupervisor( // llm, // members, // "Router", // Router agent name // ) // // // Use supervisor to route tasks // result, err := supervisor.Invoke(ctx, map[string]any{ // "messages": []llms.MessageContent{ // { // Role: llms.ChatMessageTypeHuman, // Parts: []llms.ContentPart{ // llms.TextPart("Calculate the distance between London and Paris"), // }, // }, // }, // }) // // ## Planning Agent // Creates and executes plans for complex tasks: // // planner, err := prebuilt.CreatePlanningAgent( // llm, // planningTools, // executionTools, // ) // // // The agent will create a plan, then execute each step // result, err := planner.Invoke(ctx, map[string]any{ // "messages": []llms.MessageContent{ // { // Role: llms.ChatMessageTypeHuman, // Parts: []llms.ContentPart{ // llms.TextPart("Plan and execute a research report on renewable energy"), // }, // }, // }, // }) // // ## Reflection Agent // Uses self-reflection to improve responses: // // reflectionAgent, err := prebuilt.CreateReflectionAgent( // llm, // tools, // ) // // // The agent will reflect on and potentially revise its answers // result, err := reflectionAgent.Invoke(ctx, map[string]any{ // "messages": []llms.MessageContent{ // { // Role: llms.ChatMessageTypeHuman, // Parts: []llms.ContentPart{ // llms.TextPart("Explain quantum computing"), // }, // }, // }, // }) // // ## Tree of Thoughts Agent // Explores multiple reasoning paths before choosing the best: // // totAgent, err := prebuilt.CreateTreeOfThoughtsAgent( // llm, // 3, // Number of thoughts to generate // 5, // Maximum steps // ) // // // The agent will generate and evaluate multiple reasoning paths // result, err := totAgent.Invoke(ctx, map[string]any{ // "messages": []llms.MessageContent{ // { // Role: llms.ChatMessageTypeHuman, // Parts: 
[]llms.ContentPart{ // llms.TextPart("Solve this complex math problem step by step"), // }, // }, // }, // }) // // # RAG (Retrieval-Augmented Generation) // // ## Basic RAG Agent // Combines document retrieval with generation: // // rag, err := prebuilt.CreateRAGAgent( // llm, // documentLoader, // Loads documents // textSplitter, // Splits text into chunks // embedder, // Creates embeddings // vectorStore, // Stores and searches embeddings // 5, // Number of documents to retrieve // ) // // // The agent will retrieve relevant documents and generate answers // result, err := rag.Invoke(ctx, map[string]any{ // "messages": []llms.MessageContent{ // { // Role: llms.ChatMessageTypeHuman, // Parts: []llms.ContentPart{ // llms.TextPart("What are the benefits of renewable energy?"), // }, // }, // }, // }) // // ## Advanced RAG with Conditional Processing // // rag, err := prebuilt.CreateConditionalRAGAgent( // llm, // loader, // splitter, // embedder, // vectorStore, // 3, // Retrieve count // // Condition function to decide whether to use RAG // func(ctx context.Context, query string) bool { // return len(strings.Fields(query)) > 5 // }, // ) // // # Chat Agent // For conversational applications: // // chatAgent, err := prebuilt.CreateChatAgent( // llm, // systemPrompt, // Optional system prompt // memory, // Memory for conversation history // ) // // // The agent maintains conversation context // result, err := chatAgent.Invoke(ctx, map[string]any{ // "messages": []llms.MessageContent{ // { // Role: llms.ChatMessageTypeHuman, // Parts: []llms.ContentPart{ // llms.TextPart("Hello! 
How are you?"), // }, // }, // }, // }) // // # Custom Tools // // Create custom tools for agents: // // type WeatherTool struct{} // // func (t *WeatherTool) Name() string { return "get_weather" } // func (t *WeatherTool) Description() string { // return "Get current weather for a city" // } // // func (t *WeatherTool) Call(ctx context.Context, input string) (string, error) { // // Parse the city from input // var data struct { // City string `json:"city"` // } // if err := json.Unmarshal([]byte(input), &data); err != nil { // return "", err // } // // // Call weather API // // Implementation here... // // return fmt.Sprintf("The weather in %s is 22°C and sunny", data.City), nil // } // // // Use with any agent // weatherTool := &WeatherTool{} // agent, err := prebuilt.CreateReactAgent(llm, []tools.Tool{weatherTool}, 10) // // # Agent Configuration // // Most agents support configuration through options: // // agent, err := prebuilt.CreateReactAgent(llm, tools, 10, // prebuilt.WithMaxTokens(4000), // prebuilt.WithTemperature(0.7), // prebuilt.WithStreaming(true), // prebuilt.WithCheckpointing(checkpointer), // prebuilt.WithMemory(memory), // ) // // # Streaming Support // // Enable real-time streaming of agent thoughts and actions: // // // Create streaming agent // agent, _ := prebuilt.CreateReactAgent(llm, tools, 10) // streaming := prebuilt.NewStreamingAgent(agent) // // // Stream execution // stream, _ := streaming.Stream(ctx, input) // for event := range stream.Events { // fmt.Printf("Event: %v\n", event) // } // // # Memory Integration // // Agents can integrate with various memory strategies: // // import "github.com/smallnest/langgraphgo/memory" // // // Use buffer memory // bufferMemory := memory.NewBufferMemory(100) // agent, _ := prebuilt.CreateChatAgent(llm, "", bufferMemory) // // // Use summarization memory // summMemory := memory.NewSummarizationMemory(llm, 2000) // agent, _ := prebuilt.CreateChatAgent(llm, "", summMemory) // // # Best Practices // 
// 1. Choose the right agent pattern for your use case // 2. Provide clear tool descriptions and examples // 3. Set appropriate iteration limits to prevent infinite loops // 4. Use memory for conversational applications // 5. Enable streaming for better user experience // 6. Use checkpointing for long-running tasks // 7. Test with various input patterns // 8. Monitor token usage and costs // // # Error Handling // // Agents include built-in error handling: // // - Tool execution failures // - LLM API errors // - Timeout protection // - Iteration limit enforcement // - Graceful degradation strategies // // # Performance Considerations // // - Use typed agents for better performance // - Cache tool results when appropriate // - Batch tool calls when possible // - Monitor resource usage // - Consider parallel execution for independent tasks package prebuilt
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/chat_agent_test.go
prebuilt/chat_agent_test.go
package prebuilt

import (
	"context"
	"strings"
	"testing"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/tools"
)

// MockModel is a simple mock for llms.Model
type MockModel struct {
	responses []string // canned responses, served in order
	callCount int      // number of GenerateContent calls handled so far
}

// GenerateContent returns the next canned response ("default response" once
// the list is exhausted). When the caller supplies a streaming function it
// also replays the response word by word through it.
func (m *MockModel) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) {
	resp := ""
	if m.callCount < len(m.responses) {
		resp = m.responses[m.callCount]
	} else {
		resp = "default response"
	}
	m.callCount++
	// Parse options to check for streaming
	opts := llms.CallOptions{}
	for _, opt := range options {
		opt(&opts)
	}
	// If streaming function is provided, call it with chunks
	if opts.StreamingFunc != nil {
		// Simulate streaming by sending response in small chunks
		words := splitIntoWords(resp)
		for i, word := range words {
			chunk := word
			if i < len(words)-1 {
				chunk += " "
			}
			if err := opts.StreamingFunc(ctx, []byte(chunk)); err != nil {
				return nil, err
			}
		}
	}
	return &llms.ContentResponse{
		Choices: []*llms.ContentChoice{
			{Content: resp},
		},
	}, nil
}

// Call satisfies the llms.Model interface; unused by these tests.
func (m *MockModel) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
	return "", nil
}

// TestChatAgent verifies the basic two-turn chat flow and that the thread ID
// persists across turns.
func TestChatAgent(t *testing.T) {
	// Setup mock model
	mockModel := &MockModel{
		responses: []string{
			"Hello! I am a bot.",
			"I remember you said hi.",
		},
	}
	// Create ChatAgent
	agent, err := NewChatAgent(mockModel, nil)
	if err != nil {
		t.Fatalf("Failed to create ChatAgent: %v", err)
	}
	ctx := context.Background()
	// 1. Test first turn
	resp1, err := agent.Chat(ctx, "Hi")
	if err != nil {
		t.Errorf("Chat failed: %v", err)
	}
	if !strings.Contains(resp1, "Hello") {
		t.Errorf("Expected greeting, got: %s", resp1)
	}
	// 2. Test second turn (memory)
	// Note: The mock model itself doesn't actually "remember" in this simple implementation,
	// but the agent infrastructure should retrieve history.
	// To verify memory, we'd ideally check the input messages to the model in a real integration test or a more sophisticated mock.
	// For this unit test, we just verify the flow works and the thread ID persists.
	threadID1 := agent.ThreadID()
	if threadID1 == "" {
		t.Error("ThreadID should be set")
	}
	resp2, err := agent.Chat(ctx, "Do you remember me?")
	if err != nil {
		t.Errorf("Chat failed: %v", err)
	}
	if resp2 == "" {
		t.Error("Expected response, got empty")
	}
	if agent.ThreadID() != threadID1 {
		t.Error("ThreadID should be consistent across calls")
	}
}

// TestChatAgent_DynamicTools verifies adding, replacing, removing and
// bulk-setting tools at runtime.
func TestChatAgent_DynamicTools(t *testing.T) {
	// Setup mock model
	mockModel := &MockModel{
		responses: []string{"Response"},
	}
	// Create ChatAgent without initial tools
	agent, err := NewChatAgent(mockModel, nil)
	if err != nil {
		t.Fatalf("Failed to create ChatAgent: %v", err)
	}
	// Test initial state - no dynamic tools
	if len(agent.GetTools()) != 0 {
		t.Errorf("Expected 0 tools initially, got %d", len(agent.GetTools()))
	}
	// Test AddTool
	tool1 := &MockTool{name: "tool1"}
	agent.AddTool(tool1)
	if len(agent.GetTools()) != 1 {
		t.Errorf("Expected 1 tool after AddTool, got %d", len(agent.GetTools()))
	}
	if agent.GetTools()[0].Name() != "tool1" {
		t.Errorf("Expected tool name 'tool1', got '%s'", agent.GetTools()[0].Name())
	}
	// Test adding another tool
	tool2 := &MockTool{name: "tool2"}
	agent.AddTool(tool2)
	if len(agent.GetTools()) != 2 {
		t.Errorf("Expected 2 tools after second AddTool, got %d", len(agent.GetTools()))
	}
	// Test replacing a tool with same name
	tool1Updated := &MockTool{name: "tool1"}
	agent.AddTool(tool1Updated)
	if len(agent.GetTools()) != 2 {
		t.Errorf("Expected 2 tools after updating tool1, got %d", len(agent.GetTools()))
	}
	// Test RemoveTool
	removed := agent.RemoveTool("tool1")
	if !removed {
		t.Error("Expected RemoveTool to return true for existing tool")
	}
	if len(agent.GetTools()) != 1 {
		t.Errorf("Expected 1 tool after RemoveTool, got %d", len(agent.GetTools()))
	}
	if agent.GetTools()[0].Name() != "tool2" {
		t.Errorf("Expected remaining tool to be 'tool2', got '%s'", agent.GetTools()[0].Name())
	}
	// Test removing non-existent tool
	removed = agent.RemoveTool("nonexistent")
	if removed {
		t.Error("Expected RemoveTool to return false for non-existent tool")
	}
	// Test SetTools
	tool3 := &MockTool{name: "tool3"}
	tool4 := &MockTool{name: "tool4"}
	agent.SetTools([]tools.Tool{tool3, tool4})
	if len(agent.GetTools()) != 2 {
		t.Errorf("Expected 2 tools after SetTools, got %d", len(agent.GetTools()))
	}
	if agent.GetTools()[0].Name() != "tool3" || agent.GetTools()[1].Name() != "tool4" {
		t.Error("SetTools did not correctly set the tools")
	}
	// Test ClearTools
	agent.ClearTools()
	if len(agent.GetTools()) != 0 {
		t.Errorf("Expected 0 tools after ClearTools, got %d", len(agent.GetTools()))
	}
}

// TestChatAgent_ToolsInChat verifies tools stay registered across chat turns
// and can be removed between turns.
func TestChatAgent_ToolsInChat(t *testing.T) {
	// Setup mock model that checks for tool calls
	mockModel := &MockModel{
		responses: []string{"Using the calculator tool"},
	}
	// Create ChatAgent
	agent, err := NewChatAgent(mockModel, nil)
	if err != nil {
		t.Fatalf("Failed to create ChatAgent: %v", err)
	}
	ctx := context.Background()
	// Add a tool dynamically
	calcTool := &MockTool{name: "calculator"}
	agent.AddTool(calcTool)
	// Chat should include the dynamic tool
	_, err = agent.Chat(ctx, "Calculate 2+2")
	if err != nil {
		t.Errorf("Chat with dynamic tool failed: %v", err)
	}
	// Verify tool is still available after chat
	if len(agent.GetTools()) != 1 {
		t.Errorf("Expected 1 tool after chat, got %d", len(agent.GetTools()))
	}
	// Remove tool and chat again
	agent.RemoveTool("calculator")
	_, err = agent.Chat(ctx, "Another message")
	if err != nil {
		t.Errorf("Chat after removing tool failed: %v", err)
	}
	// Verify tool was removed
	if len(agent.GetTools()) != 0 {
		t.Errorf("Expected 0 tools after removal, got %d", len(agent.GetTools()))
	}
}

// TestChatAgent_AsyncChat verifies the streamed chunks reassemble into the
// full model response.
func TestChatAgent_AsyncChat(t *testing.T) {
	// Setup mock model
	mockModel := &MockModel{
		responses: []string{"Hello, how can I help you today?"},
	}
	// Create ChatAgent
	agent, err := NewChatAgent(mockModel, nil)
	if err != nil {
		t.Fatalf("Failed to create ChatAgent: %v", err)
	}
	ctx := context.Background()
	// Test AsyncChat
	respChan, err := agent.AsyncChat(ctx, "Hi")
	if err != nil {
		t.Fatalf("AsyncChat failed: %v", err)
	}
	// Collect all chunks
	var fullResponse string
	for chunk := range respChan {
		fullResponse += chunk
	}
	// Verify we got the expected response
	expectedResponse := "Hello, how can I help you today?"
	if fullResponse != expectedResponse {
		t.Errorf("Expected response '%s', got '%s'", expectedResponse, fullResponse)
	}
	// Verify the response was streamed (we received multiple chunks)
	if len(fullResponse) == 0 {
		t.Error("Expected non-empty response")
	}
}

// TestChatAgent_AsyncChatWithChunks verifies the response arrives as multiple
// chunks that concatenate back to the full text.
func TestChatAgent_AsyncChatWithChunks(t *testing.T) {
	// Setup mock model
	mockModel := &MockModel{
		responses: []string{"Hello world, this is a test response."},
	}
	// Create ChatAgent
	agent, err := NewChatAgent(mockModel, nil)
	if err != nil {
		t.Fatalf("Failed to create ChatAgent: %v", err)
	}
	ctx := context.Background()
	// Test AsyncChatWithChunks
	respChan, err := agent.AsyncChatWithChunks(ctx, "Hi")
	if err != nil {
		t.Fatalf("AsyncChatWithChunks failed: %v", err)
	}
	// Collect all chunks
	var chunks []string
	var fullResponse string
	for chunk := range respChan {
		chunks = append(chunks, chunk)
		fullResponse += chunk
	}
	// Verify we got the expected response
	expectedResponse := "Hello world, this is a test response."
	if fullResponse != expectedResponse {
		t.Errorf("Expected response '%s', got '%s'", expectedResponse, fullResponse)
	}
	// Verify we received multiple chunks (words + spaces)
	if len(chunks) < 2 {
		t.Errorf("Expected multiple chunks, got %d", len(chunks))
	}
	t.Logf("Received %d chunks: %v", len(chunks), chunks)
}

// TestChatAgent_AsyncChatWithContext verifies that cancelling mid-stream does
// not deadlock and the channel can still be drained.
func TestChatAgent_AsyncChatWithContext(t *testing.T) {
	// Setup mock model with a slow response
	mockModel := &MockModel{
		responses: []string{"This is a long response that should be interrupted."},
	}
	// Create ChatAgent
	agent, err := NewChatAgent(mockModel, nil)
	if err != nil {
		t.Fatalf("Failed to create ChatAgent: %v", err)
	}
	// Create a context that we can cancel
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // Ensure cancel is always called to avoid context leak
	// Test AsyncChat
	respChan, err := agent.AsyncChat(ctx, "Hi")
	if err != nil {
		t.Fatalf("AsyncChat failed: %v", err)
	}
	// Read a few chunks then cancel
	chunksReceived := 0
	for chunk := range respChan {
		_ = chunk
		chunksReceived++
		if chunksReceived >= 5 {
			cancel() // Cancel the context early
			break
		}
	}
	// Continue reading to drain the channel
	for range respChan {
		chunksReceived++
	}
	// We should have received some chunks but not all
	t.Logf("Received %d chunks before/after cancellation", chunksReceived)
}

// TestSplitIntoWords is a table-driven test for the whitespace splitter used
// by the streaming mock.
func TestSplitIntoWords(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected []string
	}{
		{
			name:     "Simple sentence",
			input:    "Hello world",
			expected: []string{"Hello", "world"},
		},
		{
			name:     "With punctuation",
			input:    "Hello, world!",
			expected: []string{"Hello,", "world!"},
		},
		{
			name:     "Multiple spaces",
			input:    "Hello world",
			expected: []string{"Hello", "world"},
		},
		{
			name:     "Empty string",
			input:    "",
			expected: []string{},
		},
		{
			name:     "Single word",
			input:    "Hello",
			expected: []string{"Hello"},
		},
		{
			name:     "With newlines",
			input:    "Hello\nworld",
			expected: []string{"Hello", "world"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := splitIntoWords(tt.input)
			if len(result) != len(tt.expected) {
				t.Errorf("Expected %d words, got %d", len(tt.expected), len(result))
				return
			}
			for i, word := range result {
				if word != tt.expected[i] {
					t.Errorf("Word %d: expected '%s', got '%s'", i, tt.expected[i], word)
				}
			}
		})
	}
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/planning_agent_example_test.go
prebuilt/planning_agent_example_test.go
package prebuilt_test

import (
	"context"
	"fmt"

	"github.com/smallnest/langgraphgo/graph"
	"github.com/tmc/langchaingo/llms"
)

// NOTE: the "// Output:" comment blocks below are load-bearing — `go test`
// compares them against the example's stdout. Do not edit them casually.

// Example demonstrates how to use CreatePlanningAgentMap to build
// a dynamic workflow based on user requests
func Example_planningAgent() {
	// Step 1: Define your custom nodes that can be used in workflows
	nodes := []graph.TypedNode[map[string]any]{
		{
			Name:        "fetch_data",
			Description: "Fetch data from external API or database",
			Function: func(ctx context.Context, state map[string]any) (map[string]any, error) {
				messages := state["messages"].([]llms.MessageContent)
				// Simulate fetching data
				fmt.Println("Fetching data from API...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Data fetched successfully: [item1, item2, item3]")},
				}
				return map[string]any{
					"messages": append(messages, msg),
				}, nil
			},
		},
		{
			Name:        "validate_data",
			Description: "Validate the integrity and format of data",
			Function: func(ctx context.Context, state map[string]any) (map[string]any, error) {
				messages := state["messages"].([]llms.MessageContent)
				// Simulate validation
				fmt.Println("Validating data...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Data validation passed")},
				}
				return map[string]any{
					"messages": append(messages, msg),
				}, nil
			},
		},
		{
			Name:        "transform_data",
			Description: "Transform and normalize data into required format",
			Function: func(ctx context.Context, state map[string]any) (map[string]any, error) {
				messages := state["messages"].([]llms.MessageContent)
				// Simulate transformation
				fmt.Println("Transforming data...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Data transformed to JSON format")},
				}
				return map[string]any{
					"messages": append(messages, msg),
				}, nil
			},
		},
		{
			Name:        "analyze_data",
			Description: "Perform statistical analysis on the data",
			Function: func(ctx context.Context, state map[string]any) (map[string]any, error) {
				messages := state["messages"].([]llms.MessageContent)
				// Simulate analysis
				fmt.Println("Analyzing data...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Analysis complete: mean=42, median=40, std=5.2")},
				}
				return map[string]any{
					"messages": append(messages, msg),
				}, nil
			},
		},
		{
			Name:        "save_results",
			Description: "Save processed results to storage",
			Function: func(ctx context.Context, state map[string]any) (map[string]any, error) {
				messages := state["messages"].([]llms.MessageContent)
				// Simulate saving
				fmt.Println("Saving results...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Results saved to database")},
				}
				return map[string]any{
					"messages": append(messages, msg),
				}, nil
			},
		},
	}

	// Step 2: Create your LLM model
	// In a real application, you would use an actual LLM like OpenAI, Anthropic, etc.
	// var model llms.Model = openai.New(...)

	// For this example, we'll skip the actual LLM call
	// The LLM would receive the user request and available nodes,
	// then generate a workflow plan like:
	// {
	//   "nodes": [
	//     {"name": "fetch_data", "type": "process"},
	//     {"name": "validate_data", "type": "process"},
	//     {"name": "transform_data", "type": "process"},
	//     {"name": "save_results", "type": "process"}
	//   ],
	//   "edges": [
	//     {"from": "START", "to": "fetch_data"},
	//     {"from": "fetch_data", "to": "validate_data"},
	//     {"from": "validate_data", "to": "transform_data"},
	//     {"from": "transform_data", "to": "save_results"},
	//     {"from": "save_results", "to": "END"}
	//   ]
	// }

	fmt.Println("CreatePlanningAgent example:")
	fmt.Println("This agent dynamically creates workflows based on user requests")
	fmt.Println()
	fmt.Println("Available nodes:")
	for i, node := range nodes {
		fmt.Printf("%d. %s: %s\n", i+1, node.Name, node.Description)
	}
	fmt.Println()
	fmt.Println("User request: 'Fetch data, validate it, transform it, and save the results'")
	fmt.Println()
	fmt.Println("The LLM will:")
	fmt.Println("1. Analyze the user request")
	fmt.Println("2. Select appropriate nodes from available nodes")
	fmt.Println("3. Generate a workflow plan (similar to a mermaid diagram)")
	fmt.Println("4. The agent will execute the planned workflow")
	fmt.Println()
	fmt.Println("Expected workflow:")
	fmt.Println("START -> fetch_data -> validate_data -> transform_data -> save_results -> END")

	// Output:
	// CreatePlanningAgent example:
	// This agent dynamically creates workflows based on user requests
	//
	// Available nodes:
	// 1. fetch_data: Fetch data from external API or database
	// 2. validate_data: Validate the integrity and format of data
	// 3. transform_data: Transform and normalize data into required format
	// 4. analyze_data: Perform statistical analysis on the data
	// 5. save_results: Save processed results to storage
	//
	// User request: 'Fetch data, validate it, transform it, and save the results'
	//
	// The LLM will:
	// 1. Analyze the user request
	// 2. Select appropriate nodes from available nodes
	// 3. Generate a workflow plan (similar to a mermaid diagram)
	// 4. The agent will execute the planned workflow
	//
	// Expected workflow:
	// START -> fetch_data -> validate_data -> transform_data -> save_results -> END
}

// Example showing how to use the planning agent with verbose mode
func Example_planningAgentWithVerbose() {
	// In a real application, you would define nodes and create the agent
	// nodes := []graph.TypedNode[map[string]any]{...}
	// agent, err := prebuilt.CreatePlanningAgentMap(model, nodes, []tools.Tool{}, prebuilt.WithVerbose(true))
	fmt.Println("With verbose mode enabled, you will see:")
	fmt.Println("🤔 Planning workflow...")
	fmt.Println("📋 Generated plan: {...}")
	fmt.Println("🚀 Executing planned workflow...")
	fmt.Println(" ✓ Added node: step1")
	fmt.Println(" ✓ Added node: step2")
	fmt.Println(" ✓ Added edge: step1 -> step2")
	fmt.Println(" ✓ Added edge: step2 -> END")
	fmt.Println("✅ Workflow execution completed")

	// Output:
	// With verbose mode enabled, you will see:
	// 🤔 Planning workflow...
	// 📋 Generated plan: {...}
	// 🚀 Executing planned workflow...
	// ✓ Added node: step1
	// ✓ Added node: step2
	// ✓ Added edge: step1 -> step2
	// ✓ Added edge: step2 -> END
	// ✅ Workflow execution completed
}

// Example showing real usage pattern
func Example_planningAgentRealUsage() {
	fmt.Println("Real usage pattern:")
	fmt.Println()
	fmt.Println("// 1. Define your nodes")
	fmt.Println("nodes := []graph.TypedNode[map[string]any]{...}")
	fmt.Println()
	fmt.Println("// 2. Initialize your LLM model")
	fmt.Println("model := openai.New()")
	fmt.Println()
	fmt.Println("// 3. Create the planning agent")
	fmt.Println("agent, err := prebuilt.CreatePlanningAgentMap(")
	fmt.Println(" model,")
	fmt.Println(" nodes,")
	fmt.Println(" []tools.Tool{},")
	fmt.Println(" prebuilt.WithVerbose(true),")
	fmt.Println(")")
	fmt.Println()
	fmt.Println("// 4. Prepare initial state with user request")
	fmt.Println("initialState := map[string]any{")
	fmt.Println(" \"messages\": []llms.MessageContent{")
	fmt.Println(" llms.TextParts(llms.ChatMessageTypeHuman,")
	fmt.Println(" \"Fetch, validate, and save the customer data\"),")
	fmt.Println(" },")
	fmt.Println("}")
	fmt.Println()
	fmt.Println("// 5. Execute the agent")
	fmt.Println("result, err := agent.Invoke(context.Background(), initialState)")
	fmt.Println()
	fmt.Println("// 6. Access results")
	fmt.Println("mState := result")
	fmt.Println("messages := mState[\"messages\"].([]llms.MessageContent)")

	// Output:
	// Real usage pattern:
	//
	// // 1. Define your nodes
	// nodes := []graph.TypedNode[map[string]any]{...}
	//
	// // 2. Initialize your LLM model
	// model := openai.New()
	//
	// // 3. Create the planning agent
	// agent, err := prebuilt.CreatePlanningAgentMap(
	// model,
	// nodes,
	// []tools.Tool{},
	// prebuilt.WithVerbose(true),
	// )
	//
	// // 4. Prepare initial state with user request
	// initialState := map[string]any{
	// "messages": []llms.MessageContent{
	// llms.TextParts(llms.ChatMessageTypeHuman,
	// "Fetch, validate, and save the customer data"),
	// },
	// }
	//
	// // 5. Execute the agent
	// result, err := agent.Invoke(context.Background(), initialState)
	//
	// // 6. Access results
	// mState := result
	// messages := mState["messages"].([]llms.MessageContent)
}

// Example showing how the LLM generates workflow plans
func Example_workflowPlanFormat() {
	fmt.Println("Workflow Plan JSON Format:")
	fmt.Println()
	fmt.Println("{")
	fmt.Println(" \"nodes\": [")
	fmt.Println(" {\"name\": \"node_name\", \"type\": \"process\"},")
	fmt.Println(" {\"name\": \"another_node\", \"type\": \"process\"}")
	fmt.Println(" ],")
	fmt.Println(" \"edges\": [")
	fmt.Println(" {\"from\": \"START\", \"to\": \"node_name\"},")
	fmt.Println(" {\"from\": \"node_name\", \"to\": \"another_node\"},")
	fmt.Println(" {\"from\": \"another_node\", \"to\": \"END\"}")
	fmt.Println(" ]")
	fmt.Println("}")
	fmt.Println()
	fmt.Println("Rules:")
	fmt.Println("1. Workflow must start with edge from 'START'")
	fmt.Println("2. Workflow must end with edge to 'END'")
	fmt.Println("3. Only use nodes from available nodes list")
	fmt.Println("4. Create logical flow based on user request")

	// Output:
	// Workflow Plan JSON Format:
	//
	// {
	// "nodes": [
	// {"name": "node_name", "type": "process"},
	// {"name": "another_node", "type": "process"}
	// ],
	// "edges": [
	// {"from": "START", "to": "node_name"},
	// {"from": "node_name", "to": "another_node"},
	// {"from": "another_node", "to": "END"}
	// ]
	// }
	//
	// Rules:
	// 1. Workflow must start with edge from 'START'
	// 2. Workflow must end with edge to 'END'
	// 3. Only use nodes from available nodes list
	// 4. Create logical flow based on user request
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/reflection_agent.go
prebuilt/reflection_agent.go
package prebuilt

import (
	"context"
	"fmt"
	"strings"

	"github.com/smallnest/langgraphgo/graph"
	"github.com/tmc/langchaingo/llms"
)

// ReflectionAgentConfig configures the reflection agent.
type ReflectionAgentConfig struct {
	// Model generates and revises drafts (required).
	Model llms.Model
	// ReflectionModel critiques drafts; Model is used when nil.
	ReflectionModel llms.Model
	// MaxIterations caps generate/reflect rounds; defaults to 3 when zero.
	MaxIterations int
	// SystemMessage seeds the generator; a generic default is used when empty.
	SystemMessage string
	// ReflectionPrompt seeds the reviewer; buildDefaultReflectionPrompt when empty.
	ReflectionPrompt string
	// Verbose enables verbose output.
	Verbose bool
}

// normalizeReflectionConfig applies the defaults shared by both constructors
// in place and returns the model to use for reflection. Extracted so the two
// constructors cannot drift apart.
func normalizeReflectionConfig(config *ReflectionAgentConfig) llms.Model {
	if config.MaxIterations == 0 {
		config.MaxIterations = 3
	}
	if config.SystemMessage == "" {
		config.SystemMessage = "You are a helpful assistant. Generate a high-quality response to the user's request."
	}
	if config.ReflectionPrompt == "" {
		config.ReflectionPrompt = buildDefaultReflectionPrompt()
	}
	if config.ReflectionModel != nil {
		return config.ReflectionModel
	}
	return config.Model
}

// CreateReflectionAgentMap creates a new Reflection Agent with map[string]any
// state. The graph loops generate -> reflect -> generate until either the
// reflection is judged satisfactory or MaxIterations is reached.
//
// State keys used: "messages" (append-reduced history), "draft" (latest AI
// draft), "reflection" (latest critique), "iteration" (generate count).
func CreateReflectionAgentMap(config ReflectionAgentConfig) (*graph.StateRunnable[map[string]any], error) {
	if config.Model == nil {
		return nil, fmt.Errorf("model is required")
	}
	reflectionModel := normalizeReflectionConfig(&config)

	workflow := graph.NewStateGraph[map[string]any]()
	agentSchema := graph.NewMapSchema()
	agentSchema.RegisterReducer("messages", graph.AppendReducer)
	workflow.SetSchema(agentSchema)

	workflow.AddNode("generate", "Generate or revise response", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		iteration, _ := state["iteration"].(int)
		messages, ok := state["messages"].([]llms.MessageContent)
		if !ok || len(messages) == 0 {
			return nil, fmt.Errorf("no messages found")
		}
		var promptMessages []llms.MessageContent
		if iteration == 0 {
			// First pass: answer the conversation directly.
			promptMessages = append([]llms.MessageContent{{Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.SystemMessage)}}}, messages...)
		} else {
			// Revision pass: rewrite the previous draft using the critique.
			reflection, _ := state["reflection"].(string)
			draft, _ := state["draft"].(string)
			revisionPrompt := fmt.Sprintf("Revise based on reflection:\nRequest: %s\nDraft: %s\nReflection: %s", getOriginalRequest(messages), draft, reflection)
			promptMessages = []llms.MessageContent{
				{Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.SystemMessage)}},
				{Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(revisionPrompt)}},
			}
		}
		resp, err := config.Model.GenerateContent(ctx, promptMessages)
		if err != nil {
			return nil, err
		}
		draft := resp.Choices[0].Content
		return map[string]any{
			"messages":  []llms.MessageContent{{Role: llms.ChatMessageTypeAI, Parts: []llms.ContentPart{llms.TextPart(draft)}}},
			"draft":     draft,
			"iteration": iteration + 1,
		}, nil
	})

	workflow.AddNode("reflect", "Reflect on response", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		draft, _ := state["draft"].(string)
		messages := state["messages"].([]llms.MessageContent)
		reflectionMessages := []llms.MessageContent{
			{Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.ReflectionPrompt)}},
			{Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(fmt.Sprintf("Request: %s\nResponse: %s", getOriginalRequest(messages), draft))}},
		}
		resp, err := reflectionModel.GenerateContent(ctx, reflectionMessages)
		if err != nil {
			return nil, err
		}
		return map[string]any{"reflection": resp.Choices[0].Content}, nil
	})

	workflow.SetEntryPoint("generate")
	workflow.AddConditionalEdge("generate", func(ctx context.Context, state map[string]any) string {
		iteration, _ := state["iteration"].(int)
		if iteration >= config.MaxIterations {
			return graph.END
		}
		return "reflect"
	})
	workflow.AddConditionalEdge("reflect", func(ctx context.Context, state map[string]any) string {
		reflection, _ := state["reflection"].(string)
		if isResponseSatisfactory(reflection) {
			return graph.END
		}
		return "generate"
	})

	return workflow.Compile()
}

// CreateReflectionAgent creates a generic reflection agent over an arbitrary
// state type S. The caller supplies accessor pairs for the four pieces of
// state the loop needs (messages, draft, iteration, reflection); the graph
// topology and termination logic match CreateReflectionAgentMap.
func CreateReflectionAgent[S any](
	config ReflectionAgentConfig,
	getMessages func(S) []llms.MessageContent,
	setMessages func(S, []llms.MessageContent) S,
	getDraft func(S) string,
	setDraft func(S, string) S,
	getIteration func(S) int,
	setIteration func(S, int) S,
	getReflection func(S) string,
	setReflection func(S, string) S,
) (*graph.StateRunnable[S], error) {
	if config.Model == nil {
		return nil, fmt.Errorf("model is required")
	}
	reflectionModel := normalizeReflectionConfig(&config)

	workflow := graph.NewStateGraph[S]()

	workflow.AddNode("generate", "Generate or revise response", func(ctx context.Context, state S) (S, error) {
		iteration := getIteration(state)
		messages := getMessages(state)
		if len(messages) == 0 {
			return state, fmt.Errorf("no messages found")
		}
		var promptMessages []llms.MessageContent
		if iteration == 0 {
			promptMessages = append([]llms.MessageContent{{Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.SystemMessage)}}}, messages...)
		} else {
			reflection := getReflection(state)
			draft := getDraft(state)
			revisionPrompt := fmt.Sprintf("Revise based on reflection:\nRequest: %s\nDraft: %s\nReflection: %s", getOriginalRequest(messages), draft, reflection)
			promptMessages = []llms.MessageContent{
				{Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.SystemMessage)}},
				{Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(revisionPrompt)}},
			}
		}
		resp, err := config.Model.GenerateContent(ctx, promptMessages)
		if err != nil {
			return state, err
		}
		draft := resp.Choices[0].Content
		aiMsg := llms.MessageContent{Role: llms.ChatMessageTypeAI, Parts: []llms.ContentPart{llms.TextPart(draft)}}
		state = setMessages(state, append(messages, aiMsg))
		state = setDraft(state, draft)
		state = setIteration(state, iteration+1)
		return state, nil
	})

	workflow.AddNode("reflect", "Reflect on response", func(ctx context.Context, state S) (S, error) {
		draft := getDraft(state)
		messages := getMessages(state)
		reflectionMessages := []llms.MessageContent{
			{Role: llms.ChatMessageTypeSystem, Parts: []llms.ContentPart{llms.TextPart(config.ReflectionPrompt)}},
			{Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart(fmt.Sprintf("Request: %s\nResponse: %s", getOriginalRequest(messages), draft))}},
		}
		resp, err := reflectionModel.GenerateContent(ctx, reflectionMessages)
		if err != nil {
			return state, err
		}
		state = setReflection(state, resp.Choices[0].Content)
		return state, nil
	})

	workflow.SetEntryPoint("generate")
	workflow.AddConditionalEdge("generate", func(ctx context.Context, state S) string {
		if getIteration(state) >= config.MaxIterations {
			return graph.END
		}
		return "reflect"
	})
	workflow.AddConditionalEdge("reflect", func(ctx context.Context, state S) string {
		if isResponseSatisfactory(getReflection(state)) {
			return graph.END
		}
		return "generate"
	})

	return workflow.Compile()
}

// isResponseSatisfactory reports whether a reflection text signals that the
// draft needs no further revision.
//
// Negative phrasings are checked first: "unsatisfactory" and "inaccurate"
// contain the positive keywords "satisfactory"/"accurate" as substrings, so a
// plain Contains scan over the positive list would misread a negative review
// as approval and terminate the loop early.
func isResponseSatisfactory(reflection string) bool {
	reflectionLower := strings.ToLower(reflection)
	negativeKeywords := []string{"unsatisfactory", "not satisfactory", "inaccurate", "not accurate"}
	for _, keyword := range negativeKeywords {
		if strings.Contains(reflectionLower, keyword) {
			return false
		}
	}
	satisfactoryKeywords := []string{"excellent", "satisfactory", "no major issues", "well done", "accurate", "meets all requirements"}
	for _, keyword := range satisfactoryKeywords {
		if strings.Contains(reflectionLower, keyword) {
			return true
		}
	}
	return false
}

// getOriginalRequest returns the text of the first human message in the
// history, or "" when none is found. Used to re-anchor revision and
// reflection prompts on the user's original ask.
func getOriginalRequest(messages []llms.MessageContent) string {
	for _, msg := range messages {
		if msg.Role == llms.ChatMessageTypeHuman {
			for _, part := range msg.Parts {
				if textPart, ok := part.(llms.TextContent); ok {
					return textPart.Text
				}
			}
		}
	}
	return ""
}

// buildDefaultReflectionPrompt returns the system prompt used for the
// reflection step when ReflectionAgentConfig.ReflectionPrompt is empty.
func buildDefaultReflectionPrompt() string {
	return `You are a critical reviewer. Evaluate the response and provide strengths, weaknesses and suggestions.`
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/tool_node.go
prebuilt/tool_node.go
package prebuilt

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/tmc/langchaingo/llms"
)

// collectToolCallResponses executes every llms.ToolCall found in parts via
// executor and returns one tool-role message per call, in order.
//
// The tool input is the "input" field of the JSON arguments when present,
// otherwise the raw JSON string. Execution errors do not abort the node:
// they are reported back to the model as "Error: ..." content.
//
// Extracted because ToolNodeMap and ToolNode previously duplicated this loop
// byte-for-byte.
func collectToolCallResponses(ctx context.Context, executor *ToolExecutor, parts []llms.ContentPart) []llms.MessageContent {
	var toolMessages []llms.MessageContent
	for _, part := range parts {
		tc, ok := part.(llms.ToolCall)
		if !ok {
			continue
		}
		var args map[string]any
		// Best-effort parse; malformed JSON falls back to the raw arguments.
		_ = json.Unmarshal([]byte(tc.FunctionCall.Arguments), &args)
		inputVal := tc.FunctionCall.Arguments
		if val, ok := args["input"].(string); ok {
			inputVal = val
		}
		res, err := executor.Execute(ctx, ToolInvocation{
			Tool:      tc.FunctionCall.Name,
			ToolInput: inputVal,
		})
		if err != nil {
			res = fmt.Sprintf("Error: %v", err)
		}
		toolMessages = append(toolMessages, llms.MessageContent{
			Role: llms.ChatMessageTypeTool,
			Parts: []llms.ContentPart{
				llms.ToolCallResponse{
					ToolCallID: tc.ID,
					Name:       tc.FunctionCall.Name,
					Content:    res,
				},
			},
		})
	}
	return toolMessages
}

// ToolNodeMap is a reusable node that executes tool calls from the last AI message
// for map[string]any state.
//
// It returns ONLY the new tool messages under "messages" — presumably the
// caller's schema uses an append reducer to merge them into the history
// (verify against the graph's schema).
func ToolNodeMap(executor *ToolExecutor) func(context.Context, map[string]any) (map[string]any, error) {
	return func(ctx context.Context, state map[string]any) (map[string]any, error) {
		messages, ok := state["messages"].([]llms.MessageContent)
		if !ok || len(messages) == 0 {
			return nil, fmt.Errorf("no messages found in state")
		}
		lastMsg := messages[len(messages)-1]
		if lastMsg.Role != llms.ChatMessageTypeAI {
			return nil, fmt.Errorf("last message is not an AI message")
		}
		return map[string]any{
			"messages": collectToolCallResponses(ctx, executor, lastMsg.Parts),
		}, nil
	}
}

// ToolNode creates a generic tool execution node over an arbitrary state type.
//
// Unlike ToolNodeMap, it appends the new tool messages to the full history
// itself via setMessages rather than relying on a reducer.
func ToolNode[S any](
	executor *ToolExecutor,
	getMessages func(S) []llms.MessageContent,
	setMessages func(S, []llms.MessageContent) S,
) func(context.Context, S) (S, error) {
	return func(ctx context.Context, state S) (S, error) {
		messages := getMessages(state)
		if len(messages) == 0 {
			return state, fmt.Errorf("no messages")
		}
		lastMsg := messages[len(messages)-1]
		if lastMsg.Role != llms.ChatMessageTypeAI {
			return state, fmt.Errorf("not an AI message")
		}
		toolMessages := collectToolCallResponses(ctx, executor, lastMsg.Parts)
		return setMessages(state, append(messages, toolMessages...)), nil
	}
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/create_agent.go
prebuilt/create_agent.go
package prebuilt

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/smallnest/goskills"
	adapter "github.com/smallnest/langgraphgo/adapter/goskills"
	"github.com/smallnest/langgraphgo/graph"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/tools"
)

// CreateAgentOptions contains options for creating an agent.
type CreateAgentOptions struct {
	// skillDir, when non-empty, enables the skill-discovery node.
	skillDir string
	// Verbose enables verbose output.
	Verbose bool
	// SystemMessage is prepended as a system message on every model call.
	SystemMessage string
	// StateModifier may rewrite the message list just before each model call.
	StateModifier func(messages []llms.MessageContent) []llms.MessageContent
	// MaxIterations overrides the agent-loop iteration cap when > 0.
	// NOTE(review): honored by CreateAgentMap only; the generic CreateAgent
	// has no iteration state and ignores it.
	MaxIterations int
}

// CreateAgentOption mutates CreateAgentOptions (functional-options pattern).
type CreateAgentOption func(*CreateAgentOptions)

// WithSystemMessage sets the system prompt prepended to every model call.
func WithSystemMessage(message string) CreateAgentOption {
	return func(o *CreateAgentOptions) { o.SystemMessage = message }
}

// WithStateModifier installs a hook that rewrites the outgoing message list.
func WithStateModifier(modifier func(messages []llms.MessageContent) []llms.MessageContent) CreateAgentOption {
	return func(o *CreateAgentOptions) { o.StateModifier = modifier }
}

// WithSkillDir enables skill discovery from the given directory.
func WithSkillDir(skillDir string) CreateAgentOption {
	return func(o *CreateAgentOptions) { o.skillDir = skillDir }
}

// WithVerbose toggles verbose output.
func WithVerbose(verbose bool) CreateAgentOption {
	return func(o *CreateAgentOptions) { o.Verbose = verbose }
}

// WithMaxIterations overrides the maximum number of agent iterations.
func WithMaxIterations(maxIterations int) CreateAgentOption {
	return func(o *CreateAgentOptions) { o.MaxIterations = maxIterations }
}

// mergeTools returns a fresh slice holding base followed by extra.
//
// It deliberately never appends onto base: append(base, extra...) may write
// into base's backing array when base has spare capacity, silently mutating
// the caller's tool slice across node invocations.
func mergeTools(base, extra []tools.Tool) []tools.Tool {
	merged := make([]tools.Tool, 0, len(base)+len(extra))
	merged = append(merged, base...)
	return append(merged, extra...)
}

// resolveToolInput decides which string to hand to a tool for the given call:
// tools implementing ToolWithSchema receive the raw JSON arguments; default
// tools get the "input" field when present, falling back to the raw JSON.
// Unknown tool names also get the raw JSON so the executor can report the miss.
func resolveToolInput(executor *ToolExecutor, tc llms.ToolCall) string {
	tool, ok := executor.Tools[tc.FunctionCall.Name]
	if !ok {
		return tc.FunctionCall.Arguments
	}
	if _, hasCustomSchema := tool.(ToolWithSchema); hasCustomSchema {
		return tc.FunctionCall.Arguments
	}
	var args map[string]any
	// Best-effort parse; malformed JSON falls back to the raw arguments.
	_ = json.Unmarshal([]byte(tc.FunctionCall.Arguments), &args)
	if val, ok := args["input"].(string); ok {
		return val
	}
	return tc.FunctionCall.Arguments
}

// executeToolCallParts runs every llms.ToolCall found in parts and returns one
// tool-role response message per call. Execution errors are reported back to
// the model as "Error: ..." content instead of aborting the node.
func executeToolCallParts(ctx context.Context, executor *ToolExecutor, parts []llms.ContentPart) []llms.MessageContent {
	var toolMessages []llms.MessageContent
	for _, part := range parts {
		tc, ok := part.(llms.ToolCall)
		if !ok {
			continue
		}
		res, err := executor.Execute(ctx, ToolInvocation{Tool: tc.FunctionCall.Name, ToolInput: resolveToolInput(executor, tc)})
		if err != nil {
			res = fmt.Sprintf("Error: %v", err)
		}
		toolMessages = append(toolMessages, llms.MessageContent{
			Role:  llms.ChatMessageTypeTool,
			Parts: []llms.ContentPart{llms.ToolCallResponse{ToolCallID: tc.ID, Name: tc.FunctionCall.Name, Content: res}},
		})
	}
	return toolMessages
}

// CreateAgentMap creates a new agent graph with map[string]any state.
//
// Topology: optional "skill" discovery node -> "agent" decision node ->
// conditional "tools" execution loop, ending when the model emits no tool
// calls or the iteration cap is hit. State keys: "messages" and "extra_tools"
// (append-reduced), "iteration_count" (overwritten).
func CreateAgentMap(model llms.Model, inputTools []tools.Tool, maxIterations int, opts ...CreateAgentOption) (*graph.StateRunnable[map[string]any], error) {
	options := &CreateAgentOptions{}
	for _, opt := range opts {
		opt(options)
	}

	if maxIterations == 0 {
		maxIterations = 20
	}
	// The functional option wins over the positional parameter.
	if options.MaxIterations > 0 {
		maxIterations = options.MaxIterations
	}

	workflow := graph.NewStateGraph[map[string]any]()

	agentSchema := graph.NewMapSchema()
	agentSchema.RegisterReducer("messages", graph.AppendReducer)
	agentSchema.RegisterReducer("extra_tools", graph.AppendReducer)
	agentSchema.RegisterReducer("iteration_count", graph.OverwriteReducer)
	workflow.SetSchema(agentSchema)

	if options.skillDir != "" {
		workflow.AddNode("skill", "Skill discovery node", func(ctx context.Context, state map[string]any) (map[string]any, error) {
			messages, _ := state["messages"].([]llms.MessageContent)
			if len(messages) == 0 {
				return nil, nil
			}
			// Use the most recent human message as the skill-selection query.
			userPrompt := ""
			for i := len(messages) - 1; i >= 0; i-- {
				if messages[i].Role == llms.ChatMessageTypeHuman {
					userPrompt = messages[i].Parts[0].(llms.TextContent).Text
					break
				}
			}
			if userPrompt == "" {
				return nil, nil
			}
			availableSkills, err := discoverSkills(options.skillDir)
			if err != nil {
				return nil, err
			}
			selectedSkillName, err := selectSkill(ctx, model, userPrompt, availableSkills)
			if err != nil || selectedSkillName == "" {
				return nil, err
			}
			selectedSkill, ok := availableSkills[selectedSkillName]
			if !ok {
				// Model answered "None" or an unknown name: add no skill.
				return nil, nil
			}
			skillTools, err := adapter.SkillsToTools(selectedSkill)
			if err != nil {
				return nil, err
			}
			return map[string]any{"extra_tools": skillTools}, nil
		})
	}

	workflow.AddNode("agent", "Agent decision node", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		messages, _ := state["messages"].([]llms.MessageContent)
		extra, _ := state["extra_tools"].([]tools.Tool)
		allTools := mergeTools(inputTools, extra)

		// Check iteration count
		iterationCount := 0
		if count, ok := state["iteration_count"].(int); ok {
			iterationCount = count
		}
		if iterationCount >= maxIterations {
			// Max iterations reached: emit a final plain message so the
			// conditional edge (no tool calls) routes to END.
			finalMsg := llms.MessageContent{
				Role: llms.ChatMessageTypeAI,
				Parts: []llms.ContentPart{
					llms.TextPart("Maximum iterations reached. Please try a simpler query."),
				},
			}
			return map[string]any{
				"messages": []llms.MessageContent{finalMsg},
			}, nil
		}

		var toolDefs []llms.Tool
		for _, t := range allTools {
			toolDefs = append(toolDefs, llms.Tool{
				Type: "function",
				Function: &llms.FunctionDefinition{
					Name:        t.Name(),
					Description: t.Description(),
					Parameters:  getToolSchema(t),
				},
			})
		}

		msgsToSend := messages
		if options.SystemMessage != "" {
			msgsToSend = append([]llms.MessageContent{llms.TextParts(llms.ChatMessageTypeSystem, options.SystemMessage)}, msgsToSend...)
		}
		if options.StateModifier != nil {
			msgsToSend = options.StateModifier(msgsToSend)
		}

		resp, err := model.GenerateContent(ctx, msgsToSend, llms.WithTools(toolDefs))
		if err != nil {
			return nil, err
		}

		choice := resp.Choices[0]
		aiMsg := llms.MessageContent{Role: llms.ChatMessageTypeAI}
		if choice.Content != "" {
			aiMsg.Parts = append(aiMsg.Parts, llms.TextPart(choice.Content))
		}
		for _, tc := range choice.ToolCalls {
			aiMsg.Parts = append(aiMsg.Parts, tc)
		}

		return map[string]any{
			"messages":        []llms.MessageContent{aiMsg},
			"iteration_count": iterationCount + 1,
		}, nil
	})

	workflow.AddNode("tools", "Tool execution node", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		messages := state["messages"].([]llms.MessageContent)
		lastMsg := messages[len(messages)-1]
		extra, _ := state["extra_tools"].([]tools.Tool)
		toolExecutor := NewToolExecutor(mergeTools(inputTools, extra))
		return map[string]any{"messages": executeToolCallParts(ctx, toolExecutor, lastMsg.Parts)}, nil
	})

	if options.skillDir != "" {
		workflow.SetEntryPoint("skill")
		workflow.AddEdge("skill", "agent")
	} else {
		workflow.SetEntryPoint("agent")
	}

	workflow.AddConditionalEdge("agent", func(ctx context.Context, state map[string]any) string {
		messages := state["messages"].([]llms.MessageContent)
		lastMsg := messages[len(messages)-1]
		for _, part := range lastMsg.Parts {
			if _, ok := part.(llms.ToolCall); ok {
				return "tools"
			}
		}
		return graph.END
	})
	workflow.AddEdge("tools", "agent")

	return workflow.Compile()
}

// CreateAgent creates a generic agent graph over an arbitrary state type S,
// using caller-supplied accessors for messages and extra tools.
//
// Bug fix: the previous implementation built the tool list with
// append(inputTools, getExtraTools(state)...), which can write into
// inputTools' backing array and corrupt the caller's slice; mergeTools copies.
//
// setExtraTools is accepted for interface symmetry but is not called here.
func CreateAgent[S any](
	model llms.Model,
	inputTools []tools.Tool,
	getMessages func(S) []llms.MessageContent,
	setMessages func(S, []llms.MessageContent) S,
	getExtraTools func(S) []tools.Tool,
	setExtraTools func(S, []tools.Tool) S,
	opts ...CreateAgentOption,
) (*graph.StateRunnable[S], error) {
	options := &CreateAgentOptions{}
	for _, opt := range opts {
		opt(options)
	}

	workflow := graph.NewStateGraph[S]()

	workflow.AddNode("agent", "Agent decision node", func(ctx context.Context, state S) (S, error) {
		messages := getMessages(state)
		allTools := mergeTools(inputTools, getExtraTools(state))

		var toolDefs []llms.Tool
		for _, t := range allTools {
			toolDefs = append(toolDefs, llms.Tool{
				Type: "function",
				Function: &llms.FunctionDefinition{
					Name:        t.Name(),
					Description: t.Description(),
					Parameters:  getToolSchema(t),
				},
			})
		}

		msgsToSend := messages
		if options.SystemMessage != "" {
			msgsToSend = append([]llms.MessageContent{llms.TextParts(llms.ChatMessageTypeSystem, options.SystemMessage)}, msgsToSend...)
		}
		if options.StateModifier != nil {
			msgsToSend = options.StateModifier(msgsToSend)
		}

		resp, err := model.GenerateContent(ctx, msgsToSend, llms.WithTools(toolDefs))
		if err != nil {
			return state, err
		}

		choice := resp.Choices[0]
		aiMsg := llms.MessageContent{Role: llms.ChatMessageTypeAI}
		if choice.Content != "" {
			aiMsg.Parts = append(aiMsg.Parts, llms.TextPart(choice.Content))
		}
		for _, tc := range choice.ToolCalls {
			aiMsg.Parts = append(aiMsg.Parts, tc)
		}

		return setMessages(state, append(messages, aiMsg)), nil
	})

	workflow.AddNode("tools", "Tool execution node", func(ctx context.Context, state S) (S, error) {
		messages := getMessages(state)
		lastMsg := messages[len(messages)-1]
		toolExecutor := NewToolExecutor(mergeTools(inputTools, getExtraTools(state)))
		toolMessages := executeToolCallParts(ctx, toolExecutor, lastMsg.Parts)
		return setMessages(state, append(messages, toolMessages...)), nil
	})

	workflow.SetEntryPoint("agent")
	workflow.AddConditionalEdge("agent", func(ctx context.Context, state S) string {
		messages := getMessages(state)
		lastMsg := messages[len(messages)-1]
		for _, part := range lastMsg.Parts {
			if _, ok := part.(llms.ToolCall); ok {
				return "tools"
			}
		}
		return graph.END
	})
	workflow.AddEdge("tools", "agent")

	return workflow.Compile()
}

// discoverSkills parses all skill packages under skillDir and indexes them by
// their declared name.
func discoverSkills(skillDir string) (map[string]*goskills.SkillPackage, error) {
	packages, err := goskills.ParseSkillPackages(skillDir)
	if err != nil {
		return nil, err
	}
	skills := make(map[string]*goskills.SkillPackage)
	for _, pkg := range packages {
		skills[pkg.Meta.Name] = pkg
	}
	return skills, nil
}

// selectSkill asks the model to pick one skill name (or "None") for the given
// user prompt. The trimmed model output is returned verbatim; callers must
// handle names that match no known skill.
func selectSkill(ctx context.Context, model llms.Model, userPrompt string, availableSkills map[string]*goskills.SkillPackage) (string, error) {
	var skillDescriptions strings.Builder
	for name, pkg := range availableSkills {
		skillDescriptions.WriteString(fmt.Sprintf("- %s: %s\n", name, pkg.Meta.Description))
	}
	prompt := fmt.Sprintf("Select the most appropriate skill for: \"%s\"\n\nSkills:\n%s\nReturn only the skill name or 'None'.", userPrompt, skillDescriptions.String())
	resp, err := model.GenerateContent(ctx, []llms.MessageContent{llms.TextParts(llms.ChatMessageTypeHuman, prompt)})
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(resp.Choices[0].Content), nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/react_agent.go
prebuilt/react_agent.go
package prebuilt

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/smallnest/langgraphgo/graph"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/tools"
)

// CreateReactAgentMap creates a new ReAct agent graph with map[string]any state.
// The graph loops "agent" (model decides) -> "tools" (execute tool calls) ->
// "agent" until the model emits no tool calls or maxIterations is reached
// (defaults to 20 when zero). "messages" is append-reduced; "iteration_count"
// is written by the agent node on each pass.
//
// Deprecated: Use CreateAgentMap instead, which now includes the same iteration limiting functionality.
// This function is kept for backward compatibility and will be removed in a future version.
func CreateReactAgentMap(model llms.Model, inputTools []tools.Tool, maxIterations int) (*graph.StateRunnable[map[string]any], error) {
	if maxIterations == 0 {
		maxIterations = 20
	}

	// Define the tool executor
	toolExecutor := NewToolExecutor(inputTools)

	// Define the graph
	workflow := graph.NewStateGraph[map[string]any]()

	// Define the state schema: only "messages" gets an append reducer, so
	// "iteration_count" is merged with the schema's default behavior.
	agentSchema := graph.NewMapSchema()
	agentSchema.RegisterReducer("messages", graph.AppendReducer)
	workflow.SetSchema(agentSchema)

	// Define the agent node: calls the model with the tool definitions and
	// records its reply (text and/or tool calls) plus the bumped counter.
	workflow.AddNode("agent", "ReAct agent decision maker", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		messages, ok := state["messages"].([]llms.MessageContent)
		if !ok {
			return nil, fmt.Errorf("messages key not found or invalid type")
		}

		// Check iteration count
		iterationCount := 0
		if count, ok := state["iteration_count"].(int); ok {
			iterationCount = count
		}

		if iterationCount >= maxIterations {
			// Max iterations reached, return final message. A plain text
			// message carries no tool calls, so the conditional edge below
			// routes to END.
			finalMsg := llms.MessageContent{
				Role: llms.ChatMessageTypeAI,
				Parts: []llms.ContentPart{
					llms.TextPart("Maximum iterations reached. Please try a simpler query."),
				},
			}
			return map[string]any{
				"messages": []llms.MessageContent{finalMsg},
			}, nil
		}

		// Convert tools to ToolInfo for the model
		var toolDefs []llms.Tool
		for _, t := range inputTools {
			toolDefs = append(toolDefs, llms.Tool{
				Type: "function",
				Function: &llms.FunctionDefinition{
					Name:        t.Name(),
					Description: t.Description(),
					Parameters:  getToolSchema(t),
				},
			})
		}

		// Call model with tools
		resp, err := model.GenerateContent(ctx, messages, llms.WithTools(toolDefs))
		if err != nil {
			return nil, err
		}

		choice := resp.Choices[0]
		aiMsg := llms.MessageContent{
			Role: llms.ChatMessageTypeAI,
		}
		if choice.Content != "" {
			aiMsg.Parts = append(aiMsg.Parts, llms.TextPart(choice.Content))
		}
		for _, tc := range choice.ToolCalls {
			aiMsg.Parts = append(aiMsg.Parts, tc)
		}

		return map[string]any{
			"messages":        []llms.MessageContent{aiMsg},
			"iteration_count": iterationCount + 1,
		}, nil
	})

	// Define the tools node: executes every tool call on the last AI message
	// and returns one tool-role response per call (merged by the reducer).
	workflow.AddNode("tools", "Tool execution node", func(ctx context.Context, state map[string]any) (map[string]any, error) {
		messages := state["messages"].([]llms.MessageContent)
		lastMsg := messages[len(messages)-1]
		if lastMsg.Role != llms.ChatMessageTypeAI {
			return nil, fmt.Errorf("last message is not an AI message")
		}

		var toolMessages []llms.MessageContent
		for _, part := range lastMsg.Parts {
			if tc, ok := part.(llms.ToolCall); ok {
				// Get the tool to check if it has a custom schema
				tool, hasTool := toolExecutor.Tools[tc.FunctionCall.Name]
				var inputVal string
				if hasTool {
					// Check if tool has custom schema
					if _, hasCustomSchema := tool.(ToolWithSchema); hasCustomSchema {
						// Tool has custom schema, pass JSON arguments directly
						inputVal = tc.FunctionCall.Arguments
					} else {
						// Tool uses default schema, try to extract "input" field
						var args map[string]any
						_ = json.Unmarshal([]byte(tc.FunctionCall.Arguments), &args)
						if val, ok := args["input"].(string); ok {
							inputVal = val
						} else {
							inputVal = tc.FunctionCall.Arguments
						}
					}
				} else {
					// Tool not found, use arguments as-is
					inputVal = tc.FunctionCall.Arguments
				}
				res, err := toolExecutor.Execute(ctx, ToolInvocation{
					Tool:      tc.FunctionCall.Name,
					ToolInput: inputVal,
				})
				if err != nil {
					// Feed the failure back to the model instead of aborting.
					res = fmt.Sprintf("Error: %v", err)
				}
				toolMsg := llms.MessageContent{
					Role: llms.ChatMessageTypeTool,
					Parts: []llms.ContentPart{
						llms.ToolCallResponse{
							ToolCallID: tc.ID,
							Name:       tc.FunctionCall.Name,
							Content:    res,
						},
					},
				}
				toolMessages = append(toolMessages, toolMsg)
			}
		}

		return map[string]any{
			"messages": toolMessages,
		}, nil
	})

	workflow.SetEntryPoint("agent")
	// Route to "tools" whenever the last AI message contains a tool call,
	// otherwise finish.
	workflow.AddConditionalEdge("agent", func(ctx context.Context, state map[string]any) string {
		messages := state["messages"].([]llms.MessageContent)
		lastMsg := messages[len(messages)-1]
		for _, part := range lastMsg.Parts {
			if _, ok := part.(llms.ToolCall); ok {
				return "tools"
			}
		}
		return graph.END
	})
	workflow.AddEdge("tools", "agent")

	return workflow.Compile()
}

// CreateReactAgent creates a new typed ReAct agent graph over an arbitrary
// state type S. The caller supplies accessors for the message history and the
// iteration counter; topology and termination mirror CreateReactAgentMap,
// except that this variant appends messages itself instead of using a reducer.
func CreateReactAgent[S any](
	model llms.Model,
	inputTools []tools.Tool,
	getMessages func(S) []llms.MessageContent,
	setMessages func(S, []llms.MessageContent) S,
	getIterationCount func(S) int,
	setIterationCount func(S, int) S,
	maxIterations int,
) (*graph.StateRunnable[S], error) {
	if maxIterations == 0 {
		maxIterations = 20
	}

	toolExecutor := NewToolExecutor(inputTools)
	workflow := graph.NewStateGraph[S]()

	workflow.AddNode("agent", "ReAct agent decision maker", func(ctx context.Context, state S) (S, error) {
		iterationCount := getIterationCount(state)
		if iterationCount >= maxIterations {
			// Terminal plain-text message: no tool calls, so the conditional
			// edge routes to END.
			finalMsg := llms.MessageContent{
				Role: llms.ChatMessageTypeAI,
				Parts: []llms.ContentPart{
					llms.TextPart("Maximum iterations reached. Please try a simpler query."),
				},
			}
			return setMessages(state, append(getMessages(state), finalMsg)), nil
		}

		var toolDefs []llms.Tool
		for _, t := range inputTools {
			toolDefs = append(toolDefs, llms.Tool{
				Type: "function",
				Function: &llms.FunctionDefinition{
					Name:        t.Name(),
					Description: t.Description(),
					Parameters:  getToolSchema(t),
				},
			})
		}

		messages := getMessages(state)
		resp, err := model.GenerateContent(ctx, messages, llms.WithTools(toolDefs))
		if err != nil {
			return state, err
		}

		choice := resp.Choices[0]
		aiMsg := llms.MessageContent{
			Role: llms.ChatMessageTypeAI,
		}
		if choice.Content != "" {
			aiMsg.Parts = append(aiMsg.Parts, llms.TextPart(choice.Content))
		}
		for _, tc := range choice.ToolCalls {
			aiMsg.Parts = append(aiMsg.Parts, tc)
		}

		state = setMessages(state, append(messages, aiMsg))
		state = setIterationCount(state, iterationCount+1)
		return state, nil
	})

	workflow.AddNode("tools", "Tool execution node", func(ctx context.Context, state S) (S, error) {
		messages := getMessages(state)
		lastMsg := messages[len(messages)-1]

		var toolMessages []llms.MessageContent
		for _, part := range lastMsg.Parts {
			if tc, ok := part.(llms.ToolCall); ok {
				// Get the tool to check if it has a custom schema
				tool, hasTool := toolExecutor.Tools[tc.FunctionCall.Name]
				var inputVal string
				if hasTool {
					// Check if tool has custom schema
					if _, hasCustomSchema := tool.(ToolWithSchema); hasCustomSchema {
						// Tool has custom schema, pass JSON arguments directly
						inputVal = tc.FunctionCall.Arguments
					} else {
						// Tool uses default schema, try to extract "input" field
						var args map[string]any
						_ = json.Unmarshal([]byte(tc.FunctionCall.Arguments), &args)
						if val, ok := args["input"].(string); ok {
							inputVal = val
						} else {
							inputVal = tc.FunctionCall.Arguments
						}
					}
				} else {
					// Tool not found, use arguments as-is
					inputVal = tc.FunctionCall.Arguments
				}
				res, err := toolExecutor.Execute(ctx, ToolInvocation{
					Tool:      tc.FunctionCall.Name,
					ToolInput: inputVal,
				})
				if err != nil {
					// Feed the failure back to the model instead of aborting.
					res = fmt.Sprintf("Error: %v", err)
				}
				toolMsg := llms.MessageContent{
					Role: llms.ChatMessageTypeTool,
					Parts: []llms.ContentPart{
						llms.ToolCallResponse{
							ToolCallID: tc.ID,
							Name:       tc.FunctionCall.Name,
							Content:    res,
						},
					},
				}
				toolMessages = append(toolMessages, toolMsg)
			}
		}

		return setMessages(state, append(getMessages(state), toolMessages...)), nil
	})

	workflow.SetEntryPoint("agent")
	workflow.AddConditionalEdge("agent", func(ctx context.Context, state S) string {
		messages := getMessages(state)
		lastMsg := messages[len(messages)-1]
		for _, part := range lastMsg.Parts {
			if _, ok := part.(llms.ToolCall); ok {
				return "tools"
			}
		}
		return graph.END
	})
	workflow.AddEdge("tools", "agent")

	return workflow.Compile()
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/agent_states.go
prebuilt/agent_states.go
package prebuilt

import (
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/tools"
)

// AgentState represents the general agent state.
// This is the default state type for generic agents.
type AgentState struct {
	// Messages contains the conversation history
	Messages []llms.MessageContent
	// ExtraTools contains additional tools available to the agent
	// (e.g. tools added dynamically at runtime, on top of the configured set)
	ExtraTools []tools.Tool
}

// ReactAgentState represents the default state for a ReAct agent
type ReactAgentState struct {
	// Messages contains the conversation history
	Messages []llms.MessageContent `json:"messages"`
	// IterationCount counts the current iteration number
	// (used to enforce the agent's maximum-iterations cap)
	IterationCount int `json:"iteration_count"`
}

// PlanningAgentState represents the state for a planning agent.
// The planning agent first generates a workflow plan using LLM,
// then executes according to the generated plan.
type PlanningAgentState struct {
	// Messages contains the conversation history
	Messages []llms.MessageContent
	// WorkflowPlan contains the parsed workflow plan from LLM
	WorkflowPlan *WorkflowPlan
}

// WorkflowPlan represents the parsed workflow plan from LLM
type WorkflowPlan struct {
	Nodes []WorkflowNode `json:"nodes"`
	Edges []WorkflowEdge `json:"edges"`
}

// WorkflowNode represents a node in the workflow plan
type WorkflowNode struct {
	Name string `json:"name"`
	Type string `json:"type"` // "start", "process", "end", "conditional"
}

// WorkflowEdge represents an edge in the workflow plan
type WorkflowEdge struct {
	From      string `json:"from"`
	To        string `json:"to"`
	Condition string `json:"condition,omitempty"` // For conditional edges
}

// ReflectionAgentState represents the state for a reflection agent.
// The reflection agent iteratively improves its response through
// self-reflection and revision.
type ReflectionAgentState struct {
	// Messages contains the conversation history
	Messages []llms.MessageContent
	// Iteration counts the current iteration number
	Iteration int
	// Reflection contains the agent's self-reflection on its draft
	Reflection string
	// Draft contains the current draft response being refined
	Draft string
}

// PEVAgentState represents the state for a Plan-Execute-Verify agent.
// This agent follows a three-step process: plan, execute, and verify.
type PEVAgentState struct {
	// Messages contains the conversation history
	Messages []llms.MessageContent
	// Plan contains the list of steps to execute
	Plan []string
	// CurrentStep is the index of the current step being executed
	CurrentStep int
	// LastToolResult contains the result of the last tool execution
	LastToolResult string
	// IntermediateSteps contains results from intermediate steps
	IntermediateSteps []string
	// Retries counts the number of retries attempted
	Retries int
	// VerificationResult contains the verification status
	VerificationResult string
	// FinalAnswer contains the final answer after verification
	FinalAnswer string
}

// TreeOfThoughtsState represents the state for a tree-of-thoughts agent.
// This agent explores multiple reasoning paths in parallel to find
// the best solution.
type TreeOfThoughtsState struct {
	// ActivePaths contains all active reasoning paths being explored
	// (SearchPath is declared elsewhere in this package)
	ActivePaths map[string]*SearchPath
	// Solution contains the best solution found so far
	Solution string
	// VisitedStates tracks which states have been visited to avoid cycles
	VisitedStates map[string]bool
	// Iteration counts the current iteration number
	Iteration int
}

// ChatAgentState represents the state for a chat agent.
// This is a conversational agent that maintains message history.
type ChatAgentState struct {
	// Messages contains the conversation history
	Messages []llms.MessageContent
	// SystemPrompt is the system prompt for the chat agent
	SystemPrompt string
	// ExtraTools contains additional tools available to the agent
	ExtraTools []tools.Tool
}

// SupervisorState represents the state for a supervisor workflow
type SupervisorState struct {
	// Messages contains the conversation history
	Messages []llms.MessageContent `json:"messages"`
	// Next is the next worker to act
	Next string `json:"next,omitempty"`
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/tool_executor.go
prebuilt/tool_executor.go
package prebuilt import ( "context" "fmt" "github.com/tmc/langchaingo/tools" ) // ToolWithSchema is an optional interface that tools can implement to provide their parameter schema type ToolWithSchema interface { Schema() map[string]any } // ToolInvocation represents a request to execute a tool type ToolInvocation struct { Tool string `json:"tool"` ToolInput string `json:"tool_input"` } // ToolExecutor executes tools based on invocations type ToolExecutor struct { Tools map[string]tools.Tool } // NewToolExecutor creates a new ToolExecutor with the given tools func NewToolExecutor(inputTools []tools.Tool) *ToolExecutor { toolMap := make(map[string]tools.Tool) for _, t := range inputTools { toolMap[t.Name()] = t } return &ToolExecutor{ Tools: toolMap, } } // Execute executes a single tool invocation func (te *ToolExecutor) Execute(ctx context.Context, invocation ToolInvocation) (string, error) { tool, ok := te.Tools[invocation.Tool] if !ok { return "", fmt.Errorf("tool not found: %s", invocation.Tool) } return tool.Call(ctx, invocation.ToolInput) } // getToolSchema returns the parameter schema for a tool. // If the tool implements ToolWithSchema, it uses the tool's custom schema. // Otherwise, it returns the default simple schema with an "input" string field. func getToolSchema(tool tools.Tool) map[string]any { if st, ok := tool.(ToolWithSchema); ok { return st.Schema() } // Default schema for tools without custom schema return map[string]any{ "type": "object", "properties": map[string]any{ "input": map[string]any{ "type": "string", "description": "The input query for the tool", }, }, "required": []string{"input"}, "additionalProperties": false, } } // ExecuteMany executes multiple tool invocations in parallel (if needed, but here sequential for simplicity) // In a real graph, this might be a ParallelNode, but here we provide a helper. 
func (te *ToolExecutor) ExecuteMany(ctx context.Context, invocations []ToolInvocation) ([]string, error) { results := make([]string, len(invocations)) for i, inv := range invocations { res, err := te.Execute(ctx, inv) if err != nil { return nil, err // Or continue and return partial errors? } results[i] = res } return results, nil } // ToolNode is a graph node function that executes tools // It expects the state to contain a list of ToolInvocation or a single ToolInvocation // This is a simplified version. In a real agent, it would parse messages. func (te *ToolExecutor) ToolNode(ctx context.Context, state any) (any, error) { // Try to parse state as ToolInvocation if inv, ok := state.(ToolInvocation); ok { return te.Execute(ctx, inv) } // Try to parse as []ToolInvocation if invs, ok := state.([]ToolInvocation); ok { return te.ExecuteMany(ctx, invs) } // Try to parse from map if m, ok := state.(map[string]any); ok { // Check for "tool" and "tool_input" keys if t, ok := m["tool"].(string); ok { input := "" if i, ok := m["tool_input"].(string); ok { input = i } return te.Execute(ctx, ToolInvocation{Tool: t, ToolInput: input}) } } return nil, fmt.Errorf("invalid state for ToolNode: %T", state) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/chat_agent.go
prebuilt/chat_agent.go
package prebuilt import ( "context" "fmt" "io" "github.com/google/uuid" "github.com/smallnest/langgraphgo/graph" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/tools" ) // ChatAgent represents a session with a user and can handle multi-turn conversations. type ChatAgent struct { // The underlying agent runnable Runnable *graph.StateRunnable[map[string]any] // The session ID for this conversation threadID string // Conversation history messages []llms.MessageContent // Dynamic tools that can be updated at runtime dynamicTools []tools.Tool // Model reference for streaming (optional) model llms.Model // Options used when creating the agent options *CreateAgentOptions } // NewChatAgent creates a new ChatAgent. // It wraps the underlying agent graph and manages conversation history automatically. func NewChatAgent(model llms.Model, inputTools []tools.Tool, opts ...CreateAgentOption) (*ChatAgent, error) { // Parse options options := &CreateAgentOptions{} for _, opt := range opts { opt(options) } // Create the agent with options agent, err := CreateAgentMap(model, inputTools, options.MaxIterations, opts...) if err != nil { return nil, err } // Generate a random thread ID for this session threadID := uuid.New().String() return &ChatAgent{ Runnable: agent, threadID: threadID, messages: make([]llms.MessageContent, 0), dynamicTools: make([]tools.Tool, 0), model: model, options: options, }, nil } // ThreadID returns the current session ID. func (c *ChatAgent) ThreadID() string { return c.threadID } // Chat sends a message to the agent and returns the response. // It maintains the conversation context by accumulating message history. func (c *ChatAgent) Chat(ctx context.Context, message string) (string, error) { // 1. Add user message to history userMsg := llms.TextParts(llms.ChatMessageTypeHuman, message) c.messages = append(c.messages, userMsg) // 2. 
Construct input with full conversation history and dynamic tools input := map[string]any{ "messages": c.messages, } // Add dynamic tools if any if len(c.dynamicTools) > 0 { input["extra_tools"] = c.dynamicTools } // 3. Create config with thread_id config := &graph.Config{ Configurable: map[string]any{ "thread_id": c.threadID, }, } // 4. Invoke the agent resp, err := c.Runnable.InvokeWithConfig(ctx, input, config) if err != nil { return "", err } // 5. Extract messages from response // resp is already map[string]any from StateRunnable[map[string]any] messages, ok := resp["messages"].([]llms.MessageContent) if !ok || len(messages) == 0 { return "", fmt.Errorf("no messages in response") } // 6. Update conversation history with all new messages c.messages = messages // 7. Extract the last message for return value lastMsg := messages[len(messages)-1] if len(lastMsg.Parts) == 0 { return "", nil } switch part := lastMsg.Parts[0].(type) { case llms.TextContent: return part.Text, nil default: return fmt.Sprintf("%v", part), nil } } // PrintStream prints the agent's response to the provided writer (e.g., os.Stdout). // Note: This is a simplified version that uses Chat internally. // For true streaming support, you would need to use a graph that supports streaming. func (c *ChatAgent) PrintStream(ctx context.Context, message string, w io.Writer) error { response, err := c.Chat(ctx, message) if err != nil { return err } fmt.Fprintf(w, "Response: %s\n", response) return nil } // SetTools replaces all dynamic tools with the provided tools. // Note: This does not affect the base tools provided when creating the agent. func (c *ChatAgent) SetTools(newTools []tools.Tool) { c.dynamicTools = make([]tools.Tool, len(newTools)) copy(c.dynamicTools, newTools) } // AddTool adds a new tool to the dynamic tools list. // If a tool with the same name already exists, it will be replaced. 
func (c *ChatAgent) AddTool(tool tools.Tool) { // Check if tool with same name exists for i, t := range c.dynamicTools { if t.Name() == tool.Name() { c.dynamicTools[i] = tool return } } // Add new tool c.dynamicTools = append(c.dynamicTools, tool) } // RemoveTool removes a tool by name from the dynamic tools list. // Returns true if the tool was found and removed, false otherwise. func (c *ChatAgent) RemoveTool(toolName string) bool { for i, t := range c.dynamicTools { if t.Name() == toolName { // Remove tool by slicing c.dynamicTools = append(c.dynamicTools[:i], c.dynamicTools[i+1:]...) return true } } return false } // GetTools returns a copy of the current dynamic tools list. // Note: This does not include the base tools provided when creating the agent. func (c *ChatAgent) GetTools() []tools.Tool { toolsCopy := make([]tools.Tool, len(c.dynamicTools)) copy(toolsCopy, c.dynamicTools) return toolsCopy } // ClearTools removes all dynamic tools. func (c *ChatAgent) ClearTools() { c.dynamicTools = make([]tools.Tool, 0) } // AsyncChat sends a message to the agent and returns a channel for streaming the response. // This method provides TRUE streaming by using the LLM's streaming API. // Chunks are sent to the channel as they're generated by the LLM in real-time. // The channel will be closed when the response is complete or an error occurs. func (c *ChatAgent) AsyncChat(ctx context.Context, message string) (<-chan string, error) { // Create output channel outputChan := make(chan string, 100) // Add user message to history userMsg := llms.TextParts(llms.ChatMessageTypeHuman, message) c.messages = append(c.messages, userMsg) // Prepare messages to send msgsToSend := c.messages // Apply system message if provided if c.options != nil && c.options.SystemMessage != "" { sysMsg := llms.TextParts(llms.ChatMessageTypeSystem, c.options.SystemMessage) msgsToSend = append([]llms.MessageContent{sysMsg}, msgsToSend...) 
} // Apply state modifier if provided if c.options != nil && c.options.StateModifier != nil { msgsToSend = c.options.StateModifier(msgsToSend) } // Start goroutine to handle streaming go func() { defer close(outputChan) var fullResponse string // Create streaming function that sends chunks to the channel streamingFunc := func(ctx context.Context, chunk []byte) error { chunkStr := string(chunk) fullResponse += chunkStr select { case <-ctx.Done(): return ctx.Err() case outputChan <- chunkStr: return nil } } // Call model with streaming enabled _, err := c.model.GenerateContent(ctx, msgsToSend, llms.WithStreamingFunc(streamingFunc)) if err != nil { // Error during streaming, channel will be closed return } // Add AI response to history aiMsg := llms.MessageContent{ Role: llms.ChatMessageTypeAI, Parts: []llms.ContentPart{llms.TextPart(fullResponse)}, } c.messages = append(c.messages, aiMsg) }() return outputChan, nil } // AsyncChatWithChunks sends a message to the agent and returns a channel for streaming the response. // Unlike AsyncChat, this streams the response in word-sized chunks for better readability. // The channel will be closed when the response is complete. 
func (c *ChatAgent) AsyncChatWithChunks(ctx context.Context, message string) (<-chan string, error) { // Create output channel outputChan := make(chan string, 100) // Start goroutine to handle the chat go func() { defer close(outputChan) // Call the regular Chat method response, err := c.Chat(ctx, message) if err != nil { // If there's an error, we can't send it through the string channel // Just close the channel return } // Split response into words and stream them words := splitIntoWords(response) for i, word := range words { select { case <-ctx.Done(): // Context cancelled, stop streaming return case outputChan <- word: // Add space after word (except for last word) if i < len(words)-1 { select { case <-ctx.Done(): return case outputChan <- " ": } } } } }() return outputChan, nil } // splitIntoWords splits a string into words while preserving punctuation func splitIntoWords(text string) []string { if text == "" { return []string{} } var words []string var currentWord string for _, char := range text { if char == ' ' || char == '\n' || char == '\t' { if currentWord != "" { words = append(words, currentWord) currentWord = "" } } else { currentWord += string(char) } } // Don't forget the last word if currentWord != "" { words = append(words, currentWord) } return words }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/reflection_agent_test.go
prebuilt/reflection_agent_test.go
package prebuilt import ( "testing" ) func TestCreateReflectionAgentMap(t *testing.T) { mockLLM := &MockReflectionLLM{ responses: []string{ "Initial response", "**Strengths:** Good. **Weaknesses:** None.", }, } config := ReflectionAgentConfig{Model: mockLLM, MaxIterations: 2} agent, err := CreateReflectionAgentMap(config) if err != nil { t.Fatalf("Failed: %v", err) } if agent == nil { t.Fatal("Agent is nil") } }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/supervisor_test.go
prebuilt/supervisor_test.go
package prebuilt import ( "context" "errors" "testing" "github.com/smallnest/langgraphgo/graph" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tmc/langchaingo/llms" ) // SupervisorMockLLM for supervisor testing type SupervisorMockLLM struct { responses []llms.ContentResponse currentIdx int returnError error } func (m *SupervisorMockLLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { if m.returnError != nil { return nil, m.returnError } if m.currentIdx >= len(m.responses) { return &llms.ContentResponse{ Choices: []*llms.ContentChoice{ {Content: "No more responses"}, }, }, nil } resp := m.responses[m.currentIdx] m.currentIdx++ return &resp, nil } func (m *SupervisorMockLLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", nil } // MockAgent for testing supervisor with various behaviors type MockAgent struct { name string response string shouldError bool errorMsg string } func NewMockAgent(name, response string) *MockAgent { return &MockAgent{ name: name, response: response, } } func NewMockErrorAgent(name, errorMsg string) *MockAgent { return &MockAgent{ name: name, shouldError: true, errorMsg: errorMsg, } } func (a *MockAgent) Invoke(ctx context.Context, state any) (any, error) { if a.shouldError { return nil, errors.New(a.errorMsg) } // Extract existing messages mState, ok := state.(map[string]any) if !ok { return nil, errors.New("invalid state type") } messages, ok := mState["messages"].([]llms.MessageContent) if !ok { return nil, errors.New("messages key not found or invalid type") } // Append agent response newMessages := append(messages, llms.TextParts(llms.ChatMessageTypeAI, a.response)) return map[string]any{ "messages": newMessages, }, nil } func (a *MockAgent) Compile() (*graph.StateRunnable[map[string]any], error) { workflow := graph.NewStateGraph[map[string]any]() // Define state schema 
schema := graph.NewMapSchema() schema.RegisterReducer("messages", graph.AppendReducer) workflow.SetSchema(schema) workflow.AddNode("run", "Agent run node", func(ctx context.Context, state map[string]any) (map[string]any, error) { result, err := a.Invoke(ctx, state) if err != nil { return nil, err } if m, ok := result.(map[string]any); ok { return m, nil } return state, nil }) workflow.SetEntryPoint("run") workflow.AddEdge("run", graph.END) return workflow.Compile() } func TestCreateSupervisor_DirectFinish(t *testing.T) { // Test supervisor that directly routes to FINISH mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "route", Arguments: `{"next": "FINISH"}`, }, }, }, }, }, }, }, } agent := NewMockAgent("Agent", "Should not be called") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Agent": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Complete immediately"), }, } res, err := supervisor.Invoke(context.Background(), initialState) assert.NoError(t, err) messages := res["messages"].([]llms.MessageContent) // Should only have initial message, no agent responses assert.Equal(t, 1, len(messages)) assert.Equal(t, "Complete immediately", messages[0].Parts[0].(llms.TextContent).Text) assert.Equal(t, "FINISH", res["next"]) } func TestCreateSupervisor_AgentError(t *testing.T) { // Test handling of agent errors mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "route", Arguments: `{"next": "ErrorAgent"}`, }, }, }, }, }, }, }, } errorAgent := NewMockErrorAgent("ErrorAgent", "Agent failed to process") 
errorAgentRunnable, err := errorAgent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "ErrorAgent": errorAgentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Trigger error"), }, } // Should return error from agent _, err = supervisor.Invoke(context.Background(), initialState) assert.Error(t, err) assert.Contains(t, err.Error(), "Agent failed to process") } func TestCreateSupervisor_NoToolCall(t *testing.T) { // Test when LLM doesn't make a tool call mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { Content: "I don't know what to do", }, }, }, }, } agent := NewMockAgent("Agent", "Response") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Agent": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Test"), }, } // Should return error about not selecting next step _, err = supervisor.Invoke(context.Background(), initialState) assert.Error(t, err) assert.Contains(t, err.Error(), "supervisor did not select a next step") } func TestCreateSupervisor_InvalidRouteArguments(t *testing.T) { // Test when route tool has invalid JSON mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "route", Arguments: `{invalid json`, }, }, }, }, }, }, }, } agent := NewMockAgent("Agent", "Response") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Agent": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) 
assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Test"), }, } _, err = supervisor.Invoke(context.Background(), initialState) assert.Error(t, err) assert.Contains(t, err.Error(), "failed to parse route arguments") } func TestCreateSupervisor_InvalidStateType(t *testing.T) { mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "route", Arguments: `{"next": "Agent1"}`, }, }, }, }, }, }, }, } agent := NewMockAgent("Agent1", "Response") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Agent1": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) // Pass state without messages key _, err = supervisor.Invoke(context.Background(), map[string]any{"foo": "bar"}) assert.Error(t, err) } func TestCreateSupervisor_MissingMessages(t *testing.T) { mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "route", Arguments: `{"next": "Agent1"}`, }, }, }, }, }, }, }, } agent := NewMockAgent("Agent1", "Response") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Agent1": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) // Pass state without messages invalidState := map[string]any{ "other": "value", } _, err = supervisor.Invoke(context.Background(), invalidState) assert.Error(t, err) assert.Contains(t, err.Error(), "messages key not found or invalid type") } func TestCreateSupervisor_LLMError(t *testing.T) { // Test when LLM returns an error mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{}, returnError: errors.New("LLM 
connection failed"), } agent := NewMockAgent("Agent", "Response") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Agent": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Test"), }, } _, err = supervisor.Invoke(context.Background(), initialState) assert.Error(t, err) assert.Contains(t, err.Error(), "LLM connection failed") } func TestCreateSupervisor_EmptyMembers(t *testing.T) { // Test with no members mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "route", Arguments: `{"next": "FINISH"}`, }, }, }, }, }, }, }, } // Empty members map members := map[string]*graph.StateRunnable[map[string]any]{} supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Test"), }, } res, err := supervisor.Invoke(context.Background(), initialState) assert.NoError(t, err) messages := res["messages"].([]llms.MessageContent) assert.Equal(t, 1, len(messages)) // Only initial message assert.Equal(t, "FINISH", res["next"]) } func TestCreateSupervisor_UnknownAgent(t *testing.T) { // Test when LLM routes to an unknown agent mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "route", Arguments: `{"next": "UnknownAgent"}`, }, }, }, }, }, }, }, } agent := NewMockAgent("KnownAgent", "Response") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "KnownAgent": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, 
members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Test unknown agent"), }, } _, err = supervisor.Invoke(context.Background(), initialState) assert.Error(t, err) // The error message will depend on graph implementation assert.Error(t, err) } func TestCreateSupervisor_RouteWithoutFunctionCall(t *testing.T) { // Test when tool call has no function call mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { // No FunctionCall field }, }, }, }, }, }, } agent := NewMockAgent("Agent", "Response") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Agent": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Test"), }, } _, err = supervisor.Invoke(context.Background(), initialState) assert.Error(t, err) } func TestCreateSupervisor_NoChoices(t *testing.T) { // Test when LLM returns no choices mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{}, // Empty choices }, }, } agent := NewMockAgent("Agent", "Response") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Agent": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Test"), }, } _, err = supervisor.Invoke(context.Background(), initialState) assert.Error(t, err) } func TestCreateSupervisor_EmptyRouteName(t *testing.T) { // Test when route tool call has empty name mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { 
ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "", Arguments: `{"next": "Agent"}`, }, }, }, }, }, }, }, } agent := NewMockAgent("Agent", "Response") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Agent": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Test"), }, } _, err = supervisor.Invoke(context.Background(), initialState) assert.Error(t, err) assert.Contains(t, err.Error(), "supervisor did not select a next step") } func TestCreateSupervisor_SingleAgent(t *testing.T) { // Test with single agent mockLLM := &SupervisorMockLLM{ responses: []llms.ContentResponse{ { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "route", Arguments: `{"next": "Worker"}`, }, }, }, }, }, }, { Choices: []*llms.ContentChoice{ { ToolCalls: []llms.ToolCall{ { FunctionCall: &llms.FunctionCall{ Name: "route", Arguments: `{"next": "FINISH"}`, }, }, }, }, }, }, }, } agent := NewMockAgent("Worker", "Task completed") agentRunnable, err := agent.Compile() require.NoError(t, err) members := map[string]*graph.StateRunnable[map[string]any]{ "Worker": agentRunnable, } supervisor, err := CreateSupervisorMap(mockLLM, members) assert.NoError(t, err) initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Single task"), }, } res, err := supervisor.Invoke(context.Background(), initialState) assert.NoError(t, err) messages := res["messages"].([]llms.MessageContent) // Should have initial + worker message + potential routing messages assert.True(t, len(messages) >= 2) assert.Equal(t, "Single task", messages[0].Parts[0].(llms.TextContent).Text) // Find the worker response found := false for _, msg := range messages[1:] { if msg.Role == 
llms.ChatMessageTypeAI { if txt, ok := msg.Parts[0].(llms.TextContent); ok && txt.Text == "Task completed" { found = true break } } } assert.True(t, found, "Worker response should be in messages") }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/chat_agent_example_test.go
prebuilt/chat_agent_example_test.go
package prebuilt_test import ( "context" "fmt" "os" "github.com/smallnest/langgraphgo/prebuilt" "github.com/tmc/langchaingo/llms/openai" ) // Example demonstrating multi-turn conversation with ChatAgent func ExampleChatAgent() { // Check if API key is available apiKey := os.Getenv("OPENAI_API_KEY") if apiKey == "" { fmt.Println("OPENAI_API_KEY not set, skipping example") return } // Create OpenAI model model, err := openai.New() if err != nil { fmt.Printf("Error creating model: %v\n", err) return } // Create ChatAgent with no tools agent, err := prebuilt.NewChatAgent(model, nil) if err != nil { fmt.Printf("Error creating agent: %v\n", err) return } ctx := context.Background() // First turn fmt.Println("User: Hello! My name is Alice.") resp1, err := agent.Chat(ctx, "Hello! My name is Alice.") if err != nil { fmt.Printf("Error: %v\n", err) return } fmt.Printf("Agent: %s\n\n", resp1) // Second turn - agent should remember the name fmt.Println("User: What's my name?") resp2, err := agent.Chat(ctx, "What's my name?") if err != nil { fmt.Printf("Error: %v\n", err) return } fmt.Printf("Agent: %s\n\n", resp2) // Third turn - continue the conversation fmt.Println("User: Tell me a short joke about programmers.") resp3, err := agent.Chat(ctx, "Tell me a short joke about programmers.") if err != nil { fmt.Printf("Error: %v\n", err) return } fmt.Printf("Agent: %s\n", resp3) // Display the session ID fmt.Printf("\nSession ID: %s\n", agent.ThreadID()) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/reflection_agent_example_test.go
prebuilt/reflection_agent_example_test.go
package prebuilt_test import ( "context" "fmt" "log" "github.com/smallnest/langgraphgo/prebuilt" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/llms/openai" ) func ExampleCreateReflectionAgent() { // Create LLM model, err := openai.New() if err != nil { log.Fatal(err) } // Configure reflection agent config := prebuilt.ReflectionAgentConfig{ Model: model, MaxIterations: 3, Verbose: true, SystemMessage: "You are an expert technical writer. Create clear, accurate, and comprehensive responses.", } // Create agent agent, err := prebuilt.CreateReflectionAgentMap(config) if err != nil { log.Fatal(err) } // Prepare initial state initialState := map[string]any{ "messages": []llms.MessageContent{ { Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart("Explain the CAP theorem in distributed systems")}, }, }, } // Invoke agent result, err := agent.Invoke(context.Background(), initialState) if err != nil { log.Fatal(err) } // Extract final response messages := result["messages"].([]llms.MessageContent) fmt.Println("=== Final Response ===") for _, msg := range messages { if msg.Role == llms.ChatMessageTypeAI { for _, part := range msg.Parts { if textPart, ok := part.(llms.TextContent); ok { fmt.Println(textPart.Text) } } } } } func ExampleCreateReflectionAgent_withSeparateReflector() { // Create generation model generationModel, err := openai.New(openai.WithModel("gpt-4")) if err != nil { log.Fatal(err) } // Create separate reflection model (could be a different model) reflectionModel, err := openai.New(openai.WithModel("gpt-4")) if err != nil { log.Fatal(err) } // Configure with separate models config := prebuilt.ReflectionAgentConfig{ Model: generationModel, ReflectionModel: reflectionModel, MaxIterations: 2, Verbose: true, SystemMessage: "You are a helpful assistant providing detailed explanations.", ReflectionPrompt: `You are a senior technical reviewer. Evaluate the response for: 1. Technical accuracy 2. Completeness of explanation 3. 
Clarity for the target audience 4. Use of examples Be specific in your feedback.`, } agent, err := prebuilt.CreateReflectionAgentMap(config) if err != nil { log.Fatal(err) } initialState := map[string]any{ "messages": []llms.MessageContent{ { Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart("What is a Merkle tree and how is it used in blockchain?")}, }, }, } result, err := agent.Invoke(context.Background(), initialState) if err != nil { log.Fatal(err) } draft := result["draft"].(string) iteration := result["iteration"].(int) fmt.Printf("Final draft (after %d iterations):\n%s\n", iteration, draft) } func ExampleCreateReflectionAgent_customCriteria() { model, err := openai.New() if err != nil { log.Fatal(err) } // Custom reflection criteria for code quality config := prebuilt.ReflectionAgentConfig{ Model: model, MaxIterations: 2, Verbose: true, SystemMessage: "You are a senior software engineer reviewing code.", ReflectionPrompt: `Evaluate the code review for: 1. **Security**: Are security issues identified? 2. **Performance**: Are performance concerns addressed? 3. **Maintainability**: Are code quality issues noted? 4. **Best Practices**: Are language/framework best practices mentioned? Provide specific, actionable feedback.`, } agent, err := prebuilt.CreateReflectionAgentMap(config) if err != nil { log.Fatal(err) } initialState := map[string]any{ "messages": []llms.MessageContent{ { Role: llms.ChatMessageTypeHuman, Parts: []llms.ContentPart{llms.TextPart("Review this SQL query function for issues")}, }, }, } result, err := agent.Invoke(context.Background(), initialState) if err != nil { log.Fatal(err) } draft := result["draft"].(string) fmt.Printf("Code review:\n%s\n", draft) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/mock_errors.go
prebuilt/mock_errors.go
package prebuilt import ( "context" "fmt" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/tools" ) // Enforce that MockToolError implements tools.Tool var _ tools.Tool = (*MockToolError)(nil) // MockLLMError for testing GenerateContent error type MockLLMError struct{} func (m *MockLLMError) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { return nil, fmt.Errorf("mock LLM GenerateContent error") } func (m *MockLLMError) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", fmt.Errorf("mock LLM Call error") } // MockLLMEmptyContent for testing empty content response type MockLLMEmptyContent struct{} func (m *MockLLMEmptyContent) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) { return &llms.ContentResponse{ Choices: []*llms.ContentChoice{ { Content: "", // Empty content }, }, }, nil } func (m *MockLLMEmptyContent) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) { return "", nil // Not used for this test scenario } // MockToolError for testing tool execution error type MockToolError struct { name string } func (t *MockToolError) Name() string { return t.name } func (t *MockToolError) Description() string { return "A mock tool that returns an error" } func (t *MockToolError) Call(ctx context.Context, input string) (string, error) { return "", fmt.Errorf("mock tool execution error") }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/prebuilt/mock_errors_test.go
prebuilt/mock_errors_test.go
package prebuilt import ( "context" "testing" "github.com/stretchr/testify/assert" "github.com/tmc/langchaingo/llms" ) func TestMockLLMError(t *testing.T) { mock := &MockLLMError{} t.Run("GenerateContent returns error", func(t *testing.T) { ctx := context.Background() messages := []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "test"), } resp, err := mock.GenerateContent(ctx, messages) assert.Error(t, err) assert.Nil(t, resp) assert.Contains(t, err.Error(), "mock LLM GenerateContent error") }) t.Run("Call returns error", func(t *testing.T) { ctx := context.Background() resp, err := mock.Call(ctx, "test prompt") assert.Error(t, err) assert.Empty(t, resp) assert.Contains(t, err.Error(), "mock LLM Call error") }) } func TestMockLLMEmptyContent(t *testing.T) { mock := &MockLLMEmptyContent{} t.Run("GenerateContent returns empty content", func(t *testing.T) { ctx := context.Background() messages := []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "test"), } resp, err := mock.GenerateContent(ctx, messages) assert.NoError(t, err) assert.NotNil(t, resp) assert.Len(t, resp.Choices, 1) assert.Equal(t, "", resp.Choices[0].Content) }) t.Run("Call returns empty string", func(t *testing.T) { ctx := context.Background() resp, err := mock.Call(ctx, "test prompt") assert.NoError(t, err) assert.Equal(t, "", resp) }) } func TestMockToolError(t *testing.T) { mock := &MockToolError{name: "error_tool"} t.Run("Name returns tool name", func(t *testing.T) { assert.Equal(t, "error_tool", mock.Name()) }) t.Run("Description returns description", func(t *testing.T) { assert.Equal(t, "A mock tool that returns an error", mock.Description()) }) t.Run("Call returns error", func(t *testing.T) { ctx := context.Background() resp, err := mock.Call(ctx, "test input") assert.Error(t, err) assert.Empty(t, resp) assert.Contains(t, err.Error(), "mock tool execution error") }) t.Run("Call with empty input", func(t *testing.T) { ctx := context.Background() resp, err := 
mock.Call(ctx, "") assert.Error(t, err) assert.Empty(t, resp) }) } func TestMockErrorsImplementInterfaces(t *testing.T) { t.Run("MockLLMError implements Model interface", func(t *testing.T) { var _ llms.Model = (*MockLLMError)(nil) mock := &MockLLMError{} assert.NotNil(t, mock) }) t.Run("MockLLMEmptyContent implements Model interface", func(t *testing.T) { var _ llms.Model = (*MockLLMEmptyContent)(nil) mock := &MockLLMEmptyContent{} assert.NotNil(t, mock) }) t.Run("MockToolError implements Tool interface", func(t *testing.T) { var _ interface { Name() string Description() string Call(ctx context.Context, input string) (string, error) } = (*MockToolError)(nil) mock := &MockToolError{} assert.NotNil(t, mock) }) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/buffer.go
memory/buffer.go
package memory import ( "context" "sync" ) // BufferMemory is a simple buffer-based memory implementation // Similar to LangChain's ConversationBufferMemory // Combines sliding window with optional summarization type BufferMemory struct { messages []*Message maxMessages int // 0 = unlimited maxTokens int // 0 = unlimited autoSummarize bool // Auto-summarize when limits exceeded mu sync.RWMutex // Optional summarizer Summarizer func(ctx context.Context, messages []*Message) (string, error) } // BufferConfig holds configuration for buffer memory type BufferConfig struct { MaxMessages int // Maximum number of messages (0 = unlimited) MaxTokens int // Maximum total tokens (0 = unlimited) AutoSummarize bool // Enable automatic summarization Summarizer func(ctx context.Context, messages []*Message) (string, error) } // NewBufferMemory creates a new buffer memory func NewBufferMemory(config *BufferConfig) *BufferMemory { if config == nil { config = &BufferConfig{ MaxMessages: 0, // Unlimited by default MaxTokens: 0, AutoSummarize: false, } } summarizer := config.Summarizer if summarizer == nil { summarizer = defaultSummarizer } return &BufferMemory{ messages: make([]*Message, 0), maxMessages: config.MaxMessages, maxTokens: config.MaxTokens, autoSummarize: config.AutoSummarize, Summarizer: summarizer, } } // AddMessage adds a message to the buffer func (b *BufferMemory) AddMessage(ctx context.Context, msg *Message) error { b.mu.Lock() defer b.mu.Unlock() b.messages = append(b.messages, msg) // Check limits and trim if necessary if b.maxMessages > 0 && len(b.messages) > b.maxMessages { if b.autoSummarize { // Summarize oldest messages toSummarize := b.messages[:len(b.messages)-b.maxMessages] summary, err := b.Summarizer(ctx, toSummarize) if err == nil { summaryMsg := NewMessage("system", summary) b.messages = append([]*Message{summaryMsg}, b.messages[len(b.messages)-b.maxMessages:]...) 
} else { // Fallback: just trim b.messages = b.messages[len(b.messages)-b.maxMessages:] } } else { // Simple trim b.messages = b.messages[len(b.messages)-b.maxMessages:] } } // Check token limit if b.maxTokens > 0 { totalTokens := 0 for i := len(b.messages) - 1; i >= 0; i-- { totalTokens += b.messages[i].TokenCount if totalTokens > b.maxTokens { // Remove oldest messages if b.autoSummarize && i > 0 { toSummarize := b.messages[:i] summary, err := b.Summarizer(ctx, toSummarize) if err == nil { summaryMsg := NewMessage("system", summary) b.messages = append([]*Message{summaryMsg}, b.messages[i:]...) } else { b.messages = b.messages[i:] } } else { b.messages = b.messages[i:] } break } } } return nil } // GetContext returns all messages in the buffer func (b *BufferMemory) GetContext(ctx context.Context, query string) ([]*Message, error) { b.mu.RLock() defer b.mu.RUnlock() result := make([]*Message, len(b.messages)) copy(result, b.messages) return result, nil } // Clear removes all messages func (b *BufferMemory) Clear(ctx context.Context) error { b.mu.Lock() defer b.mu.Unlock() b.messages = make([]*Message, 0) return nil } // GetStats returns buffer memory statistics func (b *BufferMemory) GetStats(ctx context.Context) (*Stats, error) { b.mu.RLock() defer b.mu.RUnlock() totalTokens := 0 for _, msg := range b.messages { totalTokens += msg.TokenCount } return &Stats{ TotalMessages: len(b.messages), TotalTokens: totalTokens, ActiveMessages: len(b.messages), ActiveTokens: totalTokens, CompressionRate: 1.0, }, nil } // GetMessages returns a copy of all messages func (b *BufferMemory) GetMessages() []*Message { b.mu.RLock() defer b.mu.RUnlock() result := make([]*Message, len(b.messages)) copy(result, b.messages) return result } // LoadMessages loads messages into the buffer (replaces existing) func (b *BufferMemory) LoadMessages(messages []*Message) { b.mu.Lock() defer b.mu.Unlock() b.messages = make([]*Message, len(messages)) copy(b.messages, messages) }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/hierarchical.go
memory/hierarchical.go
package memory import ( "context" "sync" "time" ) // HierarchicalMemory organizes messages in layers based on importance and recency // Pros: Balances recent context with important historical information // Cons: More complex management, requires importance scoring type HierarchicalMemory struct { // Layer 1: Recent messages (always included) recentMessages []*Message recentLimit int // Layer 2: Important messages (high priority) importantMessages []*Message importantLimit int // Layer 3: Archived messages (low priority, rarely accessed) archivedMessages []*Message mu sync.RWMutex // ImportanceScorer determines message importance (0.0 to 1.0) // Higher scores = more important ImportanceScorer func(msg *Message) float64 } // HierarchicalConfig holds configuration for hierarchical memory type HierarchicalConfig struct { RecentLimit int // Max recent messages ImportantLimit int // Max important messages ImportanceScorer func(msg *Message) float64 // Custom importance scorer } // NewHierarchicalMemory creates a new hierarchical memory strategy func NewHierarchicalMemory(config *HierarchicalConfig) *HierarchicalMemory { if config == nil { config = &HierarchicalConfig{ RecentLimit: 10, ImportantLimit: 20, } } if config.RecentLimit <= 0 { config.RecentLimit = 10 } if config.ImportantLimit <= 0 { config.ImportantLimit = 20 } scorer := config.ImportanceScorer if scorer == nil { scorer = defaultImportanceScorer } return &HierarchicalMemory{ recentMessages: make([]*Message, 0), importantMessages: make([]*Message, 0), archivedMessages: make([]*Message, 0), recentLimit: config.RecentLimit, importantLimit: config.ImportantLimit, ImportanceScorer: scorer, } } // AddMessage adds a message and organizes it into appropriate layer func (h *HierarchicalMemory) AddMessage(ctx context.Context, msg *Message) error { h.mu.Lock() defer h.mu.Unlock() // Always add to recent messages h.recentMessages = append(h.recentMessages, msg) // Score the message for importance // Check if there's an 
explicit importance in metadata first if importance, ok := msg.Metadata["importance"].(float64); ok && importance > 0.7 { h.importantMessages = append(h.importantMessages, msg) } else { // Use scorer to determine importance score := h.ImportanceScorer(msg) if score > 0.7 { h.importantMessages = append(h.importantMessages, msg) } } // Manage recent layer size if len(h.recentMessages) > h.recentLimit { // Move oldest recent message to archive oldest := h.recentMessages[0] h.recentMessages = h.recentMessages[1:] // Only archive if not already in important layer if !h.isInImportant(oldest) { h.archivedMessages = append(h.archivedMessages, oldest) } } // Manage important layer size if len(h.importantMessages) > h.importantLimit { // Move lowest importance message to archive lowestIdx := h.findLowestImportance() if lowestIdx >= 0 { archived := h.importantMessages[lowestIdx] h.importantMessages = append(h.importantMessages[:lowestIdx], h.importantMessages[lowestIdx+1:]...) h.archivedMessages = append(h.archivedMessages, archived) } } return nil } // GetContext returns messages from all layers, prioritized by importance func (h *HierarchicalMemory) GetContext(ctx context.Context, query string) ([]*Message, error) { h.mu.RLock() defer h.mu.RUnlock() result := make([]*Message, 0) // Layer 1: Important messages (highest priority) result = append(result, h.importantMessages...) 
// Layer 2: Recent messages (medium priority) // Avoid duplicates with important layer for _, msg := range h.recentMessages { if !h.containsMessage(result, msg) { result = append(result, msg) } } // Note: Archived messages are only included if specifically queried // This implementation doesn't include them by default return result, nil } // Clear removes all messages from all layers func (h *HierarchicalMemory) Clear(ctx context.Context) error { h.mu.Lock() defer h.mu.Unlock() h.recentMessages = make([]*Message, 0) h.importantMessages = make([]*Message, 0) h.archivedMessages = make([]*Message, 0) return nil } // GetStats returns statistics about hierarchical memory func (h *HierarchicalMemory) GetStats(ctx context.Context) (*Stats, error) { h.mu.RLock() defer h.mu.RUnlock() allMessages := len(h.recentMessages) + len(h.importantMessages) + len(h.archivedMessages) totalTokens := 0 activeTokens := 0 // Count tokens in all layers for _, msg := range h.recentMessages { totalTokens += msg.TokenCount activeTokens += msg.TokenCount } for _, msg := range h.importantMessages { totalTokens += msg.TokenCount activeTokens += msg.TokenCount } for _, msg := range h.archivedMessages { totalTokens += msg.TokenCount } activeMessages := len(h.recentMessages) + len(h.importantMessages) compressionRate := 1.0 if totalTokens > 0 { compressionRate = float64(activeTokens) / float64(totalTokens) } return &Stats{ TotalMessages: allMessages, TotalTokens: totalTokens, ActiveMessages: activeMessages, ActiveTokens: activeTokens, CompressionRate: compressionRate, }, nil } // isInImportant checks if a message is in the important layer func (h *HierarchicalMemory) isInImportant(msg *Message) bool { for _, m := range h.importantMessages { if m.ID == msg.ID { return true } } return false } // findLowestImportance finds the index of the least important message func (h *HierarchicalMemory) findLowestImportance() int { if len(h.importantMessages) == 0 { return -1 } lowestIdx := 0 lowestScore := 
h.ImportanceScorer(h.importantMessages[0]) for i, msg := range h.importantMessages { score := h.ImportanceScorer(msg) if score < lowestScore { lowestScore = score lowestIdx = i } } return lowestIdx } // containsMessage checks if a message is already in the result set func (h *HierarchicalMemory) containsMessage(messages []*Message, target *Message) bool { for _, msg := range messages { if msg.ID == target.ID { return true } } return false } // defaultImportanceScorer provides a simple importance scoring function // Scores based on: message length, role, and recency func defaultImportanceScorer(msg *Message) float64 { score := 0.5 // Base score // Boost for system messages if msg.Role == "system" { score += 0.2 } // Boost for longer messages (more content = potentially more important) if msg.TokenCount > 100 { score += 0.2 } // Boost for very recent messages age := time.Since(msg.Timestamp) if age < time.Minute*5 { score += 0.1 } // Check metadata for explicit importance if importance, ok := msg.Metadata["importance"].(float64); ok { score = importance } // Clamp to [0, 1] if score > 1.0 { score = 1.0 } if score < 0.0 { score = 0.0 } return score }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/langchain_adapter.go
memory/langchain_adapter.go
package memory import ( "context" "github.com/tmc/langchaingo/llms" langchainmemory "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/schema" ) // ChatMemory is the interface for conversation memory management in langgraphgo type ChatMemory interface { // SaveContext saves the context from this conversation to buffer SaveContext(ctx context.Context, inputValues map[string]any, outputValues map[string]any) error // LoadMemoryVariables loads memory variables LoadMemoryVariables(ctx context.Context, inputs map[string]any) (map[string]any, error) // Clear clears memory contents Clear(ctx context.Context) error // GetMessages returns all messages in memory GetMessages(ctx context.Context) ([]llms.ChatMessage, error) } // LangChainMemory adapts langchaingo's memory implementations to our ChatMemory interface type LangChainMemory struct { buffer schema.Memory } // NewLangChainMemory creates a new adapter for langchaingo memory // Supports ConversationBuffer, ConversationWindowBuffer, ConversationTokenBuffer, etc. 
func NewLangChainMemory(buffer schema.Memory) *LangChainMemory { return &LangChainMemory{ buffer: buffer, } } // NewConversationBufferMemory creates a new conversation buffer memory with default settings func NewConversationBufferMemory(options ...langchainmemory.ConversationBufferOption) *LangChainMemory { return &LangChainMemory{ buffer: langchainmemory.NewConversationBuffer(options...), } } // NewConversationWindowBufferMemory creates a new conversation window buffer memory // that keeps only the last N conversation turns func NewConversationWindowBufferMemory(windowSize int, options ...langchainmemory.ConversationBufferOption) *LangChainMemory { return &LangChainMemory{ buffer: langchainmemory.NewConversationWindowBuffer(windowSize, options...), } } // NewConversationTokenBufferMemory creates a new conversation token buffer memory // that keeps conversation history within a token limit func NewConversationTokenBufferMemory(llm llms.Model, maxTokenLimit int, options ...langchainmemory.ConversationBufferOption) *LangChainMemory { return &LangChainMemory{ buffer: langchainmemory.NewConversationTokenBuffer(llm, maxTokenLimit, options...), } } // SaveContext saves the context from this conversation to buffer func (m *LangChainMemory) SaveContext(ctx context.Context, inputValues map[string]any, outputValues map[string]any) error { return m.buffer.SaveContext(ctx, inputValues, outputValues) } // LoadMemoryVariables loads memory variables func (m *LangChainMemory) LoadMemoryVariables(ctx context.Context, inputs map[string]any) (map[string]any, error) { return m.buffer.LoadMemoryVariables(ctx, inputs) } // Clear clears memory contents func (m *LangChainMemory) Clear(ctx context.Context) error { return m.buffer.Clear(ctx) } // GetMessages returns all messages in memory // This is a convenience method that extracts messages from the memory buffer func (m *LangChainMemory) GetMessages(ctx context.Context) ([]llms.ChatMessage, error) { // Load memory variables to get the 
conversation history memVars, err := m.buffer.LoadMemoryVariables(ctx, map[string]any{}) if err != nil { return nil, err } // Try to get messages from any memory key // The default memory key is "history" for ConversationBuffer // but it can be customized with WithMemoryKey option for _, value := range memVars { // If return_messages is true, value will be []llms.ChatMessage if messages, ok := value.([]llms.ChatMessage); ok { return messages, nil } } // If return_messages is false, history will be a string // In this case, we can't easily convert back to messages // So we return an empty slice return []llms.ChatMessage{}, nil } // ChatMessageHistory provides direct access to chat message history type ChatMessageHistory struct { history *langchainmemory.ChatMessageHistory } // NewChatMessageHistory creates a new chat message history func NewChatMessageHistory(options ...langchainmemory.ChatMessageHistoryOption) *ChatMessageHistory { return &ChatMessageHistory{ history: langchainmemory.NewChatMessageHistory(options...), } } // AddMessage adds a message to the history func (h *ChatMessageHistory) AddMessage(ctx context.Context, message llms.ChatMessage) error { return h.history.AddMessage(ctx, message) } // AddUserMessage adds a user message to the history func (h *ChatMessageHistory) AddUserMessage(ctx context.Context, message string) error { return h.history.AddUserMessage(ctx, message) } // AddAIMessage adds an AI message to the history func (h *ChatMessageHistory) AddAIMessage(ctx context.Context, message string) error { return h.history.AddAIMessage(ctx, message) } // Messages returns all messages in the history func (h *ChatMessageHistory) Messages(ctx context.Context) ([]llms.ChatMessage, error) { return h.history.Messages(ctx) } // Clear clears all messages from the history func (h *ChatMessageHistory) Clear(ctx context.Context) error { return h.history.Clear(ctx) } // SetMessages sets the messages in the history func (h *ChatMessageHistory) SetMessages(ctx 
context.Context, messages []llms.ChatMessage) error { return h.history.SetMessages(ctx, messages) } // GetHistory returns the underlying langchaingo ChatMessageHistory func (h *ChatMessageHistory) GetHistory() schema.ChatMessageHistory { return h.history }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/retrieval.go
memory/retrieval.go
package memory import ( "context" "fmt" "math" "sort" "sync" ) // RetrievalMemory uses vector embeddings to retrieve relevant past messages // Pros: Only fetches contextually relevant history, efficient token usage // Cons: Requires embedding model, may miss chronologically important context type RetrievalMemory struct { messages []*Message embeddings map[string][]float64 // Message ID -> embedding vector topK int // Number of most relevant messages to retrieve mu sync.RWMutex // EmbeddingFunc generates embeddings for text // In production, this would call an embedding API like OpenAI embeddings EmbeddingFunc func(ctx context.Context, text string) ([]float64, error) } // RetrievalConfig holds configuration for retrieval-based memory type RetrievalConfig struct { TopK int // Number of messages to retrieve EmbeddingFunc func(ctx context.Context, text string) ([]float64, error) // Custom embedding function } // NewRetrievalMemory creates a new retrieval-based memory strategy func NewRetrievalMemory(config *RetrievalConfig) *RetrievalMemory { if config == nil { config = &RetrievalConfig{ TopK: 5, } } if config.TopK <= 0 { config.TopK = 5 } embeddingFunc := config.EmbeddingFunc if embeddingFunc == nil { embeddingFunc = defaultEmbeddingFunc } return &RetrievalMemory{ messages: make([]*Message, 0), embeddings: make(map[string][]float64), topK: config.TopK, EmbeddingFunc: embeddingFunc, } } // AddMessage adds a message and generates its embedding func (r *RetrievalMemory) AddMessage(ctx context.Context, msg *Message) error { r.mu.Lock() defer r.mu.Unlock() // Generate embedding for the message embedding, err := r.EmbeddingFunc(ctx, msg.Content) if err != nil { return fmt.Errorf("failed to generate embedding: %w", err) } r.messages = append(r.messages, msg) r.embeddings[msg.ID] = embedding return nil } // GetContext retrieves the most semantically similar messages to the query func (r *RetrievalMemory) GetContext(ctx context.Context, query string) ([]*Message, error) { 
r.mu.RLock() defer r.mu.RUnlock() if len(r.messages) == 0 { return []*Message{}, nil } // Generate embedding for query queryEmbedding, err := r.EmbeddingFunc(ctx, query) if err != nil { return nil, fmt.Errorf("failed to generate query embedding: %w", err) } // Calculate similarity scores type scoredMessage struct { message *Message score float64 } scores := make([]scoredMessage, 0, len(r.messages)) for _, msg := range r.messages { msgEmbedding := r.embeddings[msg.ID] similarity := cosineSimilarity(queryEmbedding, msgEmbedding) scores = append(scores, scoredMessage{ message: msg, score: similarity, }) } // Sort by similarity (highest first) sort.Slice(scores, func(i, j int) bool { return scores[i].score > scores[j].score }) // Return top K messages k := min(r.topK, len(scores)) result := make([]*Message, k) for i := range k { result[i] = scores[i].message } return result, nil } // Clear removes all messages and embeddings func (r *RetrievalMemory) Clear(ctx context.Context) error { r.mu.Lock() defer r.mu.Unlock() r.messages = make([]*Message, 0) r.embeddings = make(map[string][]float64) return nil } // GetStats returns statistics about retrieval memory func (r *RetrievalMemory) GetStats(ctx context.Context) (*Stats, error) { r.mu.RLock() defer r.mu.RUnlock() totalTokens := 0 for _, msg := range r.messages { totalTokens += msg.TokenCount } // Active tokens = tokens in topK messages (approximate) activeTokens := 0 if len(r.messages) > 0 { k := min(r.topK, len(r.messages)) for i := range k { activeTokens += r.messages[i].TokenCount } } return &Stats{ TotalMessages: len(r.messages), TotalTokens: totalTokens, ActiveMessages: r.topK, ActiveTokens: activeTokens, CompressionRate: float64(activeTokens) / float64(totalTokens), }, nil } // SetTopK updates the number of messages to retrieve func (r *RetrievalMemory) SetTopK(k int) { r.mu.Lock() defer r.mu.Unlock() if k > 0 { r.topK = k } } // cosineSimilarity calculates cosine similarity between two vectors func 
cosineSimilarity(a, b []float64) float64 { if len(a) != len(b) { return 0.0 } var dotProduct, normA, normB float64 for i := range a { dotProduct += a[i] * b[i] normA += a[i] * a[i] normB += b[i] * b[i] } if normA == 0 || normB == 0 { return 0.0 } return dotProduct / (math.Sqrt(normA) * math.Sqrt(normB)) } // defaultEmbeddingFunc provides a simple embedding function // In production, use a proper embedding model (e.g., OpenAI embeddings) func defaultEmbeddingFunc(ctx context.Context, text string) ([]float64, error) { // Simple word-frequency based embedding (for demonstration) // This is NOT a proper embedding - use an actual model in production words := make(map[string]int) for _, char := range text { word := string(char) words[word]++ } // Create a fixed-size vector embedding := make([]float64, 128) for word, count := range words { // Hash word to index hash := 0 for _, c := range word { hash = (hash*31 + int(c)) % 128 } embedding[hash] += float64(count) } // Normalize var norm float64 for _, val := range embedding { norm += val * val } norm = math.Sqrt(norm) if norm > 0 { for i := range embedding { embedding[i] /= norm } } return embedding, nil }
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/os_like.go
memory/os_like.go
package memory

import (
	"container/heap"
	"context"
	"sync"
	"time"
)

// OSLikeMemory implements OS-inspired memory management with paging and eviction.
// Pages move between three tiers: active (like RAM), cache, and archive (like disk).
// Pros: Sophisticated lifecycle management, optimal memory usage
// Cons: Complex implementation, overhead of management
type OSLikeMemory struct {
	// Active memory (like RAM) - recently accessed pages
	activeMemory map[string]*MemoryPage

	// Cached memory - less recently accessed pages
	cache map[string]*MemoryPage

	// Archived (like disk) - rarely accessed pages
	archived map[string]*MemoryPage

	// LRU tracking. NOTE(review): this heap is initialized but never pushed
	// to or popped from; eviction currently scans with findLRUPage instead.
	lru *LRUHeap

	// Configuration
	activeLimit  int           // Max pages in active memory
	cacheLimit   int           // Max pages in cache
	accessWindow time.Duration // Time window for access tracking

	mu sync.RWMutex
}

// MemoryPage represents a page of memory (like OS paging): a group of
// messages that is moved between tiers as a unit.
type MemoryPage struct {
	ID          string
	Messages    []*Message
	LastAccess  time.Time
	AccessCount int
	Priority    int  // Higher priority = less likely to be evicted
	Dirty       bool // Has been modified
	Size        int  // Token count
}

// OSLikeConfig holds configuration for OS-like memory.
type OSLikeConfig struct {
	ActiveLimit  int           // Pages in active memory
	CacheLimit   int           // Pages in cache
	AccessWindow time.Duration // Access tracking window
}

// NewOSLikeMemory creates a new OS-like memory strategy.
// A nil config, or any non-positive field, falls back to the defaults of
// 10 active pages, 20 cached pages, and a 5-minute access window.
func NewOSLikeMemory(config *OSLikeConfig) *OSLikeMemory {
	if config == nil {
		config = &OSLikeConfig{
			ActiveLimit:  10,
			CacheLimit:   20,
			AccessWindow: time.Minute * 5,
		}
	}
	if config.ActiveLimit <= 0 {
		config.ActiveLimit = 10
	}
	if config.CacheLimit <= 0 {
		config.CacheLimit = 20
	}
	if config.AccessWindow <= 0 {
		config.AccessWindow = time.Minute * 5
	}

	return &OSLikeMemory{
		activeMemory: make(map[string]*MemoryPage),
		cache:        make(map[string]*MemoryPage),
		archived:     make(map[string]*MemoryPage),
		lru:          &LRUHeap{},
		activeLimit:  config.ActiveLimit,
		cacheLimit:   config.CacheLimit,
		accessWindow: config.AccessWindow,
	}
}

// AddMessage adds a message using OS-like memory management: the message is
// appended to its time-bucketed page, the page is promoted to active memory,
// and eviction runs if limits are exceeded.
func (o *OSLikeMemory) AddMessage(ctx context.Context, msg *Message) error {
	o.mu.Lock()
	defer o.mu.Unlock()

	pageID := o.getPageID(msg)
	page := o.findPage(pageID)

	if page == nil {
		// Create a new page for this time bucket.
		page = &MemoryPage{
			ID:          pageID,
			Messages:    []*Message{msg},
			LastAccess:  time.Now(),
			AccessCount: 1,
			Priority:    0,
			Dirty:       true,
			Size:        msg.TokenCount,
		}
	} else {
		// Update the existing page.
		page.Messages = append(page.Messages, msg)
		page.LastAccess = time.Now()
		page.AccessCount++
		page.Dirty = true
		page.Size += msg.TokenCount
	}

	// Promote to active memory and drop any stale reference in the lower
	// tiers. (Fix: the original left the old cache/archive entry behind, so
	// an updated page could live in two tiers at once, double-counting it in
	// GetStats and confusing eviction.)
	o.activeMemory[pageID] = page
	delete(o.cache, pageID)
	delete(o.archived, pageID)

	// Manage memory limits (eviction if needed).
	o.evictIfNeeded()
	return nil
}

// GetContext retrieves messages from the memory hierarchy: everything in
// active memory, plus pages "paged in" from the cache when active memory
// yields fewer than 10 messages.
func (o *OSLikeMemory) GetContext(ctx context.Context, query string) ([]*Message, error) {
	o.mu.Lock()
	defer o.mu.Unlock()

	result := make([]*Message, 0)

	// Collect from active memory (highest priority); touching a page
	// refreshes its access metadata.
	for _, page := range o.activeMemory {
		page.LastAccess = time.Now()
		page.AccessCount++
		result = append(result, page.Messages...)
	}

	// If not enough, fetch from cache. Map iteration order is arbitrary, so
	// which cached pages get promoted is not deterministic. Promotion here
	// may temporarily push active memory over its limit; the next AddMessage
	// rebalances.
	if len(result) < 10 {
		for pageID, page := range o.cache {
			// "Page in" from cache to active.
			page.LastAccess = time.Now()
			page.AccessCount++
			result = append(result, page.Messages...)

			// Promote to active memory (deleting the current key during a
			// range over a map is safe in Go).
			o.activeMemory[pageID] = page
			delete(o.cache, pageID)

			if len(result) >= 20 {
				break
			}
		}
	}

	return result, nil
}

// Clear removes all memory across all tiers.
func (o *OSLikeMemory) Clear(ctx context.Context) error {
	o.mu.Lock()
	defer o.mu.Unlock()

	o.activeMemory = make(map[string]*MemoryPage)
	o.cache = make(map[string]*MemoryPage)
	o.archived = make(map[string]*MemoryPage)
	o.lru = &LRUHeap{}
	heap.Init(o.lru)
	return nil
}

// GetStats returns OS-like memory statistics aggregated over all tiers.
func (o *OSLikeMemory) GetStats(ctx context.Context) (*Stats, error) {
	o.mu.RLock()
	defer o.mu.RUnlock()

	activeTokens, cacheTokens, archivedTokens := 0, 0, 0
	activeCount, cacheCount, archivedCount := 0, 0, 0

	for _, page := range o.activeMemory {
		activeTokens += page.Size
		activeCount += len(page.Messages)
	}
	for _, page := range o.cache {
		cacheTokens += page.Size
		cacheCount += len(page.Messages)
	}
	for _, page := range o.archived {
		archivedTokens += page.Size
		archivedCount += len(page.Messages)
	}

	totalMessages := activeCount + cacheCount + archivedCount
	totalTokens := activeTokens + cacheTokens + archivedTokens

	// Fix: guard against 0/0 = NaN when no messages are stored.
	compressionRate := 0.0
	if totalTokens > 0 {
		compressionRate = float64(activeTokens) / float64(totalTokens)
	}

	return &Stats{
		TotalMessages:   totalMessages,
		TotalTokens:     totalTokens,
		ActiveMessages:  activeCount,
		ActiveTokens:    activeTokens,
		CompressionRate: compressionRate,
	}, nil
}

// evictIfNeeded performs eviction when memory limits are exceeded.
// Must be called with the lock held.
func (o *OSLikeMemory) evictIfNeeded() {
	// Demote least-recently-used pages from active to cache while over limit.
	for len(o.activeMemory) > o.activeLimit {
		lruPage := o.findLRUPage(o.activeMemory)
		if lruPage == nil {
			break
		}
		o.cache[lruPage.ID] = lruPage
		delete(o.activeMemory, lruPage.ID)
	}

	// Demote least-recently-used pages from cache to archive while over limit.
	for len(o.cache) > o.cacheLimit {
		lruPage := o.findLRUPage(o.cache)
		if lruPage == nil {
			break
		}
		o.archived[lruPage.ID] = lruPage
		delete(o.cache, lruPage.ID)
	}
}

// findLRUPage finds the least recently used page in the given tier.
func (o *OSLikeMemory) findLRUPage(pages map[string]*MemoryPage) *MemoryPage {
	var lruPage *MemoryPage
	var oldestAccess time.Time

	for _, page := range pages {
		if lruPage == nil || page.LastAccess.Before(oldestAccess) {
			lruPage = page
			oldestAccess = page.LastAccess
		}
	}
	return lruPage
}

// findPage finds a page across all memory tiers, or returns nil.
func (o *OSLikeMemory) findPage(pageID string) *MemoryPage {
	if page, ok := o.activeMemory[pageID]; ok {
		return page
	}
	if page, ok := o.cache[pageID]; ok {
		return page
	}
	if page, ok := o.archived[pageID]; ok {
		return page
	}
	return nil
}

// getPageID determines which page a message belongs to.
// Messages are grouped into 5-minute buckets by timestamp.
func (o *OSLikeMemory) getPageID(msg *Message) string {
	pageTime := msg.Timestamp.Truncate(time.Minute * 5)
	return pageTime.Format("2006-01-02-15:04")
}

// GetMemoryInfo returns detailed information about memory usage per tier.
func (o *OSLikeMemory) GetMemoryInfo() map[string]any {
	o.mu.RLock()
	defer o.mu.RUnlock()

	return map[string]any{
		"active_pages":   len(o.activeMemory),
		"cached_pages":   len(o.cache),
		"archived_pages": len(o.archived),
		"active_limit":   o.activeLimit,
		"cache_limit":    o.cacheLimit,
	}
}

// LRUHeap implements heap.Interface as a min-heap ordered by LastAccess.
type LRUHeap []*MemoryPage

func (h LRUHeap) Len() int { return len(h) }

func (h LRUHeap) Less(i, j int) bool {
	return h[i].LastAccess.Before(h[j].LastAccess)
}

func (h LRUHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

func (h *LRUHeap) Push(x any) {
	*h = append(*h, x.(*MemoryPage))
}

func (h *LRUHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/strategy.go
memory/strategy.go
package memory

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// Message represents a single conversation message.
type Message struct {
	ID         string         // Unique identifier
	Role       string         // "user", "assistant", "system"
	Content    string         // Message content
	Timestamp  time.Time      // When the message was created
	Metadata   map[string]any // Additional metadata
	TokenCount int            // Approximate token count
}

// Memory is the contract every memory management strategy implements.
type Memory interface {
	// AddMessage adds a new message to memory.
	AddMessage(ctx context.Context, msg *Message) error

	// GetContext retrieves relevant context for the current conversation.
	// It returns the messages that should be included in the LLM prompt.
	GetContext(ctx context.Context, query string) ([]*Message, error)

	// Clear removes all messages from memory.
	Clear(ctx context.Context) error

	// GetStats returns statistics about the current memory state.
	GetStats(ctx context.Context) (*Stats, error)
}

// Stats contains statistics about memory usage.
type Stats struct {
	TotalMessages   int     // Total number of messages stored
	TotalTokens     int     // Total tokens across all messages
	ActiveMessages  int     // Messages currently in active context
	ActiveTokens    int     // Tokens in active context
	CompressionRate float64 // Compression rate (if applicable)
}

// NewMessage builds a message for the given role and content, stamping it
// with a unique ID, the current time, and an estimated token count.
func NewMessage(role, content string) *Message {
	return &Message{
		ID:         generateID(),
		Role:       role,
		Content:    content,
		Timestamp:  time.Now(),
		Metadata:   map[string]any{},
		TokenCount: estimateTokens(content),
	}
}

// estimateTokens gives a rough token count for text.
// It assumes ~4 bytes per token; use a real tokenizer (e.g. tiktoken) in
// production.
func estimateTokens(text string) int {
	const bytesPerToken = 4
	return len(text) / bytesPerToken
}

// messageCounter guarantees unique IDs even for messages created within the
// same microsecond.
var messageCounter uint64

// generateID produces a unique message ID from a microsecond timestamp plus
// a monotonically increasing sequence number.
func generateID() string {
	seq := atomic.AddUint64(&messageCounter, 1)
	stamp := time.Now().Format("20060102150405.000000")
	return fmt.Sprintf("%s-%d", stamp, seq)
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/doc.go
memory/doc.go
// Package memory provides various memory management strategies for conversational AI applications.
//
// This package implements multiple approaches to managing conversation history and context,
// from simple buffers to sophisticated OS-inspired memory management with paging and eviction.
// It's designed to help maintain relevant context within token limits while preserving
// important information from long conversations.
//
// # Core Interface
//
// The Memory interface defines the contract that all memory strategies must implement:
//
// - AddMessage: Add a new message to memory
// - GetContext: Retrieve relevant context for the current query
// - Clear: Remove all messages from memory
// - GetStats: Get statistics about memory usage
//
// # Available Memory Strategies
//
// ## Buffer Memory
// Simple first-in-first-out buffer with configurable size:
//
// buffer := memory.NewBufferMemory(100) // Keep last 100 messages
// buffer.AddMessage(ctx, message)
// context, _ := buffer.GetContext(ctx, "current query")
//
// ## Sliding Window Memory
// Maintains a sliding window of the most recent messages:
//
// window := memory.NewSlidingWindowMemory(50) // Keep the 50 most recent messages
//
// ## Summarization Memory
// Automatically summarizes older messages to save tokens:
//
// summ := memory.NewSummarizationMemory(&memory.SummarizationConfig{
//     RecentWindowSize: 10, // Keep 10 recent messages verbatim
//     SummarizeAfter:   20, // Summarize once more than 20 accumulate
// })
//
// ## Hierarchical Memory
// Multi-level memory with different retention policies:
//
// hierarchical := memory.NewHierarchicalMemory(
//     &memory.Config{
//         WorkingMemorySize: 50,
//         LongTermSize:      1000,
//         ArchiveSize:       10000,
//     },
// )
//
// ## OS-Inspired Memory
// Sophisticated memory management with active, cached, and archived pages:
//
// osMemory := memory.NewOSLikeMemory(&memory.OSLikeConfig{
//     ActiveLimit:  100,
//     CacheLimit:   500,
//     AccessWindow: time.Hour,
// })
//
// ## Graph-Based Memory
// Organizes messages as a graph for better context retrieval:
//
// graphMemory := memory.NewGraphBasedMemory(
// embeddingModel, // &memory.GraphConfig{ // MaxNodes: 1000, // SimilarityThreshold: 0.7, // }, // ) // // # Message Structure // // Each message contains: // // type Message struct { // ID string // Unique identifier // Role string // "user", "assistant", "system" // Content string // Message content // Timestamp time.Time // When created // Metadata map[string]any // Additional metadata // TokenCount int // Approximate token count // } // // # Example Usage // // ## Basic Buffer Memory // // import ( // "context" // "time" // // "github.com/smallnest/langgraphgo/memory" // ) // // ctx := context.Background() // mem := memory.NewBufferMemory(50) // // // Add messages // mem.AddMessage(ctx, memory.NewMessage("user", "Hello!")) // mem.AddMessage(ctx, memory.NewMessage("assistant", "Hi there!")) // // // Get context for next query // context, err := mem.GetContext(ctx, "How are you?") // if err != nil { // return err // } // // // Use context in LLM prompt // for _, msg := range context { // prompt += fmt.Sprintf("%s: %s\n", msg.Role, msg.Content) // } // // ## Summarization Memory // // // Requires an LLM client that implements the Summarizer interface // type MyLLM struct{} // func (m *MyLLM) Summarize(ctx context.Context, messages []*memory.Message) (string, error) { // // Implementation for summarizing messages // return "", nil // } // // llm := &MyLLM{} // mem := memory.NewSummarizationMemory(llm, 2000) // 2000 token limit // // // Add many messages - older ones will be summarized // for i := 0; i < 100; i++ { // mem.AddMessage(ctx, &memory.Message{ // Role: "user", // Content: fmt.Sprintf("Message %d", i), // }) // } // // // Context will include recent messages + summaries of older ones // context, _ := mem.GetContext(ctx, "latest query") // // ## Hierarchical Memory // // config := &memory.HierarchicalConfig{ // WorkingMemorySize: 20, // Recent messages // LongTermSize: 200, // Important messages // ArchiveSize: 2000, // All other messages // 
ImportanceThreshold: 0.5, // } // // mem := memory.NewHierarchicalMemory(config) // // // Messages with metadata can be marked as important // mem.AddMessage(ctx, &memory.Message{ // Role: "user", // Content: "Critical information", // Metadata: map[string]any{"importance": 0.9}, // }) // // # Memory Statistics // // All implementations provide statistics: // // stats, _ := mem.GetStats(ctx) // fmt.Printf("Total messages: %d\n", stats.TotalMessages) // fmt.Printf("Total tokens: %d\n", stats.TotalTokens) // fmt.Printf("Active tokens: %d\n", stats.ActiveTokens) // fmt.Printf("Compression rate: %.2f\n", stats.CompressionRate) // // # Integration with LangChain // // The package includes adapters for LangChain compatibility: // // // Convert to LangChain ChatMemory // langchainMem := memory.NewLangchainAdapter(mem) // // # Compression Strategies // // For long conversations, the package provides compression: // // compressor := memory.NewSemanticCompressor(embeddings, 0.3) // compressed := compressor.Compress(messages) // // # Retrieval-Augmented Memory // // Combine with vector storage for semantic retrieval: // // retriever := memory.NewRetrievalMemory( // vectorStore, // embeddingModel, // &memory.RetrievalConfig{ // TopK: 5, // MinSimilarity: 0.7, // ContextWindow: 4000, // }, // ) // // # Choosing a Strategy // // - Buffer: Simple conversations, fixed context size // - Sliding Window: Need some context continuity // - Summarization: Long conversations, need to preserve all information // - Hierarchical: Complex applications with different retention needs // - OS-Inspired: Performance-critical applications with access patterns // - Graph-Based: Semantic relationships between messages matter // - Retrieval: Need to find relevant messages based on content similarity // // # Thread Safety // // All memory implementations are thread-safe and can be used concurrently from multiple // goroutines. They use internal mutexes or atomic operations for synchronization. 
// // # Custom Memory Strategies // // Implement the Memory interface for custom strategies: // // type CustomMemory struct { // // Custom fields // } // // func (m *CustomMemory) AddMessage(ctx context.Context, msg *memory.Message) error { // // Custom implementation // return nil // } // // func (m *CustomMemory) GetContext(ctx context.Context, query string) ([]*memory.Message, error) { // // Custom retrieval logic // return nil, nil // } // // func (m *CustomMemory) Clear(ctx context.Context) error { // // Clear memory // return nil // } // // func (m *CustomMemory) GetStats(ctx context.Context) (*memory.Stats, error) { // // Return statistics // return nil, nil // } // // # Best Practices // // 1. Choose appropriate strategy based on your use case // 2. Monitor memory usage with GetStats() // 3. Set reasonable limits to prevent memory bloat // 4. Use metadata to mark important messages // 5. Consider token costs when using LLM-based summarization // 6. Test with realistic conversation lengths // 7. Clear memory between unrelated conversations package memory
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/sequential.go
memory/sequential.go
package memory

import (
	"context"
	"sync"
)

// SequentialMemory implements the "Keep-It-All" strategy: the complete
// conversation history is stored verbatim, in chronological order.
// Pros: Perfect recall of all interactions
// Cons: Token costs grow unbounded with conversation length
type SequentialMemory struct {
	messages []*Message
	mu       sync.RWMutex
}

// NewSequentialMemory creates a new sequential memory strategy.
func NewSequentialMemory() *SequentialMemory {
	return &SequentialMemory{messages: make([]*Message, 0)}
}

// AddMessage appends a new message to the conversation history.
func (s *SequentialMemory) AddMessage(ctx context.Context, msg *Message) error {
	s.mu.Lock()
	s.messages = append(s.messages, msg)
	s.mu.Unlock()
	return nil
}

// GetContext returns every stored message in chronological order; the query
// parameter is ignored for this strategy. A copy is returned so callers
// cannot mutate the internal slice.
func (s *SequentialMemory) GetContext(ctx context.Context, query string) ([]*Message, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	snapshot := make([]*Message, len(s.messages))
	copy(snapshot, s.messages)
	return snapshot, nil
}

// Clear removes all messages from memory.
func (s *SequentialMemory) Clear(ctx context.Context) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.messages = make([]*Message, 0)
	return nil
}

// GetStats returns statistics about the sequential memory. Every stored
// message is always active, so the compression rate is 1.0.
func (s *SequentialMemory) GetStats(ctx context.Context) (*Stats, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	tokens := 0
	for _, m := range s.messages {
		tokens += m.TokenCount
	}

	return &Stats{
		TotalMessages:   len(s.messages),
		TotalTokens:     tokens,
		ActiveMessages:  len(s.messages),
		ActiveTokens:    tokens,
		CompressionRate: 1.0, // No compression
	}, nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/sliding_window.go
memory/sliding_window.go
package memory

import (
	"context"
	"sync"
)

// SlidingWindowMemory retains only the most recent N messages.
// Pros: Bounded context size, prevents unbounded token growth
// Cons: Loses older context, may forget important earlier information
type SlidingWindowMemory struct {
	messages   []*Message
	windowSize int // Maximum number of messages to retain
	mu         sync.RWMutex
}

// NewSlidingWindowMemory creates a new sliding window memory strategy.
// windowSize determines how many recent messages to keep; non-positive
// values fall back to the default of 10.
func NewSlidingWindowMemory(windowSize int) *SlidingWindowMemory {
	if windowSize <= 0 {
		windowSize = 10
	}
	return &SlidingWindowMemory{
		messages:   make([]*Message, 0, windowSize),
		windowSize: windowSize,
	}
}

// AddMessage appends a message, discarding the oldest entries once the
// window overflows.
func (w *SlidingWindowMemory) AddMessage(ctx context.Context, msg *Message) error {
	w.mu.Lock()
	defer w.mu.Unlock()

	w.messages = append(w.messages, msg)
	if overflow := len(w.messages) - w.windowSize; overflow > 0 {
		w.messages = w.messages[overflow:]
	}
	return nil
}

// GetContext returns a copy of the messages currently inside the window;
// the query parameter is ignored for this strategy.
func (w *SlidingWindowMemory) GetContext(ctx context.Context, query string) ([]*Message, error) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	snapshot := make([]*Message, len(w.messages))
	copy(snapshot, w.messages)
	return snapshot, nil
}

// Clear removes all messages from memory.
func (w *SlidingWindowMemory) Clear(ctx context.Context) error {
	w.mu.Lock()
	defer w.mu.Unlock()

	w.messages = make([]*Message, 0, w.windowSize)
	return nil
}

// GetStats returns statistics about the sliding window memory. Every
// retained message is active, so the compression rate is always 1.0.
func (w *SlidingWindowMemory) GetStats(ctx context.Context) (*Stats, error) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	tokens := 0
	for _, m := range w.messages {
		tokens += m.TokenCount
	}

	return &Stats{
		TotalMessages:   len(w.messages),
		TotalTokens:     tokens,
		ActiveMessages:  len(w.messages),
		ActiveTokens:    tokens,
		CompressionRate: 1.0,
	}, nil
}

// SetWindowSize updates the window size (non-positive values reset it to
// the default of 10). If the new size is smaller than the number of stored
// messages, the oldest ones are dropped immediately.
func (w *SlidingWindowMemory) SetWindowSize(size int) {
	w.mu.Lock()
	defer w.mu.Unlock()

	if size <= 0 {
		size = 10
	}
	w.windowSize = size

	if overflow := len(w.messages) - size; overflow > 0 {
		w.messages = w.messages[overflow:]
	}
}

// GetWindowSize returns the current window size.
func (w *SlidingWindowMemory) GetWindowSize() int {
	w.mu.RLock()
	defer w.mu.RUnlock()
	return w.windowSize
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/graph_based.go
memory/graph_based.go
package memory

import (
	"context"
	"fmt"
	"sync"
)

// GraphNode represents a node in the conversation graph.
type GraphNode struct {
	Message     *Message
	Connections []string // IDs of connected messages
	Weight      float64  // Importance/relevance weight
}

// GraphBasedMemory models conversations as knowledge graphs: messages become
// nodes, messages sharing an extracted topic/entity are linked, and retrieval
// walks the graph outward from query-relevant seeds.
// Pros: Captures relationships between topics, better context understanding
// Cons: More complex, requires relationship tracking
type GraphBasedMemory struct {
	nodes     map[string]*GraphNode // Message ID -> Node
	topK      int                   // Number of messages to retrieve
	mu        sync.RWMutex
	relations map[string][]string // Topic/entity -> related message IDs

	// RelationExtractor identifies relationships between messages.
	// In production, this could use NER or topic modeling.
	RelationExtractor func(msg *Message) []string
}

// GraphConfig holds configuration for graph-based memory.
type GraphConfig struct {
	TopK              int                         // Number of messages to retrieve
	RelationExtractor func(msg *Message) []string // Custom relation extractor
}

// NewGraphBasedMemory creates a new graph-based memory strategy.
// A nil config (or non-positive TopK) falls back to TopK = 10; a nil
// RelationExtractor falls back to the keyword-based defaultRelationExtractor.
func NewGraphBasedMemory(config *GraphConfig) *GraphBasedMemory {
	if config == nil {
		config = &GraphConfig{TopK: 10}
	}
	if config.TopK <= 0 {
		config.TopK = 10
	}

	extractor := config.RelationExtractor
	if extractor == nil {
		extractor = defaultRelationExtractor
	}

	return &GraphBasedMemory{
		nodes:             make(map[string]*GraphNode),
		topK:              config.TopK,
		relations:         make(map[string][]string),
		RelationExtractor: extractor,
	}
}

// AddMessage adds a message to the graph and establishes bidirectional
// connections to every existing message that shares one of its topics.
func (g *GraphBasedMemory) AddMessage(ctx context.Context, msg *Message) error {
	g.mu.Lock()
	defer g.mu.Unlock()

	node := &GraphNode{
		Message:     msg,
		Connections: make([]string, 0),
		Weight:      1.0,
	}

	// Extract entities/topics from the message.
	topics := g.RelationExtractor(msg)

	g.nodes[msg.ID] = node

	// Link to existing messages that share a topic. A pair sharing several
	// topics is linked once per shared topic; the BFS in GetContext tolerates
	// duplicate edges via its visited set.
	for _, topic := range topics {
		if relatedIDs, exists := g.relations[topic]; exists {
			for _, relatedID := range relatedIDs {
				node.Connections = append(node.Connections, relatedID)
				if relatedNode, ok := g.nodes[relatedID]; ok {
					relatedNode.Connections = append(relatedNode.Connections, msg.ID)
				}
			}
		}
		// Index this message under the topic.
		g.relations[topic] = append(g.relations[topic], msg.ID)
	}

	return nil
}

// GetContext retrieves up to topK messages by breadth-first traversal of the
// graph, seeded with messages that share a topic with the query. When no
// topic matches, up to three arbitrary nodes are used as seeds (Go map
// iteration order is unspecified, so the fallback is not "most recent").
func (g *GraphBasedMemory) GetContext(ctx context.Context, query string) ([]*Message, error) {
	g.mu.RLock()
	defer g.mu.RUnlock()

	if len(g.nodes) == 0 {
		return []*Message{}, nil
	}

	// Extract topics from the query itself.
	queryTopics := g.RelationExtractor(&Message{Content: query})

	// Seed the traversal with messages related to the query's topics.
	seedIDs := make(map[string]bool)
	for _, topic := range queryTopics {
		if relatedIDs, exists := g.relations[topic]; exists {
			for _, id := range relatedIDs {
				seedIDs[id] = true
			}
		}
	}

	// Fall back to up to three arbitrary nodes when nothing matched.
	if len(seedIDs) == 0 {
		count := 0
		for id := range g.nodes {
			seedIDs[id] = true
			count++
			if count >= 3 {
				break
			}
		}
	}

	// Standard BFS over the connection lists, capped at topK results.
	visited := make(map[string]bool)
	queue := make([]string, 0)
	result := make([]*Message, 0)
	for id := range seedIDs {
		queue = append(queue, id)
	}

	for len(queue) > 0 && len(result) < g.topK {
		currentID := queue[0]
		queue = queue[1:]

		if visited[currentID] {
			continue
		}
		visited[currentID] = true

		if node, ok := g.nodes[currentID]; ok {
			result = append(result, node.Message)
			for _, connID := range node.Connections {
				if !visited[connID] {
					queue = append(queue, connID)
				}
			}
		}

		if len(result) >= g.topK {
			break
		}
	}

	return result, nil
}

// Clear removes all nodes and relationships.
func (g *GraphBasedMemory) Clear(ctx context.Context) error {
	g.mu.Lock()
	defer g.mu.Unlock()

	g.nodes = make(map[string]*GraphNode)
	g.relations = make(map[string][]string)
	return nil
}

// GetStats returns statistics about the graph. Active figures are
// approximations: activeTokens assumes the average message size times topK,
// and the compression rate is topK over the node count.
func (g *GraphBasedMemory) GetStats(ctx context.Context) (*Stats, error) {
	g.mu.RLock()
	defer g.mu.RUnlock()

	totalTokens := 0
	for _, node := range g.nodes {
		totalTokens += node.Message.TokenCount
	}

	activeTokens := 0
	compressionRate := 0.0
	if len(g.nodes) > 0 {
		activeTokens = totalTokens / len(g.nodes) * g.topK
		compressionRate = float64(g.topK) / float64(len(g.nodes))
	}

	return &Stats{
		TotalMessages:   len(g.nodes),
		TotalTokens:     totalTokens,
		ActiveMessages:  g.topK,
		ActiveTokens:    activeTokens,
		CompressionRate: compressionRate,
	}, nil
}

// GetRelationships returns all topics and their associated message counts.
func (g *GraphBasedMemory) GetRelationships() map[string]int {
	g.mu.RLock()
	defer g.mu.RUnlock()

	result := make(map[string]int)
	for topic, ids := range g.relations {
		result[topic] = len(ids)
	}
	return result
}

// defaultRelationExtractor extracts simple keywords as topics.
// In production, use NER, topic modeling, or entity extraction.
func defaultRelationExtractor(msg *Message) []string {
	content := msg.Content

	// Common topics/keywords (very basic implementation).
	keywords := []string{"price", "feature", "bug", "question", "help", "error"}

	topics := make([]string, 0)
	for _, keyword := range keywords {
		if contains(content, keyword) {
			topics = append(topics, keyword)
		}
	}

	// Fall back to the sender's role when no keyword matched.
	if len(topics) == 0 {
		topics = append(topics, fmt.Sprintf("role:%s", msg.Role))
	}
	return topics
}

// contains reports whether text contains substr, comparing ASCII letters
// case-insensitively (same A-Z-only folding as before). Fix: the original
// rebuilt a lowercased copy of each string one character at a time, which is
// O(n^2) allocations; this version compares bytes in place and allocates
// nothing.
func contains(text, substr string) bool {
	n, m := len(text), len(substr)
	if m == 0 {
		return true
	}
	for i := 0; i+m <= n; i++ {
		j := 0
		for j < m && lowerASCII(text[i+j]) == lowerASCII(substr[j]) {
			j++
		}
		if j == m {
			return true
		}
	}
	return false
}

// lowerASCII folds a single ASCII byte to lower case; non-A-Z bytes are
// returned unchanged.
func lowerASCII(c byte) byte {
	if c >= 'A' && c <= 'Z' {
		return c + 32
	}
	return c
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/summarization.go
memory/summarization.go
package memory

import (
	"context"
	"fmt"
	"strings"
	"sync"
)

// SummarizationMemory condenses older messages into summaries.
// Pros: Maintains historical context while reducing token count.
// Cons: May lose specific details in summarization.
type SummarizationMemory struct {
	recentMessages   []*Message   // Recent messages kept verbatim
	summaries        []string     // Condensed summaries of older conversations
	recentWindowSize int          // How many recent messages to keep full
	summarizeAfter   int          // Summarize when recent messages exceed this
	mu               sync.RWMutex // Guards all fields above

	// Summarizer is a function that takes messages and returns a summary.
	// In production, this would call an LLM.
	Summarizer func(ctx context.Context, messages []*Message) (string, error)
}

// SummarizationConfig holds configuration for summarization memory.
type SummarizationConfig struct {
	RecentWindowSize int // Number of recent messages to keep
	SummarizeAfter   int // Trigger summarization after this many messages
	Summarizer       func(ctx context.Context, messages []*Message) (string, error) // Custom summarizer
}

// NewSummarizationMemory creates a new summarization-based memory strategy.
// A nil config, or non-positive config values, fall back to defaults
// (window of 10 recent messages, summarization after 20).
func NewSummarizationMemory(config *SummarizationConfig) *SummarizationMemory {
	if config == nil {
		config = &SummarizationConfig{
			RecentWindowSize: 10,
			SummarizeAfter:   20,
		}
	}
	if config.RecentWindowSize <= 0 {
		config.RecentWindowSize = 10
	}
	if config.SummarizeAfter <= 0 {
		config.SummarizeAfter = 20
	}

	summarizer := config.Summarizer
	if summarizer == nil {
		summarizer = defaultSummarizer
	}

	return &SummarizationMemory{
		recentMessages:   make([]*Message, 0),
		summaries:        make([]string, 0),
		recentWindowSize: config.RecentWindowSize,
		summarizeAfter:   config.SummarizeAfter,
		Summarizer:       summarizer,
	}
}

// AddMessage adds a new message and triggers summarization if needed.
func (s *SummarizationMemory) AddMessage(ctx context.Context, msg *Message) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.recentMessages = append(s.recentMessages, msg)

	// Check if we need to summarize.
	if len(s.recentMessages) > s.summarizeAfter {
		return s.triggerSummarization(ctx)
	}

	return nil
}

// triggerSummarization creates a summary of older messages.
// Must be called with lock held.
func (s *SummarizationMemory) triggerSummarization(ctx context.Context) error {
	// Determine how many messages to summarize, keeping the most recent
	// recentWindowSize messages verbatim.
	toSummarize := len(s.recentMessages) - s.recentWindowSize
	if toSummarize <= 0 {
		return nil
	}

	// Get messages to summarize.
	messagesToSummarize := s.recentMessages[:toSummarize]

	// Generate summary.
	summary, err := s.Summarizer(ctx, messagesToSummarize)
	if err != nil {
		return fmt.Errorf("summarization failed: %w", err)
	}

	// Store summary and keep only recent messages.
	s.summaries = append(s.summaries, summary)
	s.recentMessages = s.recentMessages[toSummarize:]

	return nil
}

// GetContext returns summaries (as system messages) plus recent messages.
func (s *SummarizationMemory) GetContext(ctx context.Context, query string) ([]*Message, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	result := make([]*Message, 0, len(s.summaries)+len(s.recentMessages))

	// Add summaries as system messages.
	for i, summary := range s.summaries {
		summaryMsg := &Message{
			ID:         fmt.Sprintf("summary_%d", i),
			Role:       "system",
			Content:    fmt.Sprintf("[Summary of earlier conversation]: %s", summary),
			TokenCount: estimateTokens(summary),
		}
		// Anchor the summary at the first recent message's timestamp when
		// one exists; otherwise leave the zero value. The previous code
		// indexed recentMessages[0] unconditionally, which panicked when
		// summaries existed but no recent messages remained.
		if len(s.recentMessages) > 0 {
			summaryMsg.Timestamp = s.recentMessages[0].Timestamp
		}
		result = append(result, summaryMsg)
	}

	// Add recent messages.
	result = append(result, s.recentMessages...)

	return result, nil
}

// Clear removes all messages and summaries.
func (s *SummarizationMemory) Clear(ctx context.Context) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.recentMessages = make([]*Message, 0)
	s.summaries = make([]string, 0)
	return nil
}

// GetStats returns statistics about the summarization memory.
// CompressionRate is a rough estimate because the original token counts of
// already-summarized messages are no longer available.
func (s *SummarizationMemory) GetStats(ctx context.Context) (*Stats, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// Calculate tokens in recent messages.
	recentTokens := 0
	for _, msg := range s.recentMessages {
		recentTokens += msg.TokenCount
	}

	// Calculate tokens in summaries.
	summaryTokens := 0
	for _, summary := range s.summaries {
		summaryTokens += estimateTokens(summary)
	}

	totalTokens := recentTokens + summaryTokens

	// Estimate compression rate.
	// Heuristic: assume each summary represents ~summarizeAfter messages of
	// ~100 tokens each.
	estimatedOriginalTokens := len(s.summaries)*s.summarizeAfter*100 + recentTokens
	compressionRate := 1.0
	if estimatedOriginalTokens > 0 {
		compressionRate = float64(totalTokens) / float64(estimatedOriginalTokens)
	}

	return &Stats{
		TotalMessages:   len(s.summaries) + len(s.recentMessages),
		TotalTokens:     totalTokens,
		ActiveMessages:  len(s.recentMessages),
		ActiveTokens:    totalTokens,
		CompressionRate: compressionRate,
	}, nil
}

// defaultSummarizer provides a simple summarization function.
// In production, this should call an LLM.
func defaultSummarizer(ctx context.Context, messages []*Message) (string, error) {
	if len(messages) == 0 {
		return "", nil
	}

	// Simple concatenation with role prefixes.
	var parts []string
	for _, msg := range messages {
		// Truncate long messages.
		content := msg.Content
		if len(content) > 200 {
			content = content[:200] + "..."
		}
		parts = append(parts, fmt.Sprintf("%s: %s", msg.Role, content))
	}

	summary := fmt.Sprintf("Conversation with %d exchanges covering: %s",
		len(messages), strings.Join(parts, "; "))

	return summary, nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false
smallnest/langgraphgo
https://github.com/smallnest/langgraphgo/blob/600df7fe3e6254f2329f606732feaecfbd52d9f2/memory/compression.go
memory/compression.go
package memory

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// CompressionMemory periodically compresses and consolidates memory.
// Pros: Maintains long-term context efficiently, removes redundancy.
// Cons: Compression requires LLM calls, may lose granular details.
type CompressionMemory struct {
	messages           []*Message
	compressedBlocks   []*CompressedBlock
	compressionTrigger int           // Compress after N messages
	consolidateAfter   time.Duration // Consolidate blocks after duration
	lastConsolidation  time.Time
	mu                 sync.RWMutex

	// Compressor compresses a group of messages into a single block
	Compressor func(ctx context.Context, messages []*Message) (*CompressedBlock, error)

	// Consolidator merges multiple compressed blocks
	Consolidator func(ctx context.Context, blocks []*CompressedBlock) (*CompressedBlock, error)
}

// CompressedBlock represents a compressed group of messages.
type CompressedBlock struct {
	ID               string    // Unique block ID
	Summary          string    // Compressed summary
	OriginalCount    int       // Number of original messages
	OriginalTokens   int       // Original token count
	CompressedTokens int       // Compressed token count
	TimeRange        TimeRange // Time range of messages
	Topics           []string  // Main topics covered
}

// TimeRange represents a time period.
type TimeRange struct {
	Start time.Time
	End   time.Time
}

// CompressionConfig holds configuration for compression memory.
type CompressionConfig struct {
	CompressionTrigger int           // Messages before compression
	ConsolidateAfter   time.Duration // Duration before consolidation
	Compressor         func(ctx context.Context, messages []*Message) (*CompressedBlock, error)
	Consolidator       func(ctx context.Context, blocks []*CompressedBlock) (*CompressedBlock, error)
}

// NewCompressionMemory creates a new compression-based memory strategy.
// A nil config, or non-positive values, fall back to sensible defaults
// (compress every 20 messages, consolidate hourly).
func NewCompressionMemory(config *CompressionConfig) *CompressionMemory {
	if config == nil {
		config = &CompressionConfig{}
	}
	if config.CompressionTrigger <= 0 {
		config.CompressionTrigger = 20
	}
	if config.ConsolidateAfter <= 0 {
		config.ConsolidateAfter = time.Hour
	}

	compress := config.Compressor
	if compress == nil {
		compress = defaultCompressor
	}
	merge := config.Consolidator
	if merge == nil {
		merge = defaultConsolidator
	}

	return &CompressionMemory{
		messages:           make([]*Message, 0),
		compressedBlocks:   make([]*CompressedBlock, 0),
		compressionTrigger: config.CompressionTrigger,
		consolidateAfter:   config.ConsolidateAfter,
		lastConsolidation:  time.Now(),
		Compressor:         compress,
		Consolidator:       merge,
	}
}

// AddMessage adds a message and triggers compression if needed.
func (c *CompressionMemory) AddMessage(ctx context.Context, msg *Message) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.messages = append(c.messages, msg)

	// Compress once the buffer reaches the trigger size.
	if len(c.messages) >= c.compressionTrigger {
		if err := c.compress(ctx); err != nil {
			return fmt.Errorf("compression failed: %w", err)
		}
	}

	// Consolidate older blocks once enough time has elapsed.
	if time.Since(c.lastConsolidation) >= c.consolidateAfter {
		if err := c.consolidate(ctx); err != nil {
			return fmt.Errorf("consolidation failed: %w", err)
		}
	}

	return nil
}

// compress compresses current messages into a block.
// Must be called with lock held.
func (c *CompressionMemory) compress(ctx context.Context) error {
	if len(c.messages) == 0 {
		return nil
	}

	block, err := c.Compressor(ctx, c.messages)
	if err != nil {
		return err
	}

	// Store the new block and start a fresh message buffer.
	c.compressedBlocks = append(c.compressedBlocks, block)
	c.messages = make([]*Message, 0)

	return nil
}

// consolidate merges old compressed blocks, keeping the most recent block
// separate. Must be called with lock held.
func (c *CompressionMemory) consolidate(ctx context.Context) error {
	if len(c.compressedBlocks) < 2 {
		// Nothing to merge; just reset the clock.
		c.lastConsolidation = time.Now()
		return nil
	}

	newest := c.compressedBlocks[len(c.compressedBlocks)-1]
	older := c.compressedBlocks[:len(c.compressedBlocks)-1]

	merged, err := c.Consolidator(ctx, older)
	if err != nil {
		return err
	}

	// Replace the older blocks with the single consolidated block.
	c.compressedBlocks = []*CompressedBlock{merged, newest}
	c.lastConsolidation = time.Now()
	return nil
}

// GetContext returns compressed blocks (as system messages) followed by the
// recent uncompressed messages.
func (c *CompressionMemory) GetContext(ctx context.Context, query string) ([]*Message, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	result := make([]*Message, 0, len(c.compressedBlocks)+len(c.messages))

	for i, block := range c.compressedBlocks {
		result = append(result, &Message{
			ID:      fmt.Sprintf("block_%d", i),
			Role:    "system",
			Content: fmt.Sprintf("[Compressed Memory Block %d]: %s", i+1, block.Summary),
			Metadata: map[string]any{
				"block_id":       block.ID,
				"original_count": block.OriginalCount,
				"topics":         block.Topics,
			},
			TokenCount: block.CompressedTokens,
			Timestamp:  block.TimeRange.End,
		})
	}

	result = append(result, c.messages...)
	return result, nil
}

// Clear removes all memory.
func (c *CompressionMemory) Clear(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.messages = make([]*Message, 0)
	c.compressedBlocks = make([]*CompressedBlock, 0)
	c.lastConsolidation = time.Now()
	return nil
}

// GetStats returns compression statistics.
func (c *CompressionMemory) GetStats(ctx context.Context) (*Stats, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	// Token totals across all compressed blocks.
	var blockOriginal, blockCompressed int
	for _, block := range c.compressedBlocks {
		blockOriginal += block.OriginalTokens
		blockCompressed += block.CompressedTokens
	}

	// Tokens in the not-yet-compressed buffer.
	var pendingTokens int
	for _, msg := range c.messages {
		pendingTokens += msg.TokenCount
	}

	totalCompressed := blockCompressed + pendingTokens
	totalOriginal := blockOriginal + pendingTokens

	rate := 1.0
	if totalOriginal > 0 {
		rate = float64(totalCompressed) / float64(totalOriginal)
	}

	msgCount := len(c.messages)
	for _, block := range c.compressedBlocks {
		msgCount += block.OriginalCount
	}

	return &Stats{
		TotalMessages:   msgCount,
		TotalTokens:     totalOriginal,
		ActiveMessages:  len(c.compressedBlocks) + len(c.messages),
		ActiveTokens:    totalCompressed,
		CompressionRate: rate,
	}, nil
}

// ForceCompression manually triggers compression.
func (c *CompressionMemory) ForceCompression(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.compress(ctx)
}

// ForceConsolidation manually triggers consolidation.
func (c *CompressionMemory) ForceConsolidation(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.consolidate(ctx)
}

// defaultCompressor provides a simple compression function.
func defaultCompressor(ctx context.Context, messages []*Message) (*CompressedBlock, error) {
	if len(messages) == 0 {
		return nil, fmt.Errorf("no messages to compress")
	}

	first, last := messages[0], messages[len(messages)-1]

	tokens := 0
	for _, msg := range messages {
		tokens += msg.TokenCount
	}

	// Simple placeholder summary describing the covered time window.
	summary := fmt.Sprintf("Compressed %d messages from %s to %s",
		len(messages),
		first.Timestamp.Format("15:04"),
		last.Timestamp.Format("15:04"))

	return &CompressedBlock{
		ID:             generateID(),
		Summary:        summary,
		OriginalCount:  len(messages),
		OriginalTokens: tokens,
		// Estimate compressed size as roughly one third of the original.
		CompressedTokens: tokens / 3,
		TimeRange: TimeRange{
			Start: first.Timestamp,
			End:   last.Timestamp,
		},
		Topics: []string{"general"},
	}, nil
}

// defaultConsolidator merges multiple blocks into one.
func defaultConsolidator(ctx context.Context, blocks []*CompressedBlock) (*CompressedBlock, error) {
	if len(blocks) == 0 {
		return nil, fmt.Errorf("no blocks to consolidate")
	}

	var msgCount, origTokens, compTokens int
	topicSet := make(map[string]bool)
	start := blocks[0].TimeRange.Start
	end := blocks[0].TimeRange.End

	for _, block := range blocks {
		msgCount += block.OriginalCount
		origTokens += block.OriginalTokens
		compTokens += block.CompressedTokens
		for _, topic := range block.Topics {
			topicSet[topic] = true
		}
		if block.TimeRange.Start.Before(start) {
			start = block.TimeRange.Start
		}
		if block.TimeRange.End.After(end) {
			end = block.TimeRange.End
		}
	}

	topics := make([]string, 0, len(topicSet))
	for topic := range topicSet {
		topics = append(topics, topic)
	}

	return &CompressedBlock{
		ID: generateID(),
		Summary: fmt.Sprintf("Consolidated %d blocks covering %d messages",
			len(blocks), msgCount),
		OriginalCount:  msgCount,
		OriginalTokens: origTokens,
		// Further compress: estimate two thirds of the combined size.
		CompressedTokens: (compTokens * 2) / 3,
		TimeRange: TimeRange{
			Start: start,
			End:   end,
		},
		Topics: topics,
	}, nil
}
go
MIT
600df7fe3e6254f2329f606732feaecfbd52d9f2
2026-01-07T10:38:05.929544Z
false