"""Callback utilities for agent execution."""
import inspect
from typing import Optional, Callable
from .models import ExecutionContext
from .llm import LlmRequest, LlmResponse
from .memory import count_tokens
def create_optimizer_callback(
    apply_optimization: Callable,
    threshold: int = 50000,
    model_id: str = "gpt-4"
) -> Callable:
    """Factory function that creates a callback applying optimization strategy.

    Args:
        apply_optimization: Function that modifies the LlmRequest in place.
            May be either a plain function or a coroutine function.
        threshold: Token count threshold to trigger optimization
        model_id: Model identifier for token counting

    Returns:
        Callback function that can be used as before_llm_callback
    """
    async def callback(
        context: ExecutionContext,
        request: LlmRequest
    ) -> Optional[LlmResponse]:
        # Skip optimization entirely while the request stays under budget.
        current_tokens = count_tokens(request, model_id=model_id)
        if current_tokens >= threshold:
            # The strategy may be sync or async; await only if it
            # handed back an awaitable (inspect.isawaitable covers both).
            maybe_awaitable = apply_optimization(context, request)
            if inspect.isawaitable(maybe_awaitable):
                await maybe_awaitable
        # Always return None so the LLM call proceeds with the
        # (possibly mutated) request rather than being short-circuited.
        return None

    return callback