"""Performance metrics collection service."""
import time
import psutil
from contextlib import contextmanager
from typing import Generator, Callable, Any, List, Tuple
from ..models.state import SearchMetrics


class MetricsCollector:
"""Collects performance metrics during search execution."""
def __init__(self):
self.start_time: float = 0
self.end_time: float = 0
self.start_memory: int = 0
self.end_memory: int = 0
self.peak_memory: int = 0
        self.memory_samples: List[int] = []
        self.cpu_samples: List[float] = []
self._process = psutil.Process()
def start(self) -> None:
"""Start collecting metrics."""
self.start_time = time.perf_counter()
self.start_memory = self._process.memory_info().rss
self.peak_memory = self.start_memory
self.memory_samples = [self.start_memory]
self.cpu_samples = []
        # Prime psutil's CPU counter; the first cpu_percent() call always
        # returns a meaningless 0.0 and establishes the measurement baseline.
        self._process.cpu_percent()
def sample(self) -> None:
"""Take a sample of current metrics."""
current_memory = self._process.memory_info().rss
self.memory_samples.append(current_memory)
self.peak_memory = max(self.peak_memory, current_memory)
self.cpu_samples.append(self._process.cpu_percent())
def stop(self) -> None:
"""Stop collecting metrics."""
self.end_time = time.perf_counter()
self.end_memory = self._process.memory_info().rss
self.memory_samples.append(self.end_memory)
self.peak_memory = max(self.peak_memory, self.end_memory)
# Final CPU sample
self.cpu_samples.append(self._process.cpu_percent())
@property
def runtime_ms(self) -> float:
"""Get runtime in milliseconds."""
return (self.end_time - self.start_time) * 1000
@property
def memory_kb(self) -> float:
"""Get memory usage in KB (peak minus baseline)."""
        # peak_memory already tracks the maximum across start/sample/stop,
        # so no separate scan of memory_samples is needed.
        return (self.peak_memory - self.start_memory) / 1024
@property
def cpu_percent(self) -> float:
"""Get average CPU percentage."""
if not self.cpu_samples:
return 0.0
return sum(self.cpu_samples) / len(self.cpu_samples)
def to_metrics(
self, nodes_expanded: int, path_cost: float, path_length: int
) -> SearchMetrics:
"""Convert to SearchMetrics object."""
return SearchMetrics(
runtime_ms=self.runtime_ms,
memory_kb=max(0, self.memory_kb), # Ensure non-negative
cpu_percent=self.cpu_percent,
nodes_expanded=nodes_expanded,
path_cost=path_cost,
path_length=path_length,
)
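
# A minimal manual-usage sketch (hypothetical: `frontier` and
# `expand_next_node` stand in for a search's open list and one expansion step):
#
#     collector = MetricsCollector()
#     collector.start()
#     while frontier:
#         expand_next_node(frontier)
#         collector.sample()  # periodic samples feed peak_memory and cpu_percent
#     collector.stop()
#     print(collector.runtime_ms, collector.memory_kb, collector.cpu_percent)
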
@contextmanager
def measure_performance() -> Generator[MetricsCollector, None, None]:
"""
Context manager for measuring search performance.
Usage:
with measure_performance() as metrics:
result = search.solve(strategy)
print(f"Runtime: {metrics.runtime_ms}ms")
"""
collector = MetricsCollector()
collector.start()
try:
yield collector
finally:
collector.stop()
def run_with_metrics(
func: Callable[..., Any], *args, **kwargs
) -> Tuple[Any, MetricsCollector]:
"""
Run a function and collect performance metrics.
Args:
func: Function to run
*args: Positional arguments for func
**kwargs: Keyword arguments for func
Returns:
Tuple of (function result, MetricsCollector)
"""
collector = MetricsCollector()
collector.start()
try:
result = func(*args, **kwargs)
finally:
collector.stop()
return result, collector
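
# Example (hypothetical call; `search.solve` and `strategy` are illustrative):
#
#     result, collector = run_with_metrics(search.solve, strategy)
#     print(f"Solved in {collector.runtime_ms:.2f}ms "
#           f"using {collector.memory_kb:.1f}KB")
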
def format_metrics(metrics: SearchMetrics) -> str:
"""Format metrics for display."""
return (
f"Runtime: {metrics.runtime_ms:.2f}ms | "
f"Memory: {metrics.memory_mb:.2f}MB | "
f"CPU: {metrics.cpu_percent:.1f}% | "
f"Nodes: {metrics.nodes_expanded} | "
f"Cost: {metrics.path_cost} | "
f"Path Length: {metrics.path_length}"
)
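
# Example (assumes SearchMetrics accepts the same keyword arguments that
# to_metrics() passes; the values are made up):
#
#     m = SearchMetrics(
#         runtime_ms=12.34, memory_kb=256.0, cpu_percent=7.5,
#         nodes_expanded=120, path_cost=14.0, path_length=9,
#     )
#     print(format_metrics(m))
#     # Runtime: 12.34ms | Memory: 256.00KB | CPU: 7.5% | Nodes: 120 | Cost: 14.0 | Path Length: 9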