// NOTE(review): The JSX markup in this component appears to have been stripped
// (element tags are missing; only text nodes, {/* */} comments, and expression
// bodies survive). Do not hand-edit the rendering logic from this copy —
// recover the intact file from version control first. The comments added below
// annotate the surviving expressions only.
// NOTE(review): `currentMetadata` and `enhancedStats` are typed `any` in the
// props interface on the next line — consider a typed analytics shape; the
// optional-chaining paths below document the fields actually read.
/** * Trace Overview Section Component * * Displays trace statistics, performance metrics, and cost analytics */ import React from "react"; import { Button } from "@/components/ui/button"; import { Card, CardContent } from "@/components/ui/card"; import { Tooltip, TooltipContent, TooltipTrigger, } from "@/components/ui/tooltip"; import { Activity, AlertCircle, Eye, HelpCircle } from "lucide-react"; import { Trace } from "@/types"; import { CompactMetadataCard, CompactMetadataCardAdd, type MetadataCardConfig, } from "@/components/shared/CompactMetadataCard"; interface TraceOverviewSectionProps { trace: Trace; metadataOutdated: boolean; currentMetadata: any; enhancedStats: any; enhancedStatsLoading: boolean; onOpenTraceContent: () => void; metadataCards: MetadataCardConfig[]; onOpenMetadataCardSelector: () => void; } export function TraceOverviewSection({ trace: _trace, metadataOutdated, currentMetadata, enhancedStats, enhancedStatsLoading, onOpenTraceContent, metadataCards, onOpenMetadataCardSelector, }: TraceOverviewSectionProps) { return (
{/* Trace Overview Section */}

Trace Overview

{/* View Content Button in header */}

View Trace Content

View and edit the raw trace data in a larger window.

{/* Subtitle */}

Performance metrics and trace analytics

{/* Metadata Cards Section */}
{/* NOTE(review): maps over `metadataCards` — the per-card element markup
    (presumably a CompactMetadataCard) was lost in extraction. */}
{metadataCards.map((cardConfig) => ( ))}
{/* Metadata Outdated Warning Banner */} {metadataOutdated && (

Trace Statistics May Be Outdated

The trace content has been modified since statistics were generated. Consider updating for accurate metrics.

)}
{/* Trace Statistics & Performance Section */}

Trace Statistics & Performance

Key metrics and performance data from the trace analysis

Tokens

Total input and output tokens used in LLM calls

{/* Token total: prefers live `enhancedStats`, falls back to persisted
    schema_analytics. NOTE(review): `||` means a legitimate total of 0 renders
    as "N/A" — compare with the explicit null check used for Success Rate. */}
{(() => { const tokens = enhancedStats?.tokens?.total_tokens || currentMetadata?.schema_analytics?.numerical_overview ?.token_analytics?.total_tokens; return tokens ? tokens.toLocaleString() : "N/A"; })()}

Prompts

Number of LLM prompt calls made during execution

{/* NOTE(review): `||` fallback chain — a real count of 0 displays as "N/A". */}
{enhancedStats?.prompt_calls?.total_calls || currentMetadata?.schema_analytics?.prompt_analytics ?.prompt_calls_detected || "N/A"}

Tools & Functions

Number of tools, functions, or agent components used during execution

{enhancedStats?.components?.total_components || currentMetadata?.schema_analytics?.numerical_overview ?.component_stats?.total_components || "N/A"}

Success Rate

Percentage of successful component executions

{/* Success rate: explicit undefined/null check, so a genuine 0 renders as
    "0.0%" rather than "N/A" (unlike the `||` fallbacks above). */}
{(() => { const successRate = enhancedStats?.components?.success_rate || currentMetadata?.schema_analytics?.numerical_overview ?.component_stats?.success_rate; return successRate !== undefined && successRate !== null ? `${successRate.toFixed(1)}%` : "N/A"; })()}

Execution Time

Total execution time for the trace

{/* Execution time: >= 1000 ms shown as seconds with 1 decimal, otherwise raw
    ms; missing or non-positive values display "N/A". */}
{(() => { const totalTimeMs = enhancedStats?.performance?.total_execution_time_ms || currentMetadata?.schema_analytics?.numerical_overview ?.timing_analytics?.total_execution_time_ms; if (totalTimeMs && totalTimeMs > 0) { if (totalTimeMs >= 1000) { return `${(totalTimeMs / 1000).toFixed(1)}s`; } else { return `${totalTimeMs}ms`; } } return "N/A"; })()}

Call Stack Depth

Maximum depth of nested function calls (how many levels deep the execution goes)

{enhancedStats?.components?.max_depth || currentMetadata?.schema_analytics?.numerical_overview ?.component_stats?.max_depth || "N/A"}

{/* Cost & Token Analytics Section */} {(enhancedStats?.cost || enhancedStats?.tokens) && (

Cost & Token Analytics

Cost breakdown and token usage analytics for LLM calls

{enhancedStats?.cost && (
{/* Left side: Model and Cost */}

{/* NOTE(review): "gpt-5-mini" is a hardcoded display fallback used when
    cost.model_used is absent — confirm this default is still accurate. */}
{enhancedStats?.cost?.model_used || "gpt-5-mini"}

{/* Average per-call cost, fixed to 6 decimal places; "$0.00/call" when the
    value is missing or zero (falsy). */}
Avg:{" "} {enhancedStatsLoading ? "Loading..." : enhancedStats?.cost?.avg_cost_per_call_usd ? `$${enhancedStats.cost.avg_cost_per_call_usd.toFixed( 6 )}/call` : "$0.00/call"}

{/* Right side: Model Information */} {enhancedStats?.cost?.model_metadata && (
{enhancedStats.cost.model_metadata .litellm_provider && (
Provider: { enhancedStats.cost.model_metadata .litellm_provider }
)} {enhancedStats.cost.model_metadata .max_input_tokens && (
Max Input: {enhancedStats.cost.model_metadata.max_input_tokens.toLocaleString()}{" "} tokens
)} {enhancedStats.cost.model_metadata .max_output_tokens && (
Max Output: {enhancedStats.cost.model_metadata.max_output_tokens.toLocaleString()}{" "} tokens
)} {/* Capabilities */}
{/* Capability badges: each renders only when the corresponding
    model_metadata.supports_* flag is truthy. */}
{enhancedStats.cost.model_metadata .supports_function_calling && ( Functions

Model supports function calling - can execute predefined functions and tools during conversations

)} {enhancedStats.cost.model_metadata .supports_vision && ( Vision

Model supports vision capabilities - can analyze and understand images, charts, and visual content

)} {enhancedStats.cost.model_metadata .supports_response_schema && ( Schema

Model supports structured output schemas - can return responses in predefined JSON formats

)} {enhancedStats.cost.model_metadata .supports_prompt_caching && ( Caching

Model supports prompt caching - can cache common prompts to reduce latency and costs

)}
)}
)}
{/* Total Tokens & Cost */}

Total Tokens

{enhancedStatsLoading ? "..." : enhancedStats?.tokens?.total_tokens?.toLocaleString() || "N/A"}

{enhancedStats?.cost && (

{enhancedStatsLoading ? "..." : enhancedStats?.cost?.total_cost_usd ? `$${enhancedStats.cost.total_cost_usd.toFixed( 6 )}` : "$0.00"}

)}
{/* Input Tokens & Cost */}

Input Tokens

{enhancedStatsLoading ? "..." : enhancedStats?.tokens?.total_prompt_tokens?.toLocaleString() || "N/A"}

{enhancedStats?.cost && (

{enhancedStatsLoading ? "..." : enhancedStats?.cost?.input_cost_usd ? `$${enhancedStats.cost.input_cost_usd.toFixed( 6 )}` : "$0.00"}

)}
{/* Output Tokens & Cost */}

Output Tokens

{enhancedStatsLoading ? "..." : enhancedStats?.tokens?.total_completion_tokens?.toLocaleString() || "N/A"}

{enhancedStats?.cost && (

{enhancedStatsLoading ? "..." : enhancedStats?.cost?.output_cost_usd ? `$${enhancedStats.cost.output_cost_usd.toFixed( 6 )}` : "$0.00"}

)}
)}
); }