|
|
package main |
|
|
|
|
|
import (
	"math"
	"net/http"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/gin-gonic/gin"
)
|
|
|
|
|
|
|
|
// EndpointStats holds request counters for a single endpoint over several
// time windows. The int64 fields are read and written via sync/atomic
// elsewhere in this file, so they must remain 64-bit aligned (they are the
// leading fields of the struct, which guarantees this on 32-bit platforms).
type EndpointStats struct {
	Total int64 `json:"total"` // lifetime request count
	Today int64 `json:"today"` // requests since local midnight
	Week  int64 `json:"week"`  // requests in the trailing 7 days
	Month int64 `json:"month"` // requests in the trailing calendar month
}
|
|
|
|
|
|
|
|
// Request is one recorded API hit: which endpoint was called and when.
// A rolling slice of these backs the Today/Week/Month rollups computed
// in updateSummaryStats.
type Request struct {
	Endpoint  string `json:"endpoint"`
	Timestamp int64  `json:"timestamp"` // Unix seconds
}
|
|
|
|
|
|
|
|
// TimeWindow aggregates per-endpoint lifetime counters plus a rolling log
// of individual requests used to derive time-windowed statistics.
type TimeWindow struct {
	mu          sync.RWMutex             // guards requests and lastCleanup (not counters)
	counters    map[string]*atomic.Int64 // per-endpoint lifetime totals; keys populated once in initStats, never mutated after — safe to read without mu
	requests    []Request                // rolling request log, pruned by cleanupOldRequests
	lastCleanup time.Time                // last prune time; throttles cleanupOldRequests
}
|
|
|
|
|
|
|
|
// Stats is the top-level statistics aggregate served by handleStats.
// Exported fields are serialized to JSON; unexported fields hold the
// underlying bookkeeping.
type Stats struct {
	mu         sync.RWMutex              // guards Total, Endpoints values and lastUpdate
	Total      int64                     `json:"total"`
	Endpoints  map[string]*EndpointStats `json:"endpoints"`
	timeWindow *TimeWindow               // raw per-request data the summaries are derived from
	lastUpdate time.Time                 // when updateSummaryStats last completed
}
|
|
|
|
|
|
|
|
// PerformanceMetrics is a point-in-time snapshot of process health,
// refreshed every 5 seconds by updatePerformanceMetrics.
type PerformanceMetrics struct {
	mu              sync.RWMutex // guards all fields below
	RequestsPerSec  float64      `json:"requests_per_sec"`     // exponentially-smoothed request rate, rounded to 2 decimals
	AvgResponseTime int64        `json:"avg_response_time_ms"` // mean over the current sample window
	ErrorRate       float64      `json:"error_rate"`           // percentage of requests that errored, rounded to 2 decimals
	MemoryUsageMB   float64      `json:"memory_usage_mb"`      // heap in use (runtime MemStats.Alloc), rounded to 2 decimals
	GoroutineCount  int          `json:"goroutine_count"`
	LastUpdated     int64        `json:"last_updated"` // Unix milliseconds
}
|
|
|
|
|
|
|
|
// Package-level state shared across the stats subsystem. The plain int64
// counters are accessed exclusively through sync/atomic in this file.
var (
	stats       *Stats              // global aggregate; created by initStats
	perfMetrics *PerformanceMetrics // global health snapshot; created by initStats

	// presumably incremented by request middleware — the increment sites
	// are not visible in this part of the file; verify against callers.
	requestCount      int64 // total requests observed; read for QPS deltas
	errorCount        int64 // total failed requests
	responseTimeSum   int64 // running sum of response times in ms (periodically reset)
	responseTimeCount int64 // number of samples in responseTimeSum

	lastQPSUpdate    int64 // Unix seconds of the last QPS computation (0 = never)
	lastRequestCount int64 // requestCount value at the last QPS computation
)
|
|
|
|
|
|
|
|
func initStats() { |
|
|
stats = &Stats{ |
|
|
Endpoints: make(map[string]*EndpointStats), |
|
|
timeWindow: &TimeWindow{ |
|
|
counters: make(map[string]*atomic.Int64), |
|
|
requests: make([]Request, 0, 1000), |
|
|
}, |
|
|
lastUpdate: time.Now(), |
|
|
} |
|
|
|
|
|
perfMetrics = &PerformanceMetrics{ |
|
|
LastUpdated: time.Now().UnixMilli(), |
|
|
} |
|
|
|
|
|
|
|
|
endpoints := []string{ |
|
|
"/openai", "/gemini", "/claude", "/xai", "/cohere", "/fireworks", |
|
|
"/groq", "/huggingface", "/meta", "/novita", "/openrouter", |
|
|
"/portkey", "/sophnet", "/telegram", "/together", "/cerebras", |
|
|
"/discord", "/gnothink", |
|
|
} |
|
|
|
|
|
for _, endpoint := range endpoints { |
|
|
stats.Endpoints[endpoint] = &EndpointStats{} |
|
|
stats.timeWindow.counters[endpoint] = &atomic.Int64{} |
|
|
} |
|
|
|
|
|
|
|
|
go func() { |
|
|
ticker := time.NewTicker(3 * time.Second) |
|
|
defer ticker.Stop() |
|
|
for range ticker.C { |
|
|
stats.updateSummaryStats() |
|
|
} |
|
|
}() |
|
|
|
|
|
|
|
|
go func() { |
|
|
ticker := time.NewTicker(5 * time.Second) |
|
|
defer ticker.Stop() |
|
|
for range ticker.C { |
|
|
updatePerformanceMetrics() |
|
|
} |
|
|
}() |
|
|
} |
|
|
|
|
|
|
|
|
func (s *Stats) recordRequest(endpoint string) { |
|
|
|
|
|
if counter, exists := s.timeWindow.counters[endpoint]; exists { |
|
|
counter.Add(1) |
|
|
} |
|
|
|
|
|
|
|
|
go func() { |
|
|
s.timeWindow.mu.Lock() |
|
|
defer s.timeWindow.mu.Unlock() |
|
|
|
|
|
|
|
|
s.timeWindow.requests = append(s.timeWindow.requests, Request{ |
|
|
Endpoint: endpoint, |
|
|
Timestamp: time.Now().Unix(), |
|
|
}) |
|
|
|
|
|
|
|
|
s.cleanupOldRequests() |
|
|
}() |
|
|
} |
|
|
|
|
|
|
|
|
// cleanupOldRequests prunes the rolling request log: entries older than 30
// days are dropped and the log is then capped at the newest 500 entries.
// The actual prune runs at most once every 5 minutes; calls in between
// return immediately.
//
// Callers must hold s.timeWindow.mu for writing (recordRequest does).
func (s *Stats) cleanupOldRequests() {
	now := time.Now()
	// Throttle: skip if a cleanup ran within the last 5 minutes.
	if now.Sub(s.timeWindow.lastCleanup) < 5*time.Minute {
		return
	}

	cutoff := now.Add(-30 * 24 * time.Hour).Unix()
	// Deliberately left nil: if everything is pruned, the log becomes a
	// nil slice (which handleStats serializes as JSON null, not []).
	var newRequests []Request

	// Keep only entries strictly newer than the 30-day cutoff.
	for _, req := range s.timeWindow.requests {
		if req.Timestamp > cutoff {
			newRequests = append(newRequests, req)
		}
	}

	// Hard cap on memory: retain only the newest 500 entries.
	// NOTE(review): this cap makes the Today/Week/Month rollups in
	// updateSummaryStats undercount on busy instances — confirm that is
	// acceptable for whatever consumes these stats.
	if len(newRequests) > 500 {
		newRequests = newRequests[len(newRequests)-500:]
	}

	s.timeWindow.requests = newRequests
	s.timeWindow.lastCleanup = now
}
|
|
|
|
|
|
|
|
// updateSummaryStats recomputes Total and the per-endpoint
// Today/Week/Month rollups from the lifetime counters and the rolling
// request log. It runs every 3 seconds on a goroutine started in
// initStats.
//
// Locking: holds s.mu exclusively for the whole pass, and briefly takes
// s.timeWindow.mu for reading while scanning the request log. The atomic
// stores on EndpointStats fields pair with the atomic loads in
// getStatsSnapshot.
func (s *Stats) updateSummaryStats() {
	s.mu.Lock()
	defer s.mu.Unlock()

	now := time.Now()
	// Window boundaries, as Unix seconds.
	today := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Unix()
	weekAgo := now.AddDate(0, 0, -7).Unix()
	monthAgo := now.AddDate(0, -1, 0).Unix()

	// Zero the windowed counters before recounting them from scratch.
	for _, endpointStats := range s.Endpoints {
		atomic.StoreInt64(&endpointStats.Today, 0)
		atomic.StoreInt64(&endpointStats.Week, 0)
		atomic.StoreInt64(&endpointStats.Month, 0)
	}

	// Lifetime totals come straight from the per-endpoint counters
	// (init-only map, safe to read here without timeWindow.mu).
	totalRequests := int64(0)
	for endpoint, counter := range s.timeWindow.counters {
		if endpointStats, exists := s.Endpoints[endpoint]; exists {
			total := counter.Load()
			atomic.StoreInt64(&endpointStats.Total, total)
			totalRequests += total
		}
	}

	s.Total = totalRequests

	// Windowed counts are rebuilt by scanning the rolling request log.
	// NOTE(review): cleanupOldRequests caps the log at 500 entries, so
	// these rollups can undercount on busy instances — verify the cap is
	// acceptable for consumers of this data.
	s.timeWindow.mu.RLock()
	for _, req := range s.timeWindow.requests {
		if endpointStats, exists := s.Endpoints[req.Endpoint]; exists {
			if req.Timestamp >= today {
				atomic.AddInt64(&endpointStats.Today, 1)
			}
			if req.Timestamp >= weekAgo {
				atomic.AddInt64(&endpointStats.Week, 1)
			}
			if req.Timestamp >= monthAgo {
				atomic.AddInt64(&endpointStats.Month, 1)
			}
		}
	}
	s.timeWindow.mu.RUnlock()

	s.lastUpdate = now
}
|
|
|
|
|
|
|
|
func (s *Stats) getStatsSnapshot() *Stats { |
|
|
s.mu.RLock() |
|
|
defer s.mu.RUnlock() |
|
|
|
|
|
snapshot := &Stats{ |
|
|
Total: s.Total, |
|
|
Endpoints: make(map[string]*EndpointStats), |
|
|
} |
|
|
|
|
|
for endpoint, endpointStats := range s.Endpoints { |
|
|
snapshot.Endpoints[endpoint] = &EndpointStats{ |
|
|
Total: atomic.LoadInt64(&endpointStats.Total), |
|
|
Today: atomic.LoadInt64(&endpointStats.Today), |
|
|
Week: atomic.LoadInt64(&endpointStats.Week), |
|
|
Month: atomic.LoadInt64(&endpointStats.Month), |
|
|
} |
|
|
} |
|
|
|
|
|
return snapshot |
|
|
} |
|
|
|
|
|
|
|
|
func updatePerformanceMetrics() { |
|
|
perfMetrics.mu.Lock() |
|
|
defer perfMetrics.mu.Unlock() |
|
|
|
|
|
now := time.Now() |
|
|
|
|
|
|
|
|
var m runtime.MemStats |
|
|
runtime.ReadMemStats(&m) |
|
|
|
|
|
perfMetrics.MemoryUsageMB = math.Round(float64(m.Alloc)/1024/1024*100) / 100 |
|
|
perfMetrics.GoroutineCount = runtime.NumGoroutine() |
|
|
perfMetrics.LastUpdated = now.UnixMilli() |
|
|
|
|
|
|
|
|
totalReqs := int64(0) |
|
|
stats.mu.RLock() |
|
|
for _, counter := range stats.timeWindow.counters { |
|
|
totalReqs += counter.Load() |
|
|
} |
|
|
stats.mu.RUnlock() |
|
|
|
|
|
|
|
|
currentTime := now.Unix() |
|
|
lastUpdate := atomic.LoadInt64(&lastQPSUpdate) |
|
|
currentRequests := atomic.LoadInt64(&requestCount) |
|
|
|
|
|
if lastUpdate == 0 { |
|
|
|
|
|
atomic.StoreInt64(&lastQPSUpdate, currentTime) |
|
|
atomic.StoreInt64(&lastRequestCount, currentRequests) |
|
|
perfMetrics.RequestsPerSec = 0.0 |
|
|
} else { |
|
|
timeDiff := currentTime - lastUpdate |
|
|
if timeDiff > 0 { |
|
|
|
|
|
lastReqs := atomic.LoadInt64(&lastRequestCount) |
|
|
requestDiff := currentRequests - lastReqs |
|
|
|
|
|
|
|
|
qps := float64(requestDiff) / float64(timeDiff) |
|
|
|
|
|
|
|
|
if perfMetrics.RequestsPerSec == 0 { |
|
|
perfMetrics.RequestsPerSec = qps |
|
|
} else { |
|
|
|
|
|
perfMetrics.RequestsPerSec = 0.3*qps + 0.7*perfMetrics.RequestsPerSec |
|
|
} |
|
|
|
|
|
|
|
|
perfMetrics.RequestsPerSec = math.Round(perfMetrics.RequestsPerSec*100) / 100 |
|
|
|
|
|
|
|
|
atomic.StoreInt64(&lastQPSUpdate, currentTime) |
|
|
atomic.StoreInt64(&lastRequestCount, currentRequests) |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
totalErrors := atomic.LoadInt64(&errorCount) |
|
|
if totalReqs > 0 { |
|
|
errorRate := float64(totalErrors) / float64(totalReqs) * 100 |
|
|
|
|
|
perfMetrics.ErrorRate = math.Round(errorRate*100) / 100 |
|
|
} |
|
|
|
|
|
|
|
|
totalResponseTime := atomic.LoadInt64(&responseTimeSum) |
|
|
responseCount := atomic.LoadInt64(&responseTimeCount) |
|
|
if responseCount > 0 { |
|
|
perfMetrics.AvgResponseTime = totalResponseTime / responseCount |
|
|
|
|
|
if responseCount > 1000 { |
|
|
atomic.StoreInt64(&responseTimeSum, 0) |
|
|
atomic.StoreInt64(&responseTimeCount, 0) |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
func handleStats(c *gin.Context) { |
|
|
c.Header("Access-Control-Allow-Origin", "*") |
|
|
c.Header("Access-Control-Allow-Methods", "GET, OPTIONS") |
|
|
c.Header("Access-Control-Allow-Headers", "Content-Type") |
|
|
|
|
|
if c.Request.Method == "OPTIONS" { |
|
|
c.Status(204) |
|
|
return |
|
|
} |
|
|
|
|
|
snapshot := stats.getStatsSnapshot() |
|
|
|
|
|
|
|
|
stats.timeWindow.mu.RLock() |
|
|
requests := make([]Request, len(stats.timeWindow.requests)) |
|
|
copy(requests, stats.timeWindow.requests) |
|
|
stats.timeWindow.mu.RUnlock() |
|
|
|
|
|
|
|
|
perfMetrics.mu.RLock() |
|
|
response := gin.H{ |
|
|
"total": snapshot.Total, |
|
|
"endpoints": snapshot.Endpoints, |
|
|
"requests": requests, |
|
|
"performance": gin.H{ |
|
|
"requests_per_sec": perfMetrics.RequestsPerSec, |
|
|
"avg_response_time_ms": perfMetrics.AvgResponseTime, |
|
|
"error_rate": perfMetrics.ErrorRate, |
|
|
"memory_usage_mb": perfMetrics.MemoryUsageMB, |
|
|
"goroutine_count": perfMetrics.GoroutineCount, |
|
|
"last_updated": perfMetrics.LastUpdated, |
|
|
}, |
|
|
} |
|
|
perfMetrics.mu.RUnlock() |
|
|
|
|
|
c.JSON(200, response) |
|
|
} |
|
|
|