Spaces:
Sleeping
Sleeping
File size: 8,862 Bytes
74de430 ab19502 74de430 ab19502 19dd95f ab19502 19dd95f ab19502 19dd95f ab19502 19dd95f ab19502 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 |
"""
Cache utilities for dashboard and other high-frequency data.
Uses cachetools for in-memory caching with TTL (Time To Live).
No external dependencies like Redis needed - perfect for HuggingFace Spaces.
"""
from cachetools import TTLCache
from threading import RLock
from typing import Optional, Any
import logging
logger = logging.getLogger(__name__)
# Thread-safe dashboard cache with 5-minute TTL
# Stores up to 1000 project dashboards in memory.
# Keys are "dashboard:{project_id}:{user_id}" strings; values are dashboard dicts.
dashboard_cache: TTLCache = TTLCache(maxsize=1000, ttl=300)
# RLock (not Lock) so a thread already holding the lock can re-enter safely.
dashboard_cache_lock = RLock()
# Trends cache with 10-minute TTL (trends change less frequently)
# Keys are "trends:{project_id}:{metric}:{period}" strings.
trends_cache: TTLCache = TTLCache(maxsize=500, ttl=600)
trends_cache_lock = RLock()
# Project overview cache with 12-hour TTL (structure changes rarely)
# Stores project structure: regions, roles, subcontractors, team info
# Keys are "overview:{project_id}:{user_id}" strings.
overview_cache: TTLCache = TTLCache(maxsize=500, ttl=43200) # 12 hours = 43200 seconds
overview_cache_lock = RLock()
def get_cached_dashboard(project_id: str, user_id: str) -> Optional[dict]:
    """
    Get cached dashboard data for a project and user.

    Args:
        project_id: UUID of the project
        user_id: UUID of the user

    Returns:
        Cached dashboard dict or None if not found/expired
    """
    key = f"dashboard:{project_id}:{user_id}"
    try:
        with dashboard_cache_lock:
            cached_data = dashboard_cache.get(key)
        # Fix: compare against None so an empty-but-valid cached dashboard ({})
        # still counts as a hit; a bare truthiness test re-fetched it every call.
        if cached_data is not None:
            logger.debug("Cache HIT: %s", key)
            return cached_data
    except Exception as e:
        # Best-effort: a cache failure must never break the request path.
        logger.error("Error retrieving from cache: %s", e)
    return None
def set_cached_dashboard(project_id: str, user_id: str, data: dict) -> None:
    """
    Cache dashboard data for a project and user.

    Args:
        project_id: UUID of the project
        user_id: UUID of the user
        data: Dashboard data to cache
    """
    cache_key = f"dashboard:{project_id}:{user_id}"
    try:
        # Hold the lock only for the write itself; TTLCache is not thread-safe.
        with dashboard_cache_lock:
            dashboard_cache[cache_key] = data
            logger.debug(f"Cache SET: {cache_key}")
    except Exception as e:
        logger.error(f"Error setting cache: {e}")
def invalidate_dashboard_cache(project_id: str) -> None:
    """
    Invalidate all cached dashboards for a project.
    Called when project data changes (new ticket, sales order, etc.)

    Args:
        project_id: UUID of the project
    """
    prefix = f"dashboard:{project_id}:"
    try:
        with dashboard_cache_lock:
            # Snapshot matching keys first: mutating while iterating a
            # TTLCache is unsafe.
            stale_keys = [k for k in dashboard_cache.keys() if k.startswith(prefix)]
            for stale_key in stale_keys:
                dashboard_cache.pop(stale_key, None)
            if stale_keys:
                logger.info(f"Invalidated {len(stale_keys)} cache entries for project {project_id}")
    except Exception as e:
        logger.error(f"Error invalidating cache: {e}")
def get_cached_trends(project_id: str, metric: str, period: str) -> Optional[dict]:
    """
    Get cached trend data.

    Args:
        project_id: UUID of the project
        metric: Metric name (sales_orders, tickets, etc.)
        period: Time period (7days, 30days, etc.)

    Returns:
        Cached trends dict or None if not found/expired
    """
    key = f"trends:{project_id}:{metric}:{period}"
    try:
        with trends_cache_lock:
            cached_data = trends_cache.get(key)
        # Fix: compare against None so an empty-but-valid cached result ({})
        # still counts as a hit; a bare truthiness test re-fetched it every call.
        if cached_data is not None:
            logger.debug("Trends cache HIT: %s", key)
            return cached_data
    except Exception as e:
        # Best-effort: a cache failure must never break the request path.
        logger.error("Error retrieving trends from cache: %s", e)
    return None
def set_cached_trends(project_id: str, metric: str, period: str, data: dict) -> None:
    """
    Cache trend data.

    Args:
        project_id: UUID of the project
        metric: Metric name
        period: Time period
        data: Trends data to cache
    """
    cache_key = f"trends:{project_id}:{metric}:{period}"
    try:
        # Hold the lock only for the write itself; TTLCache is not thread-safe.
        with trends_cache_lock:
            trends_cache[cache_key] = data
            logger.debug(f"Trends cache SET: {cache_key}")
    except Exception as e:
        logger.error(f"Error setting trends cache: {e}")
def invalidate_trends_cache(project_id: str, metric: Optional[str] = None) -> None:
    """
    Invalidate trend cache for a project.

    Args:
        project_id: UUID of the project
        metric: Specific metric to invalidate, or None for all
    """
    # A single prefix covers both cases: with a metric the key pattern is
    # "trends:{project}:{metric}:", without it every metric for the project.
    if metric:
        prefix = f"trends:{project_id}:{metric}:"
    else:
        prefix = f"trends:{project_id}:"
    try:
        with trends_cache_lock:
            # Snapshot matching keys first: mutating while iterating a
            # TTLCache is unsafe.
            stale_keys = [k for k in trends_cache.keys() if k.startswith(prefix)]
            for stale_key in stale_keys:
                trends_cache.pop(stale_key, None)
            if stale_keys:
                logger.info(f"Invalidated {len(stale_keys)} trends cache entries")
    except Exception as e:
        logger.error(f"Error invalidating trends cache: {e}")
def clear_all_caches() -> None:
    """
    Clear all caches (dashboard, trends, and overview). Use with caution.
    Typically only needed for testing or maintenance.
    """
    try:
        with dashboard_cache_lock:
            dashboard_cache.clear()
            logger.info("Cleared dashboard cache")
        with trends_cache_lock:
            trends_cache.clear()
            logger.info("Cleared trends cache")
        # Bug fix: overview_cache was previously left out of "clear all",
        # silently keeping stale project-structure data for up to 12 hours.
        with overview_cache_lock:
            overview_cache.clear()
            logger.info("Cleared overview cache")
    except Exception as e:
        logger.error(f"Error clearing caches: {e}")
def get_cached_overview(project_id: str, user_id: str) -> Optional[dict]:
    """
    Get cached project overview data.

    Args:
        project_id: UUID of the project
        user_id: UUID of the user

    Returns:
        Cached overview dict or None if not found/expired
    """
    key = f"overview:{project_id}:{user_id}"
    try:
        with overview_cache_lock:
            cached_data = overview_cache.get(key)
        # Fix: compare against None so an empty-but-valid cached overview ({})
        # still counts as a hit; a bare truthiness test re-fetched it every call.
        if cached_data is not None:
            logger.debug("Overview cache HIT: %s", key)
            return cached_data
    except Exception as e:
        # Best-effort: a cache failure must never break the request path.
        logger.error("Error retrieving overview from cache: %s", e)
    return None
def set_cached_overview(project_id: str, user_id: str, data: dict) -> None:
    """
    Cache project overview data.

    Args:
        project_id: UUID of the project
        user_id: UUID of the user
        data: Overview data to cache
    """
    cache_key = f"overview:{project_id}:{user_id}"
    try:
        # Hold the lock only for the write itself; TTLCache is not thread-safe.
        with overview_cache_lock:
            overview_cache[cache_key] = data
            logger.debug(f"Overview cache SET: {cache_key}")
    except Exception as e:
        logger.error(f"Error setting overview cache: {e}")
def invalidate_overview_cache(project_id: str) -> None:
    """
    Invalidate all cached overviews for a project.
    Called when project structure changes (regions, roles, team, subcontractors).

    Args:
        project_id: UUID of the project
    """
    prefix = f"overview:{project_id}:"
    try:
        with overview_cache_lock:
            # Snapshot matching keys first: mutating while iterating a
            # TTLCache is unsafe.
            stale_keys = [k for k in overview_cache.keys() if k.startswith(prefix)]
            for stale_key in stale_keys:
                overview_cache.pop(stale_key, None)
            if stale_keys:
                logger.info(f"Invalidated {len(stale_keys)} overview cache entries for project {project_id}")
    except Exception as e:
        logger.error(f"Error invalidating overview cache: {e}")
def get_cache_stats() -> dict:
    """
    Get cache statistics for monitoring.

    Returns:
        Dict with cache size, hits, etc.
    """
    try:
        # Acquire all three locks together (same order everywhere) so the
        # snapshot is internally consistent.
        with dashboard_cache_lock, trends_cache_lock, overview_cache_lock:
            caches = {
                "dashboard_cache": dashboard_cache,
                "trends_cache": trends_cache,
                "overview_cache": overview_cache,
            }
            return {
                label: {
                    "size": len(cache),
                    "maxsize": cache.maxsize,
                    "ttl": cache.ttl,
                }
                for label, cache in caches.items()
            }
    except Exception as e:
        logger.error(f"Error getting cache stats: {e}")
        return {}
|