| """ | |
| Utility module for caching LLM responses | |
| """ | |
| import os | |
| import json | |
| import hashlib | |
from typing import Any, Callable, Dict, Optional

from config import CACHE_ENABLED, DEBUG_MODE

# Cache directory
CACHE_DIR = "cache"
os.makedirs(CACHE_DIR, exist_ok=True)

def generate_cache_key(prompt_type: str, params: Dict[str, Any]) -> str:
    """
    Generate a cache key based on prompt type and parameters.

    Args:
        prompt_type: Prompt type, such as 'decompose' or 'explain'
        params: Prompt parameters

    Returns:
        Cache key string
    """
    # Convert the parameters to a canonical (sorted-key) JSON string so
    # that logically identical params always produce the same key
    params_str = json.dumps(params, sort_keys=True, ensure_ascii=False)
    # Hash the prompt type together with the serialized parameters
    hash_obj = hashlib.md5(f"{prompt_type}:{params_str}".encode('utf-8'))
    return hash_obj.hexdigest()
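
# Note: keys are deterministic, e.g. generate_cache_key("explain", {"q": "hi"})
# returns the same 32-character hex digest on every call with these inputs.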

def save_to_cache(cache_key: str, data: Dict[str, Any]) -> None:
    """
    Save data to a cache file as JSON.

    Args:
        cache_key: Cache key
        data: Data to be cached
    """
    cache_path = os.path.join(CACHE_DIR, f"{cache_key}.json")
    with open(cache_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)

def load_from_cache(cache_key: str) -> Optional[Dict[str, Any]]:
    """
    Load data from the cache.

    Args:
        cache_key: Cache key

    Returns:
        Cached data, or None if it doesn't exist
    """
    cache_path = os.path.join(CACHE_DIR, f"{cache_key}.json")
    if not os.path.exists(cache_path):
        return None
    try:
        with open(cache_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (json.JSONDecodeError, IOError):
        # Return None if the file is corrupted or cannot be read
        return None

def cached_llm_call(
    prompt_type: str,
    params: Dict[str, Any],
    call_function: Callable[[Dict[str, Any]], Dict[str, Any]],
) -> Dict[str, Any]:
    """
    Caching wrapper for LLM calls.

    Args:
        prompt_type: Prompt type
        params: Prompt parameters
        call_function: Function that performs the actual LLM call

    Returns:
        LLM response, served from the cache when available
    """
    # Generate the cache key
    cache_key = generate_cache_key(prompt_type, params)
    # Try the cache first; compare against None explicitly so that a
    # cached empty dict still counts as a hit
    cached_result = load_from_cache(cache_key)
    if cached_result is not None:
        print(f"[Cache] Using cached response: {prompt_type}")
        return cached_result
    # Cache miss: call the LLM and store the result
    result = call_function(params)
    save_to_cache(cache_key, result)
    return result
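
# Example usage (a minimal sketch; `fake_llm_call` is a hypothetical stand-in
# for a real LLM client -- any callable that accepts the params dict and
# returns a JSON-serializable dict will work):
#
#     def fake_llm_call(params: Dict[str, Any]) -> Dict[str, Any]:
#         return {"answer": "stub response for " + params["question"]}
#
#     result = cached_llm_call("explain", {"question": "What is caching?"}, fake_llm_call)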

def get_from_cache(cache_key: str) -> Optional[str]:
    """
    Read a raw string directly from the cache.

    Unlike load_from_cache, the key passed here is an arbitrary string that
    is hashed internally before the file lookup.

    Args:
        cache_key: Raw cache key string (hashed internally)

    Returns:
        Cached data, or None if it doesn't exist
    """
    if not CACHE_ENABLED:
        return None
    # Hash the cache key to derive the file name
    hash_obj = hashlib.md5(cache_key.encode('utf-8'))
    hashed_key = hash_obj.hexdigest()
    cache_path = os.path.join(CACHE_DIR, f"{hashed_key}.json")
    if not os.path.exists(cache_path):
        return None
    try:
        with open(cache_path, 'r', encoding='utf-8') as f:
            if DEBUG_MODE:
                print(f"Loading from cache: {cache_key[:30]}...")
            return f.read()
    except IOError:
        # Return None if the file cannot be read
        return None

def save_string_to_cache(cache_key: str, data: str) -> None:
    """
    Save a raw string to a cache file. Counterpart to get_from_cache: the
    key is an arbitrary string that is hashed internally, and the distinct
    name avoids shadowing the dict-based save_to_cache above.

    Args:
        cache_key: Raw cache key string (hashed internally)
        data: Data string to cache
    """
    if not CACHE_ENABLED:
        return
    # Hash the cache key to derive the file name
    hash_obj = hashlib.md5(cache_key.encode('utf-8'))
    hashed_key = hash_obj.hexdigest()
    cache_path = os.path.join(CACHE_DIR, f"{hashed_key}.json")
    with open(cache_path, 'w', encoding='utf-8') as f:
        f.write(data)
    if DEBUG_MODE:
        print(f"Data cached: {cache_key[:30]}...")