faq-rag-chatbot / src /utils.py
Techbite's picture
initial commit
26d1a81
raw
history blame
2.03 kB
import time
import functools
from typing import Callable, Any, Dict
def time_function(func: Callable) -> Callable:
"""
Decorator to time function execution
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print(f"{func.__name__} took {end_time - start_time:.2f} seconds to execute")
return result
return wrapper
def evaluate_response(generated_response: str, ground_truth: str = None) -> Dict[str, Any]:
"""
Basic evaluation of generated response
"""
results = {
"length": len(generated_response),
"word_count": len(generated_response.split())
}
# If ground truth is provided, we could add metrics like BLEU or ROUGE
if ground_truth:
# Simplified evaluation - word overlap
generated_words = set(generated_response.lower().split())
ground_truth_words = set(ground_truth.lower().split())
overlap = len(generated_words.intersection(ground_truth_words))
results["word_overlap"] = overlap / len(ground_truth_words) if ground_truth_words else 0
return results
def format_memory_stats():
"""
Format memory usage statistics for display
"""
import torch
import psutil
import os
# System memory
system_stats = {
"RAM": f"{psutil.virtual_memory().used / (1024 ** 3):.1f}GB / {psutil.virtual_memory().total / (1024 ** 3):.1f}GB",
"RAM Usage": f"{psutil.virtual_memory().percent}%",
}
# GPU memory if available
if torch.cuda.is_available():
gpu_stats = {}
for i in range(torch.cuda.device_count()):
gpu_stats[f"GPU {i}"] = f"{torch.cuda.get_device_name(i)}"
gpu_stats[f"GPU {i} Memory"] = f"{torch.cuda.memory_allocated(i) / (1024 ** 3):.1f}GB / {torch.cuda.get_device_properties(i).total_memory / (1024 ** 3):.1f}GB"
system_stats.update(gpu_stats)
return system_stats