File size: 2,034 Bytes
26d1a81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import functools
import time
from typing import Any, Callable, Dict, Optional

def time_function(func: Callable) -> Callable:
    """
    Decorator to time function execution.

    Prints the wall time spent inside *func* after each call and returns
    the wrapped function's result unchanged.

    Args:
        func: The callable to instrument.

    Returns:
        A wrapped callable with the same signature and metadata
        (``functools.wraps`` preserves ``__name__``/``__doc__``).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # perf_counter is monotonic and high-resolution; time.time() can
        # jump backwards/forwards with NTP clock adjustments.
        start_time = time.perf_counter()
        result = func(*args, **kwargs)
        end_time = time.perf_counter()
        print(f"{func.__name__} took {end_time - start_time:.2f} seconds to execute")
        return result
    return wrapper

def evaluate_response(generated_response: str, ground_truth: Optional[str] = None) -> Dict[str, Any]:
    """
    Basic evaluation of a generated response.

    Args:
        generated_response: The model output to evaluate.
        ground_truth: Optional reference text. If provided (and non-empty),
            a word-overlap score is added to the results. Note an empty
            string is treated the same as None and skips the overlap metric.

    Returns:
        Dict with "length" (character count) and "word_count" keys, plus
        "word_overlap" (fraction of ground-truth words present in the
        response, 0..1) when a non-empty ground truth is given.
    """
    results: Dict[str, Any] = {
        "length": len(generated_response),
        "word_count": len(generated_response.split())
    }

    # If ground truth is provided, we could add metrics like BLEU or ROUGE
    if ground_truth:
        # Simplified evaluation - case-insensitive word overlap (set based,
        # so duplicate words count once).
        generated_words = set(generated_response.lower().split())
        ground_truth_words = set(ground_truth.lower().split())
        overlap = len(generated_words.intersection(ground_truth_words))
        results["word_overlap"] = overlap / len(ground_truth_words) if ground_truth_words else 0

    return results

def format_memory_stats() -> Dict[str, str]:
    """
    Format memory usage statistics for display.

    Returns:
        Dict of human-readable strings: system RAM usage, and — when CUDA
        is available — per-GPU device name and allocated/total memory.
    """
    import torch
    import psutil

    # Take a single snapshot so used/total/percent are mutually consistent
    # (three separate psutil calls could each report a different instant).
    vm = psutil.virtual_memory()

    # System memory
    system_stats = {
        "RAM": f"{vm.used / (1024 ** 3):.1f}GB / {vm.total / (1024 ** 3):.1f}GB",
        "RAM Usage": f"{vm.percent}%",
    }

    # GPU memory if available
    if torch.cuda.is_available():
        gpu_stats = {}
        for i in range(torch.cuda.device_count()):
            gpu_stats[f"GPU {i}"] = f"{torch.cuda.get_device_name(i)}"
            gpu_stats[f"GPU {i} Memory"] = f"{torch.cuda.memory_allocated(i) / (1024 ** 3):.1f}GB / {torch.cuda.get_device_properties(i).total_memory / (1024 ** 3):.1f}GB"
        system_stats.update(gpu_stats)

    return system_stats