import warnings

warnings.simplefilter("ignore", FutureWarning)
warnings.simplefilter("ignore", UserWarning)

import torch
from torch.profiler import profile, ProfilerActivity
from model.model import Thundernet
from models_repo.model_attention import Thundernet as Thundernet_attention
from models_repo.model_attention_2 import Thundernet as Thundernet_attention2
from models_repo.model_ppm_factors import Thundernet as Thundernet_ppm

import time
import cv2
import numpy as np
import tensorflow as tf

# Run on the first CUDA GPU when one is available, otherwise fall back to CPU.
device: torch.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Network input shape as (height, width, channels).
input_shape = (480, 640, 3)


def execute_profiler(model: "tf.keras.Model") -> None:
    """
    Profile one forward pass, measuring CPU and CUDA times.

    Runs a single ``model.predict`` call on a random dummy frame under
    ``torch.profiler`` and prints an operator-level table (sorted by total
    CUDA time, including memory stats) to the console.

    Args:
    - model: loaded model to profile; must expose a ``predict`` method.
      NOTE(review): ``main()`` passes ``ThunderNet.model`` (the underlying
      Keras model), not the ``Thundernet`` wrapper — annotation reflects that.
    Returns:
    - None
    """
    # Random dummy frame matching the expected (batch, H, W, C) input.
    image = torch.randn(1, 480, 640, 3).cpu().numpy()

    with profile(
        activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
        record_shapes=True,
        profile_memory=True,
    ) as prof:
        # Inference only — no gradients needed.
        with torch.no_grad():
            _ = model.predict(image)

    print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=20))


def measure_inference_time(model: "tf.keras.Model") -> None:
    """
    Measure the average inference time and FPS of a Thundernet model.

    Performs 5 warm-up predictions, then times 20 predictions on a random
    dummy frame and prints the mean latency (ms) and the resulting FPS.

    Args:
    - model: loaded model to benchmark; must expose a ``predict`` method.
    Returns:
    - None
    """
    # Random dummy frame matching the expected (batch, H, W, C) input.
    image = torch.randn(1, 480, 640, 3).cpu().numpy()

    # Warm-up runs so lazy initialization / graph tracing does not pollute
    # the measured timings.
    for _ in range(5):
        _ = model.predict(image)

    times = []
    for _ in range(20):
        # Tiny op + .numpy() forces TensorFlow to flush any queued device
        # work before and after the timed region.
        tf.constant(0).numpy()
        # perf_counter is monotonic and higher resolution than time.time.
        start = time.perf_counter()
        _ = model.predict(image)
        tf.constant(0).numpy()
        times.append((time.perf_counter() - start) * 1000)

    avg_time = sum(times) / len(times)
    print(f"Average inference time: {avg_time:.2f} ms")

    fps = 1000 / avg_time
    print(f"FPS: {fps:.2f}")


def main() -> None:
    """
    Build Thundernet, load the checkpoint, and run both benchmarks.

    Loads weights from an .hdf5 file, then runs the torch profiler and the
    inference-time/FPS measurement on the underlying Keras model.
    """
    # Path to the best checkpoint so far (.hdf5).
    weights_path = "keras.hdf5"

    # Build the architecture. Swap the class (Thundernet_attention,
    # Thundernet_attention2, Thundernet_ppm, ...) to match wherever the
    # checkpoint was trained.
    thundernet = Thundernet(
        input_shape=input_shape, resnet_trainable=False, n_classes=2
    )
    model = thundernet.model
    model.load_weights(weights_path)

    execute_profiler(model)
    measure_inference_time(model)


if __name__ == "__main__":
    main()