import copy

import torch
from thop import profile
def FLOPs_and_Params(model, img_size, len_clip, device):
    """Print the FLOPs and parameter count of a video model using thop.

    A dummy clip of shape (1, 3, len_clip, img_size, img_size) is pushed
    through the model once; results are printed in GFLOPs / M params.

    Args:
        model: the video model to profile. It is switched to eval mode for
            the measurement and restored to train mode before returning.
        img_size (int): spatial height/width of the dummy input clip.
        len_clip (int): number of frames in the dummy input clip.
        device: torch device the dummy clip is allocated on.

    Returns:
        None. Results are printed to stdout only.
    """
    # Dummy input clip: batch=1, 3 channels, len_clip frames, square frames.
    video_clip = torch.randn(1, 3, len_clip, img_size, img_size).to(device)

    # Switch to eval mode so dropout/batchnorm behave deterministically.
    # NOTE(review): `trainable` looks like a project-specific flag on the
    # model — confirm its semantics against the model class.
    model.trainable = False
    model.eval()

    print('==============================')
    # Profile a deep copy: thop's profile() registers total_ops/total_params
    # buffers on every submodule, which would otherwise permanently pollute
    # the caller's model. The dummy forward needs no gradients either.
    model_copy = copy.deepcopy(model)
    with torch.no_grad():
        flops, params = profile(model_copy, inputs=(video_clip, ))
    print('==============================')
    print('FLOPs : {:.2f} G'.format(flops / 1e9))
    print('Params : {:.2f} M'.format(params / 1e6))

    # Restore the training state the rest of the pipeline expects.
    model.trainable = True
    model.train()
if __name__ == "__main__":
    # Placeholder entry point: this module currently provides no standalone
    # demo; FLOPs_and_Params is intended to be imported and called elsewhere.
    pass