|
|
|
|
|
|
|
|
""" |
|
|
This script tests the execution time of the DTLN model on a CPU. |
|
|
Please use TF 2.2 for comparability. |
|
|
|
|
|
Just run "python measure_execution_time.py" |
|
|
|
|
|
Author: Nils L. Westhausen (nils.westhausen@uol.de) |
|
|
Version: 13.05.2020 |
|
|
|
|
|
This code is licensed under the terms of the MIT-license. |
|
|
""" |
|
|
|
|
|
import time |
|
|
import tensorflow as tf |
|
|
import numpy as np |
|
|
import os |
|
|
|
|
|
|
|
|
os.environ["CUDA_VISIBLE_DEVICES"]='' |
|
|
|
|
|
if __name__ == '__main__':

    # Load the exported DTLN SavedModel and grab its default serving signature.
    model = tf.saved_model.load('./pretrained_model/dtln_saved_model')

    infer = model.signatures["serving_default"]

    # Per-iteration latencies in seconds.
    exec_time = []

    # One 512-sample block of random noise as dummy input; the model expects
    # float32 frames of shape (1, 512).
    x = np.random.randn(1, 512).astype('float32')

    # 1010 iterations: the first 10 act as warm-up (graph tracing, caches)
    # and are excluded from the reported mean below.
    for idx in range(1010):

        # FIX: use time.perf_counter() instead of time.time() -- it is
        # monotonic and has the highest available resolution, which matters
        # when timing sub-millisecond inference blocks; time.time() is
        # wall-clock and may be coarse or jump (e.g. NTP adjustments).
        start_time = time.perf_counter()

        # 'conv1d_1' is the output tensor name of the serving signature.
        y = infer(tf.constant(x))['conv1d_1']

        exec_time.append(time.perf_counter() - start_time)

    # Mean per-block latency in milliseconds, skipping the 10 warm-up runs.
    print('Execution time per block: ' +
          str(np.round(np.mean(np.stack(exec_time[10:])) * 1000, 2)) + ' ms')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|