"""Aggregate Nsight Compute profiling CSVs per workload/config, average them
over iterations, export per-metric CSV summaries, and plot grouped bar charts.

Directory layout assumption (from the walking code below): each config dir
under ./micro/ and ./realworld/ contains '<workload>_perf' directories, with
darknet workloads one level deeper under 'darknet_perf/'.
"""

import argparse
import collections
import csv
import os
import subprocess
import sys
from subprocess import Popen, PIPE

import matplotlib
import matplotlib.ticker as mticker
import numpy as np
import pandas as pd
import psutil
import seaborn as sns
import xlsxwriter
from matplotlib import pyplot as plt
from matplotlib.legend_handler import HandlerTuple
from matplotlib.ticker import FormatStrFormatter
from scipy.stats import gmean

prefix = 'run_'
# Parameter sweep names; only 'super' is collected here.
parameter_super_list = ['super']
# Memory-management configurations that are compared against each other.
config_super_list = ['standard', 'async', 'uvm', 'uvm_prefetch', 'uvm_prefetch_async']
# Workloads discovered directly under each config directory.
workload_super_list = ['gemm', 'lud']
# Darknet sub-workloads discovered under '<config>/darknet_perf'.
darknet_super_list = ['yolov3']


def dict_to_list(input_dict):
    """Return the keys of *input_dict* as a list, preserving insertion order."""
    return list(input_dict)


def addOptions(parser):
    """Register the command-line options used by this script on *parser*."""
    parser.add_argument("-i", "--iterations", type=int, default=1,
                        help="Number of iterations")
    parser.add_argument("-c", "--csv", type=str, default='output.xlsx',
                        help="output trace log file")
    parser.add_argument("-f", "--figure", type=str, default='micro',
                        help="output pdf file")


def get_config_list(root_directory):
    """Return the subdirectories of *root_directory* that are known configs.

    Only names listed in config_super_list are kept.
    """
    config_list = []
    for entry in os.listdir(root_directory):
        if os.path.isdir(os.path.join(root_directory, entry)) and entry in config_super_list:
            print(entry)
            config_list.append(entry)
    return config_list


def get_workload_dict(root_directory, config_list):
    """Discover workload result directories for every config.

    Returns (workload_list, workload_dict) where
    workload_dict[workload][config] is the path holding that workload's
    profiling output for that config.  Regular workloads live in
    '<workload>_perf' directories; darknet sub-workloads (darknet_super_list)
    are nested one level deeper under '<config>/darknet_perf'.
    """
    workload_list = []
    workload_dict = dict()
    for config in config_list:
        config_dir = root_directory + '/' + config
        for root, directories, files in os.walk(config_dir, topdown=False):
            for name in directories:
                if name in workload_super_list:
                    if name not in workload_dict:
                        workload_dict[name] = dict()
                    workload_dict[name][config] = os.path.join(root, name + '_perf')
                    if name not in workload_list:
                        workload_list.append(name)
                if name == 'darknet':
                    # Darknet bundles several networks; look one level deeper.
                    darknet_root = config_dir + '/darknet_perf'
                    for sub_root, sub_dirs, sub_files in os.walk(darknet_root, topdown=False):
                        for sub_name in sub_dirs:
                            if sub_name in darknet_super_list:
                                if sub_name not in workload_dict:
                                    workload_dict[sub_name] = dict()
                                workload_dict[sub_name][config] = os.path.join(sub_root, sub_name)
                                if sub_name not in workload_list:
                                    workload_list.append(sub_name)
    return workload_list, workload_dict


def get_run_cmd(bash_file):
    """Concatenate all lines of *bash_file* (newlines stripped) into one string."""
    return_txt = ''
    with open(bash_file, "r") as text:
        for line in text:
            return_txt += line.rstrip()
    return return_txt


def process_file(log_file, perf_list):
    """Parse one Nsight Compute CSV log and reduce it to summary metrics.

    Strips '==PROF==' status lines from the file in place, sums the raw
    counters named in *perf_list*, and returns a dict with instruction-mix
    counts ('memory', 'control', 'int', 'fp') and L1 'load_miss_rate' /
    'store_miss_rate'.

    result_map values are *indices into perf_list* (as built by main()), so
    the first 11 entries of perf_list must keep their order.
    """
    result_map = dict()
    result_map['memory'] = [1]
    result_map['control'] = [2]
    result_map['int'] = [3]
    result_map['fp'] = [4, 5, 6]        # fp16 + fp32 + fp64
    result_map['load'] = [7]
    result_map['load_hit'] = [8]
    result_map['store'] = [9]
    result_map['store_hit'] = [10]

    # Make sure the L1 load/store counters are present.  Append only when
    # missing: the original unconditional appends grew the caller's shared
    # list by four entries on every call.
    for metric in ('l1tex__t_sectors_pipe_lsu_mem_global_op_ld_lookup_hit.sum',
                   'l1tex__t_sectors_pipe_lsu_mem_global_op_st_lookup_hit.sum',
                   'l1tex__t_sectors_pipe_lsu_mem_global_op_ld.sum',
                   'l1tex__t_sectors_pipe_lsu_mem_global_op_st.sum'):
        if metric not in perf_list:
            perf_list.append(metric)

    print(log_file)
    # Drop profiler status lines so pandas can parse the remainder as CSV,
    # rewriting the file in place ('w' truncates, so no explicit remove needed).
    with open(log_file, "r") as text:
        lines = [line for line in text if "==PROF==" not in line]
    with open(log_file, "w") as out:
        out.writelines(lines)

    content = pd.read_csv(log_file)
    content = content[["Metric Name", "Metric Value"]].to_numpy()
    content_dict = dict()
    for ele in content:
        if ele[0] in perf_list:
            if ele[0] not in content_dict:
                content_dict[ele[0]] = []
            # Values are formatted with thousands separators, e.g. "1,234".
            content_dict[ele[0]].append(int(ele[1].replace(',', '')))

    # Sum each group of raw counters (one list entry per profiled kernel).
    result_dict = dict()
    for key in result_map:
        result_dict[key] = 0
        for index in result_map[key]:
            result_dict[key] += sum(content_dict[perf_list[index]])

    return_dict = dict()
    return_dict['memory'] = result_dict['memory']
    return_dict['control'] = result_dict['control']
    return_dict['int'] = result_dict['int']
    return_dict['fp'] = result_dict['fp']
    # Guard against kernels with no global loads/stores (avoid ZeroDivisionError).
    return_dict['load_miss_rate'] = (
        (result_dict['load'] - result_dict['load_hit']) / result_dict['load']
        if result_dict['load'] else 0.0)
    return_dict['store_miss_rate'] = (
        (result_dict['store'] - result_dict['store_hit']) / result_dict['store']
        if result_dict['store'] else 0.0)
    print(return_dict)
    return return_dict


def process_results(workload_dict, workload_list, iterations, perf_list):
    """Process every per-iteration profile log.

    Returns result_dict[parameter][workload][config] -> list (one entry per
    iteration) of the summary dicts produced by process_file().
    """
    result_dict = dict()
    for workload in workload_dict:
        if workload not in workload_list:
            continue
        for config in workload_dict[workload]:
            if config not in config_super_list:
                continue
            cur_dir = workload_dict[workload][config]
            for para in parameter_super_list:
                if para not in result_dict:
                    result_dict[para] = dict()
                if workload not in result_dict[para]:
                    result_dict[para][workload] = dict()
                result_dict[para][workload][config] = []
                for i in range(0, iterations):
                    log_file = cur_dir + '/' + para + '_' + str(i) + '.profile.csv'
                    result_dict[para][workload][config].append(
                        process_file(log_file, perf_list))
                    print(workload, config, para, i,
                          result_dict[para][workload][config][i])
    return result_dict


def export_csv(result_dict, config_list, iterations, sub_metric):
    """Average the 'super' results over iterations and write one CSV per metric.

    Each 'super_profile_<metric>.csv' has a header row of configs and one row
    per workload.  Returns the list of CSV filenames written.
    """
    workload_list = dict_to_list(result_dict['super'])
    super_avg_dict = dict()
    for workload in workload_list:
        super_avg_dict[workload] = dict()
        for config in config_list:
            avg = super_avg_dict[workload][config] = dict()
            metric_list = dict_to_list(result_dict['super'][workload][config][0])
            for metric in metric_list:
                avg[metric] = 0
            avg['all'] = 0  # running total across all metrics, for normalization
            for i in range(0, iterations):
                for metric in metric_list:
                    value = result_dict['super'][workload][config][i][metric]
                    avg[metric] += value / iterations
                    avg['all'] += value / iterations

    csv_list = []
    for metric in sub_metric:
        profile_csv_file = 'super_profile_' + metric + '.csv'
        csv_list.append(profile_csv_file)
        # Plain '\n' line endings: os.linesep in text mode writes '\r\r\n'
        # on Windows, which breaks downstream CSV parsing.
        with open(profile_csv_file, "w") as out:
            out.write('group,' + ','.join(config_list) + '\n')
            for workload in workload_list:
                row = [str(super_avg_dict[workload][config][metric])
                       for config in config_list]
                out.write(workload + ',' + ','.join(row) + '\n')
    return csv_list


def normalize(arr, t_min, t_max):
    """Linearly rescale *arr* into the range [t_min, t_max]."""
    diff = t_max - t_min
    arr_min = min(arr)                  # hoisted: was recomputed per element
    diff_arr = max(arr) - arr_min
    return [(((i - arr_min) * diff) / diff_arr) + t_min for i in arr]


def plot_results(csv_file, output_file):
    """Render one grouped bar chart from *csv_file* and save it to *output_file*.

    Files with 'rate' in the name get the miss-rate layout (taller figure,
    legend above the axes); everything else gets the instruction-count layout.
    The y-axis is log scale in both cases.
    """
    df = pd.read_csv(csv_file, index_col=0)
    group_list = []
    for index in df.index:
        if index not in group_list:
            group_list.append(index)
    col_list = df.columns
    ngroups = len(group_list)
    x = np.arange(ngroups)
    nbars = len(col_list)
    width = (1 - 0.4) / (1.5 * nbars)   # the width of the bars
    print(group_list)

    matplotlib.rcParams["hatch.linewidth"] = 2
    color_tab = ['#000000', '#0000ff', '#ff0000', '#ff6666', '#00ff00']
    edge_color_tab = ['#000000', '#000000', '#000000', '#000000', '#000000', '#000000']

    if "rate" in csv_file:
        fig, ax = plt.subplots(figsize=[5, 6])
    else:
        fig, ax = plt.subplots(figsize=[5, 4])

    print(nbars)
    print(col_list)
    for i in range(0, nbars):
        height_total = np.array([1 for g in group_list])
        height_curr = np.array([float(df[col_list[i]][g]) for g in group_list])
        print(height_total)
        print(height_curr)
        ax.bar(x - 0.3 + (3 * i + 1.5) * width / 2,   # bar center within group
               height_curr / height_total,
               width, label=col_list[i],
               color=color_tab[i],
               edgecolor=edge_color_tab[i],
               linewidth=0.5)

    ax.set_xticks(x)
    ax.set_xticklabels(group_list, rotation=0)
    if "rate" in csv_file:
        ax.legend(loc='upper left', ncol=1, bbox_to_anchor=(0.3, 1.4), fontsize=14)
    else:
        ax.legend(fontsize=14)
    ax.set_yscale('log')
    plt.xticks(fontsize=15, rotation=15)
    plt.yticks(fontsize=15)
    plt.grid(axis='y')
    plt.xlabel("")
    if "rate" in csv_file:
        plt.ylabel("Miss rate", fontsize=15)
    else:
        plt.ylabel("Inst count", fontsize=15)
    plt.tight_layout()
    plt.savefig(output_file, bbox_inches='tight')
    plt.close()


def main():
    """Collect results from ./micro/ and ./realworld/, export CSVs, plot PDFs."""
    parser = argparse.ArgumentParser()
    addOptions(parser)
    options = parser.parse_args()
    iterations = options.iterations

    # NOTE: process_file()'s result_map indexes this list by position, so the
    # order of these first 11 entries is significant.
    perf_list = []
    perf_list.append('smsp__inst_executed.sum')
    perf_list.append('smsp__sass_thread_inst_executed_op_memory_pred_on.sum')
    perf_list.append('smsp__sass_thread_inst_executed_op_control_pred_on.sum')
    perf_list.append('smsp__sass_thread_inst_executed_op_fp16_pred_on.sum')
    perf_list.append('smsp__sass_thread_inst_executed_op_fp32_pred_on.sum')
    perf_list.append('smsp__sass_thread_inst_executed_op_fp64_pred_on.sum')
    perf_list.append('smsp__sass_thread_inst_executed_op_integer_pred_on.sum')
    perf_list.append('l1tex__t_sectors_pipe_lsu_mem_global_op_ld.sum')
    perf_list.append('l1tex__t_sectors_pipe_lsu_mem_global_op_ld_lookup_hit.sum')
    perf_list.append('l1tex__t_sectors_pipe_lsu_mem_global_op_st.sum')
    perf_list.append('l1tex__t_sectors_pipe_lsu_mem_global_op_st_lookup_hit.sum')

    micro_root_directory = './micro/'
    real_root_directory = './realworld/'
    config_list = get_config_list(micro_root_directory)
    micro_workload_list, micro_workload_dict = get_workload_dict(
        micro_root_directory, config_list)
    real_workload_list, real_workload_dict = get_workload_dict(
        real_root_directory, config_list)

    # Merge micro and real-world discovery; micro takes precedence on clash.
    workload_list = micro_workload_list + real_workload_list
    workload_dict = dict()
    for workload in workload_list:
        if workload in micro_workload_dict:
            workload_dict[workload] = micro_workload_dict[workload]
        else:
            workload_dict[workload] = real_workload_dict[workload]
    print(workload_dict)

    metric_list = ['memory', 'control', 'fp', 'int', 'load_miss_rate', 'store_miss_rate']
    result_dict = process_results(workload_dict, workload_list, iterations, perf_list)
    csv_list = export_csv(result_dict, config_super_list, iterations, metric_list)
    for csv_file in csv_list:
        plot_results(csv_file, csv_file.replace(".csv", ".pdf"))


if __name__ == '__main__':
    main()