More reliable graphs
Browse files
graphs.py
CHANGED
|
@@ -629,6 +629,23 @@ def results_table(df: pd.DataFrame):
|
|
| 629 |
st.dataframe(df, height=min((len(df) + 1) * 35, 35 * 21))
|
| 630 |
|
| 631 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 632 |
def device_funnel(df: pd.DataFrame) -> None:
|
| 633 |
"""
|
| 634 |
Show count of how many models compile, assemble, etc
|
|
@@ -650,26 +667,26 @@ def device_funnel(df: pd.DataFrame) -> None:
|
|
| 650 |
|
| 651 |
# Show Sankey graph with percentages
|
| 652 |
sk_val = {
|
| 653 |
-
"All models":
|
| 654 |
-
"Converts to ONNX":
|
| 655 |
-
|
| 656 |
-
|
| 657 |
-
|
| 658 |
-
|
| 659 |
-
|
| 660 |
-
|
| 661 |
-
|
| 662 |
-
+ "
|
| 663 |
-
"Acquires
|
| 664 |
-
+
|
| 665 |
-
+ "% (Nvidia)",
|
| 666 |
-
"Acquires Groq Perf": f"{summ.groq} models - "
|
| 667 |
-
+ str(int(100 * summ.groq / summ.all_models))
|
| 668 |
-
+ "% (Groq)",
|
| 669 |
-
"Acquires x86 Perf": f"{summ.x86} models - "
|
| 670 |
-
+ str(int(100 * summ.x86 / summ.all_models))
|
| 671 |
-
+ "% (x86)",
|
| 672 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 673 |
option = {
|
| 674 |
"series": {
|
| 675 |
"type": "sankey",
|
|
@@ -745,42 +762,32 @@ def device_funnel(df: pd.DataFrame) -> None:
|
|
| 745 |
{
|
| 746 |
"source": "All models",
|
| 747 |
"target": "Converts to ONNX",
|
| 748 |
-
"value": summ.all_models,
|
| 749 |
},
|
| 750 |
{
|
| 751 |
"source": "Converts to ONNX",
|
| 752 |
"target": "Optimizes ONNX file",
|
| 753 |
-
"value": summ.optimized_onnx,
|
| 754 |
},
|
| 755 |
{
|
| 756 |
"source": "Optimizes ONNX file",
|
| 757 |
"target": "Converts to FP16",
|
| 758 |
-
"value": summ.fp16_onnx,
|
| 759 |
},
|
| 760 |
{
|
| 761 |
"source": "Converts to FP16",
|
| 762 |
"target": "Acquires Nvidia Perf",
|
| 763 |
-
"value":
|
| 764 |
-
summ.nvidia
|
| 765 |
-
* summ.fp16_onnx
|
| 766 |
-
/ (summ.x86 + summ.nvidia + summ.groq)
|
| 767 |
-
),
|
| 768 |
},
|
| 769 |
{
|
| 770 |
"source": "Converts to FP16",
|
| 771 |
"target": "Acquires Groq Perf",
|
| 772 |
-
"value":
|
| 773 |
-
summ.groq
|
| 774 |
-
* summ.fp16_onnx
|
| 775 |
-
/ (summ.x86 + summ.nvidia + summ.groq)
|
| 776 |
-
),
|
| 777 |
},
|
| 778 |
{
|
| 779 |
"source": "Converts to FP16",
|
| 780 |
"target": "Acquires x86 Perf",
|
| 781 |
-
"value":
|
| 782 |
-
summ.x86 * summ.fp16_onnx / (summ.x86 + summ.nvidia + summ.groq)
|
| 783 |
-
),
|
| 784 |
},
|
| 785 |
],
|
| 786 |
}
|
|
|
|
| 629 |
st.dataframe(df, height=min((len(df) + 1) * 35, 35 * 21))
|
| 630 |
|
| 631 |
|
| 632 |
+
def device_funnel_metrics(num_models: int, num_total_models: int) -> str:
    """
    Format a model count together with its share of the total, e.g. "5 models - 50%".

    A nonzero share below 1% is rendered as "< 1%". When the divisor
    (num_total_models) is zero the percentage part is omitted entirely,
    which avoids a ZeroDivisionError.
    """
    # Pluralize: "1 model" but "0 models" / "2 models".
    suffix = "" if num_models == 1 else "s"
    label = f"{num_models} model{suffix}"

    if num_total_models <= 0:
        # Nothing meaningful to divide by: show the bare count.
        return label

    ratio = num_models / num_total_models
    if ratio != 0 and ratio < 0.01:
        # Tiny-but-nonzero shares would truncate to 0%; say "< 1%" instead.
        return f"{label} - < 1%"
    return f"{label} - {int(100*num_models / num_total_models)}%"
|
| 647 |
+
|
| 648 |
+
|
| 649 |
def device_funnel(df: pd.DataFrame) -> None:
|
| 650 |
"""
|
| 651 |
Show count of how many models compile, assemble, etc
|
|
|
|
| 667 |
|
| 668 |
# Show Sankey graph with percentages
|
| 669 |
sk_val = {
|
| 670 |
+
"All models": device_funnel_metrics(summ.all_models, summ.all_models),
|
| 671 |
+
"Converts to ONNX": device_funnel_metrics(summ.base_onnx, summ.all_models),
|
| 672 |
+
"Optimizes ONNX file": device_funnel_metrics(
|
| 673 |
+
summ.optimized_onnx, summ.all_models
|
| 674 |
+
),
|
| 675 |
+
"Converts to FP16": device_funnel_metrics(summ.fp16_onnx, summ.all_models),
|
| 676 |
+
"Acquires Nvidia Perf": device_funnel_metrics(summ.nvidia, summ.all_models)
|
| 677 |
+
+ " (Nvidia)",
|
| 678 |
+
"Acquires Groq Perf": device_funnel_metrics(summ.groq, summ.all_models)
|
| 679 |
+
+ " (Groq)",
|
| 680 |
+
"Acquires x86 Perf": device_funnel_metrics(summ.x86, summ.all_models)
|
| 681 |
+
+ " (x86)",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 682 |
}
|
| 683 |
+
|
| 684 |
+
# Calculate bar heights for each of the devices
|
| 685 |
+
# Bar height is proportional to the number of models benchmarked by each device
|
| 686 |
+
default_bar_size = 1
|
| 687 |
+
target_combined_height = max(default_bar_size, summ.fp16_onnx)
|
| 688 |
+
device_bar_size = target_combined_height / 3
|
| 689 |
+
|
| 690 |
option = {
|
| 691 |
"series": {
|
| 692 |
"type": "sankey",
|
|
|
|
| 762 |
{
|
| 763 |
"source": "All models",
|
| 764 |
"target": "Converts to ONNX",
|
| 765 |
+
"value": max(default_bar_size, summ.all_models),
|
| 766 |
},
|
| 767 |
{
|
| 768 |
"source": "Converts to ONNX",
|
| 769 |
"target": "Optimizes ONNX file",
|
| 770 |
+
"value": max(default_bar_size, summ.optimized_onnx),
|
| 771 |
},
|
| 772 |
{
|
| 773 |
"source": "Optimizes ONNX file",
|
| 774 |
"target": "Converts to FP16",
|
| 775 |
+
"value": max(default_bar_size, summ.fp16_onnx),
|
| 776 |
},
|
| 777 |
{
|
| 778 |
"source": "Converts to FP16",
|
| 779 |
"target": "Acquires Nvidia Perf",
|
| 780 |
+
"value": device_bar_size,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 781 |
},
|
| 782 |
{
|
| 783 |
"source": "Converts to FP16",
|
| 784 |
"target": "Acquires Groq Perf",
|
| 785 |
+
"value": device_bar_size,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 786 |
},
|
| 787 |
{
|
| 788 |
"source": "Converts to FP16",
|
| 789 |
"target": "Acquires x86 Perf",
|
| 790 |
+
"value": device_bar_size,
|
|
|
|
|
|
|
| 791 |
},
|
| 792 |
],
|
| 793 |
}
|