Upload README.md with huggingface_hub
Browse files
README.md
CHANGED
|
@@ -37,19 +37,20 @@ More details on model performance across various devices can be found
|
|
| 37 |
|
| 38 |
| Model | Device | Chipset | Target Runtime | Inference Time (ms) | Peak Memory Range (MB) | Precision | Primary Compute Unit | Target Model
|
| 39 |
|---|---|---|---|---|---|---|---|---|
|
| 40 |
-
| DDRNet23-Slim | Samsung Galaxy S23 | Snapdragon® 8 Gen 2 | TFLITE | 5.
|
| 41 |
-
| DDRNet23-Slim | Samsung Galaxy S23 | Snapdragon® 8 Gen 2 | ONNX | 7.
|
| 42 |
-
| DDRNet23-Slim | Samsung Galaxy S24 | Snapdragon® 8 Gen 3 | TFLITE | 3.
|
| 43 |
-
| DDRNet23-Slim | Samsung Galaxy S24 | Snapdragon® 8 Gen 3 | ONNX | 4.952 ms | 11 -
|
| 44 |
-
| DDRNet23-Slim | Snapdragon 8 Elite QRD | Snapdragon® 8 Elite | TFLITE |
|
| 45 |
-
| DDRNet23-Slim | Snapdragon 8 Elite QRD | Snapdragon® 8 Elite | ONNX | 4.
|
| 46 |
-
| DDRNet23-Slim | QCS8550 (Proxy) | QCS8550 Proxy | TFLITE | 5.
|
| 47 |
-
| DDRNet23-Slim |
|
| 48 |
-
| DDRNet23-Slim |
|
| 49 |
-
| DDRNet23-Slim |
|
| 50 |
-
| DDRNet23-Slim |
|
| 51 |
-
| DDRNet23-Slim |
|
| 52 |
-
| DDRNet23-Slim |
|
|
|
|
| 53 |
|
| 54 |
|
| 55 |
|
|
@@ -114,7 +115,7 @@ DDRNet23-Slim
|
|
| 114 |
Device : Samsung Galaxy S23 (13)
|
| 115 |
Runtime : TFLITE
|
| 116 |
Estimated inference time (ms) : 5.1
|
| 117 |
-
Estimated peak memory usage (MB): [
|
| 118 |
Total # Ops : 131
|
| 119 |
Compute Unit(s) : NPU (131 ops)
|
| 120 |
```
|
|
@@ -135,13 +136,29 @@ in memory using the `jit.trace` and then call the `submit_compile_job` API.
|
|
| 135 |
import torch
|
| 136 |
|
| 137 |
import qai_hub as hub
|
| 138 |
-
from qai_hub_models.models.ddrnet23_slim import
|
| 139 |
|
| 140 |
# Load the model
|
|
|
|
| 141 |
|
| 142 |
# Device
|
| 143 |
device = hub.Device("Samsung Galaxy S23")
|
| 144 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
|
| 146 |
```
|
| 147 |
|
|
|
|
| 37 |
|
| 38 |
| Model | Device | Chipset | Target Runtime | Inference Time (ms) | Peak Memory Range (MB) | Precision | Primary Compute Unit | Target Model
|
| 39 |
|---|---|---|---|---|---|---|---|---|
|
| 40 |
+
| DDRNet23-Slim | Samsung Galaxy S23 | Snapdragon® 8 Gen 2 | TFLITE | 5.087 ms | 1 - 20 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 41 |
+
| DDRNet23-Slim | Samsung Galaxy S23 | Snapdragon® 8 Gen 2 | ONNX | 7.462 ms | 9 - 25 MB | FP16 | NPU | [DDRNet23-Slim.onnx](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.onnx) |
|
| 42 |
+
| DDRNet23-Slim | Samsung Galaxy S24 | Snapdragon® 8 Gen 3 | TFLITE | 3.548 ms | 0 - 31 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 43 |
+
| DDRNet23-Slim | Samsung Galaxy S24 | Snapdragon® 8 Gen 3 | ONNX | 4.952 ms | 11 - 103 MB | FP16 | NPU | [DDRNet23-Slim.onnx](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.onnx) |
|
| 44 |
+
| DDRNet23-Slim | Snapdragon 8 Elite QRD | Snapdragon® 8 Elite | TFLITE | 2.854 ms | 0 - 28 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 45 |
+
| DDRNet23-Slim | Snapdragon 8 Elite QRD | Snapdragon® 8 Elite | ONNX | 4.11 ms | 11 - 58 MB | FP16 | NPU | [DDRNet23-Slim.onnx](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.onnx) |
|
| 46 |
+
| DDRNet23-Slim | QCS8550 (Proxy) | QCS8550 Proxy | TFLITE | 5.226 ms | 1 - 17 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 47 |
+
| DDRNet23-Slim | SA7255P ADP | SA7255P | TFLITE | 180.307 ms | 1 - 27 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 48 |
+
| DDRNet23-Slim | SA8255 (Proxy) | SA8255P Proxy | TFLITE | 5.107 ms | 1 - 19 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 49 |
+
| DDRNet23-Slim | SA8295P ADP | SA8295P | TFLITE | 8.933 ms | 1 - 22 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 50 |
+
| DDRNet23-Slim | SA8650 (Proxy) | SA8650P Proxy | TFLITE | 5.12 ms | 1 - 20 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 51 |
+
| DDRNet23-Slim | SA8775P ADP | SA8775P | TFLITE | 10.093 ms | 1 - 27 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 52 |
+
| DDRNet23-Slim | QCS8450 (Proxy) | QCS8450 Proxy | TFLITE | 7.54 ms | 1 - 26 MB | FP16 | NPU | [DDRNet23-Slim.tflite](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.tflite) |
|
| 53 |
+
| DDRNet23-Slim | Snapdragon X Elite CRD | Snapdragon® X Elite | ONNX | 8.348 ms | 9 - 9 MB | FP16 | NPU | [DDRNet23-Slim.onnx](https://huggingface.co/qualcomm/DDRNet23-Slim/blob/main/DDRNet23-Slim.onnx) |
|
| 54 |
|
| 55 |
|
| 56 |
|
|
|
|
| 115 |
Device : Samsung Galaxy S23 (13)
|
| 116 |
Runtime : TFLITE
|
| 117 |
Estimated inference time (ms) : 5.1
|
| 118 |
+
Estimated peak memory usage (MB): [1, 20]
|
| 119 |
Total # Ops : 131
|
| 120 |
Compute Unit(s) : NPU (131 ops)
|
| 121 |
```
|
|
|
|
| 136 |
import torch
|
| 137 |
|
| 138 |
import qai_hub as hub
|
| 139 |
+
from qai_hub_models.models.ddrnet23_slim import Model
|
| 140 |
|
| 141 |
# Load the model
|
| 142 |
+
torch_model = Model.from_pretrained()
|
| 143 |
|
| 144 |
# Device
|
| 145 |
device = hub.Device("Samsung Galaxy S23")
|
| 146 |
|
| 147 |
+
# Trace model
|
| 148 |
+
input_shape = torch_model.get_input_spec()
|
| 149 |
+
sample_inputs = torch_model.sample_inputs()
|
| 150 |
+
|
| 151 |
+
pt_model = torch.jit.trace(torch_model, [torch.tensor(data[0]) for _, data in sample_inputs.items()])
|
| 152 |
+
|
| 153 |
+
# Compile model on a specific device
|
| 154 |
+
compile_job = hub.submit_compile_job(
|
| 155 |
+
model=pt_model,
|
| 156 |
+
device=device,
|
| 157 |
+
input_specs=torch_model.get_input_spec(),
|
| 158 |
+
)
|
| 159 |
+
|
| 160 |
+
# Get target model to run on-device
|
| 161 |
+
target_model = compile_job.get_target_model()
|
| 162 |
|
| 163 |
```
|
| 164 |
|