#!/usr/bin/env bash
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
# Fail fast: exit on errors, unset variables, and failed pipeline stages.
set -euo pipefail

# Positional arguments: model name (default yolo26n) and precision (default
# FP16).  Precision is normalized to upper case so fp16/Fp16/FP16 all work.
MODEL_NAME="${1:-yolo26n}"
PRECISION="${2:-FP16}"
PRECISION="$(printf '%s' "${PRECISION}" | tr '[:lower:]' '[:upper:]')"

# Only three precisions are supported; reject anything else up front.
case "${PRECISION}" in
  FP32|FP16|INT8) ;;
  *)
    echo "ERROR: unsupported precision '${PRECISION}'. Choose FP32, FP16, or INT8." >&2
    exit 1
    ;;
esac
|
|
| echo "--- Installing dependencies ---" |
| if [[ "${PRECISION}" == "INT8" ]]; then |
| pip install -qU "openvino>=2026.0.0" "nncf>=3.0.0" ultralytics |
| else |
| pip install -qU "openvino>=2026.0.0" ultralytics |
| fi |
|
|
| echo "--- Downloading sample test video ---" |
| if [[ ! -f test_video.mp4 ]]; then |
| wget -q -O test_video.mp4 \ |
| https://github.com/intel-iot-devkit/sample-videos/raw/master/people-detection.mp4 |
| echo "Downloaded: test_video.mp4" |
| else |
| echo "Already present: test_video.mp4" |
| fi |
|
|
| echo "--- Downloading sample test image ---" |
| if [[ ! -f test.jpg ]]; then |
| wget -q -O test.jpg https://ultralytics.com/images/bus.jpg |
| echo "Downloaded: test.jpg" |
| else |
| echo "Already present: test.jpg" |
| fi |
|
|
| if [[ "${PRECISION}" == "FP32" ]]; then |
| HALF_FLAG="False" |
| EXPORT_LABEL="FP32" |
| else |
| HALF_FLAG="True" |
| EXPORT_LABEL="FP16" |
| fi |
|
|
| echo "--- Exporting ${MODEL_NAME} to OpenVINO IR (${EXPORT_LABEL}) ---" |
| python3 -c " |
| from ultralytics import YOLO |
| |
| model = YOLO('${MODEL_NAME}.pt') |
| model.export(format='openvino', half=${HALF_FLAG}, dynamic=False, imgsz=640) |
| print('Export complete: ${MODEL_NAME}_openvino_model/') |
| " |
|
|
| if [[ "${PRECISION}" == "INT8" ]]; then |
| echo "--- Quantizing to INT8 with NNCF ---" |
| python3 -c " |
| import nncf |
| import openvino as ov |
| import numpy as np |
| import cv2 |
| |
| core = ov.Core() |
| model = core.read_model('${MODEL_NAME}_openvino_model/${MODEL_NAME}.xml') |
| |
| # Extract frames from the sample video for calibration. |
| cap = cv2.VideoCapture('test_video.mp4') |
| frames = [] |
| while len(frames) < 300: |
| ret, frame = cap.read() |
| if not ret: |
| cap.set(cv2.CAP_PROP_POS_FRAMES, 0) |
| continue |
| img = cv2.resize(frame, (640, 640)) |
| img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0 |
| img = img.transpose(2, 0, 1)[np.newaxis, ...] |
| frames.append(img) |
| cap.release() |
| |
| def transform_fn(data_item): |
| return frames[data_item % len(frames)] |
| |
| calibration_dataset = nncf.Dataset(list(range(300)), transform_fn) |
| |
| quantized = nncf.quantize( |
| model, |
| calibration_dataset, |
| preset=nncf.QuantizationPreset.MIXED, |
| subset_size=300, |
| ) |
| |
| ov.save_model(quantized, '${MODEL_NAME}_tracking_int8.xml') |
| print('Quantization complete: ${MODEL_NAME}_tracking_int8.xml') |
| " |
| fi |
| echo "--- Done ---" |
|
|