update preview, downscale options
Files changed:

- dist/assets/{index-CmWeD5AW.js → index-CLQ9w9OG.js} +0 -0
- dist/index.html +1 -1
- src/components/ComparePanel.vue +8 -7
- src/components/ControlPanel.vue +28 -1
- src/composables/useCompareStage.ts +1 -0
- src/composables/useOnnxInspector.ts +27 -7
- src/lib/onnxHelpers.ts +65 -14
- src/lib/types.ts +2 -1
dist/assets/{index-CmWeD5AW.js → index-CLQ9w9OG.js}
RENAMED
The diff for this file is too large to render. See raw diff.
dist/index.html
CHANGED

@@ -4,7 +4,7 @@
     <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>ONNX in Browser</title>
-    <script type="module" crossorigin src="/assets/index-CmWeD5AW.js"></script>
+    <script type="module" crossorigin src="/assets/index-CLQ9w9OG.js"></script>
    <link rel="stylesheet" crossorigin href="/assets/index-D-MCzeoS.css">
  </head>
  <body>
src/components/ComparePanel.vue
CHANGED

@@ -1,5 +1,5 @@
 <script setup lang="ts">
-import { computed, ref } from "vue";
+import { computed, ref, type CSSProperties } from "vue";
 import { useCompareStage } from "../composables/useCompareStage";
 import type { ResultInfo } from "../lib/types";
 

@@ -22,7 +22,7 @@ const emit = defineEmits<{
 const compareStageRef = ref<HTMLElement | null>(null);
 const hasBaseImage = computed(() => Boolean(props.baseImageUrl));
 const hasOverlayImage = computed(() => Boolean(props.overlayImageUrl));
-const { handleWheel, preventContextMenu, startCompareDrag, styleVars, zoomLabel } =
+const { handleWheel, preventContextMenu, startCompareDrag, styleVars, zoomLabel, zoomScale } =
   useCompareStage(compareStageRef, hasBaseImage, hasOverlayImage);
 
 const progressFillWidth = computed(() => {

@@ -69,15 +69,16 @@ const compareStageStyle = computed(() => ({
   background: checkerboardBackground,
 }));
 
-const baseImageStyle = {
+const baseImageStyle = computed<CSSProperties>(() => ({
   transform: "scale(var(--zoom))",
   transformOrigin: "var(--origin-x) var(--origin-y)",
-};
+  imageRendering: zoomScale.value > 1 ? "pixelated" : "auto",
+}));
 
-const overlayImageStyle = {
-  ...baseImageStyle,
+const overlayImageStyle = computed<CSSProperties>(() => ({
+  ...baseImageStyle.value,
   clipPath: "inset(0 calc(100% - var(--split-adjusted)) 0 0)",
-};
+}));
 
 const dividerStyle = {
   left: "calc(var(--split) - 1px)",
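Converting the plain style objects to `computed<CSSProperties>` is what lets `imageRendering` react to the zoom level. A minimal standalone sketch of the same pattern (the component markup is omitted; the `ref` stands in for the composable's `zoomScale`):

import { computed, ref, type CSSProperties } from "vue";

// Stand-in for the zoomScale computed exposed by useCompareStage.
const zoomScale = ref(1);

const imageStyle = computed<CSSProperties>(() => ({
  transform: "scale(var(--zoom))",
  transformOrigin: "var(--origin-x) var(--origin-y)",
  // Switch to nearest-neighbor CSS rendering past 100% zoom so individual
  // pixels stay crisp instead of being smoothed by the browser.
  imageRendering: zoomScale.value > 1 ? "pixelated" : "auto",
}));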
src/components/ControlPanel.vue
CHANGED

@@ -274,6 +274,31 @@ function handleImageChange(event: Event) {
           />
         </label>
 
+        <label class="flex flex-col gap-1">
+          <span class="text-xs font-medium text-stone-600">Pre resize algorithm</span>
+          <div class="relative">
+            <select
+              v-model="props.controls.preResizeAlgorithm"
+              :disabled="controlsDisabled"
+              class="w-full appearance-none rounded-lg border border-stone-300 bg-white px-3 py-2 pr-11 text-sm text-stone-800 outline-none transition focus:border-stone-500 disabled:cursor-not-allowed disabled:opacity-60"
+            >
+              <option value="lanczos">Lanczos</option>
+              <option value="area">Area</option>
+              <option value="bicubic">Bicubic</option>
+              <option value="nearest">Nearest neighbor</option>
+            </select>
+            <svg
+              class="pointer-events-none absolute right-3 top-1/2 h-4 w-4 -translate-y-1/2 text-stone-500"
+              viewBox="0 0 16 16"
+              fill="none"
+              aria-hidden="true"
+            >
+              <path d="M4 6l4 4 4-4" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.5" />
+            </svg>
+          </div>
+          <p class="m-0 text-xs italic leading-snug text-stone-500">Used when resizing the source image to the model input size.</p>
+        </label>
+
         <label class="flex flex-col gap-1">
           <span class="text-xs font-medium text-stone-600">Final scale</span>
           <input

@@ -290,7 +315,7 @@ function handleImageChange(event: Event) {
         </label>
 
         <label class="flex flex-col gap-1">
-          <span class="text-xs font-medium text-stone-600">…
+          <span class="text-xs font-medium text-stone-600">Post resize algorithm</span>
           <div class="relative">
             <select
               v-model="props.controls.outputScaleAlgorithm"

@@ -300,6 +325,7 @@ function handleImageChange(event: Event) {
               <option value="lanczos">Lanczos</option>
               <option value="area">Area</option>
               <option value="bicubic">Bicubic</option>
+              <option value="nearest">Nearest neighbor</option>
             </select>
             <svg
               class="pointer-events-none absolute right-3 top-1/2 h-4 w-4 -translate-y-1/2 text-stone-500"

@@ -310,6 +336,7 @@ function handleImageChange(event: Event) {
               <path d="M4 6l4 4 4-4" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.5" />
             </svg>
           </div>
+          <p class="m-0 text-xs italic leading-snug text-stone-500">Used only for the optional output resize after inference.</p>
         </label>
 
         <details class="group">
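Both dropdowns write straight into the reactive controls object, so the selected algorithm flows into the resize pipeline without extra wiring. A minimal sketch of that binding, assuming a pared-down controls shape:

import { reactive } from "vue";
import type { DownscaleAlgorithm } from "../lib/types";

// Pared-down stand-in for InspectorControls; only the two resize fields.
const controls = reactive({
  preResizeAlgorithm: "lanczos" as DownscaleAlgorithm,   // source to model input size
  outputScaleAlgorithm: "lanczos" as DownscaleAlgorithm, // optional post-inference resize
});

// In the template: <select v-model="controls.preResizeAlgorithm"> with one
// <option> per union member, exactly as in the diff above.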
src/composables/useCompareStage.ts
CHANGED

@@ -160,6 +160,7 @@ export function useCompareStage(
 
   return {
     styleVars,
+    zoomScale: computed(() => state.zoom),
     zoomLabel: computed(() => `${Math.round(state.zoom * 100)}%`),
     startCompareDrag,
     preventContextMenu,
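Exposing the zoom through a `computed` gives ComparePanel a live but read-only view of the composable's internal state. A minimal sketch of the idea:

import { computed, reactive } from "vue";

// Internal state stays private to the composable; consumers can read the
// computed views but cannot assign to them.
const state = reactive({ zoom: 1 });

const zoomScale = computed(() => state.zoom);
const zoomLabel = computed(() => `${Math.round(state.zoom * 100)}%`);

state.zoom = 2.5;
console.log(zoomScale.value, zoomLabel.value); // 2.5 "250%"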
src/composables/useOnnxInspector.ts
CHANGED

@@ -29,6 +29,7 @@ import {
   resolveByteScale,
   resolveTargetImageSize,
   round,
+  resizeCanvas,
   safeStringify,
   summarizeTensor,
 } from "../lib/onnxHelpers";

@@ -61,6 +62,7 @@ const DEFAULT_CONTROLS: InspectorControls = {
   width: "600",
   height: "",
   outputScale: "",
+  preResizeAlgorithm: "lanczos",
   outputScaleAlgorithm: "lanczos",
   optLevel: "all",
   webgpuLayout: "NCHW",

@@ -321,6 +323,7 @@ export function useOnnxInspector() {
     width: toStoredText(controls.width, DEFAULT_CONTROLS.width),
     height: toStoredText(controls.height, DEFAULT_CONTROLS.height),
     outputScale: toStoredText(controls.outputScale, DEFAULT_CONTROLS.outputScale),
+    preResizeAlgorithm: controls.preResizeAlgorithm,
     outputScaleAlgorithm: controls.outputScaleAlgorithm,
     optLevel: controls.optLevel,
     webgpuLayout: controls.webgpuLayout,

@@ -370,7 +373,17 @@
   controls.height = toStoredText(parsed.height, DEFAULT_CONTROLS.height);
   controls.outputScale = toStoredText(parsed.outputScale, DEFAULT_CONTROLS.outputScale);
 
-  if (…
+  if (
+    parsed.preResizeAlgorithm &&
+    ["lanczos", "area", "bicubic", "nearest"].includes(parsed.preResizeAlgorithm)
+  ) {
+    controls.preResizeAlgorithm = parsed.preResizeAlgorithm;
+  }
+
+  if (
+    parsed.outputScaleAlgorithm &&
+    ["lanczos", "area", "bicubic", "nearest"].includes(parsed.outputScaleAlgorithm)
+  ) {
     controls.outputScaleAlgorithm = parsed.outputScaleAlgorithm;
   }
 

@@ -887,15 +900,21 @@
 
   await flushUi();
 
-  const …
-  …
-  …
-  const …
-  if (!…
+  const sourceCanvas = document.createElement("canvas");
+  sourceCanvas.width = sourceDimensions.width;
+  sourceCanvas.height = sourceDimensions.height;
+  const sourceCtx = sourceCanvas.getContext("2d");
+  if (!sourceCtx) {
     throw new Error("2D canvas context is unavailable.");
   }
 
-  …
+  sourceCtx.drawImage(image, 0, 0, sourceDimensions.width, sourceDimensions.height);
+  const canvas = await resizeCanvas(
+    sourceCanvas,
+    targetSize.width,
+    targetSize.height,
+    controls.preResizeAlgorithm,
+  );
   const nextUrl = await canvasToBlobUrl(canvas);
 
   if (requestId !== modelInputPreviewRequestId) {

@@ -1726,6 +1745,7 @@ export function useOnnxInspector() {
       inferredFixedInputSize.value?.height ?? null,
       controls.width,
       controls.height,
+      controls.preResizeAlgorithm,
     ] as const,
     () => {
       if (!initialized.value) {
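Values restored from persisted settings are checked against an allow-list before being assigned, so a stale or hand-edited entry cannot smuggle an invalid algorithm into the controls. A standalone sketch of that guard (the helper name is illustrative, not from the source):

import type { DownscaleAlgorithm } from "../lib/types";

const ALGORITHMS = ["lanczos", "area", "bicubic", "nearest"] as const;

// Narrow an untrusted value (e.g. parsed from localStorage) to the union
// type, keeping the fallback when the stored value is unrecognized.
function toAlgorithm(value: unknown, fallback: DownscaleAlgorithm): DownscaleAlgorithm {
  return typeof value === "string" && (ALGORITHMS as readonly string[]).includes(value)
    ? (value as DownscaleAlgorithm)
    : fallback;
}

console.log(toAlgorithm("nearest", "lanczos"));  // "nearest"
console.log(toAlgorithm("bilinear", "lanczos")); // "lanczos" (not in the allow-list)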
src/lib/onnxHelpers.ts
CHANGED

@@ -496,6 +496,7 @@ export async function preprocessImage(
   const paddedHeight = height + padding.padBottom;
   const channelOrder = controls.channelOrder;
   const normalize = controls.normalize;
+  const resizeAlgorithm = controls.preResizeAlgorithm;
   const mean = parseCsvNumbers(controls.mean, 3);
   const std = parseCsvNumbers(controls.std, 3).map((value) => value || 1);
   const declaredTensorType = normalizeTensorType(metadata.type);

@@ -505,15 +506,21 @@
     throw new Error(`This inspector currently supports float32 image inputs only. Model expects ${tensorType}.`);
   }
 
-  const …
-  …
-  …
+  const sourceCanvas = document.createElement("canvas");
+  sourceCanvas.width = sourceDimensions.width;
+  sourceCanvas.height = sourceDimensions.height;
+  const sourceCtx = sourceCanvas.getContext("2d", { willReadFrequently: true });
+  if (!sourceCtx) {
+    throw new Error("2D canvas context is unavailable.");
+  }
+
+  sourceCtx.drawImage(imageElement, 0, 0, sourceDimensions.width, sourceDimensions.height);
+  const canvas = await resizeCanvas(sourceCanvas, width, height, resizeAlgorithm);
   const ctx = canvas.getContext("2d", { willReadFrequently: true });
   if (!ctx) {
     throw new Error("2D canvas context is unavailable.");
   }
 
-  ctx.drawImage(imageElement, 0, 0, width, height);
   const pixels = ctx.getImageData(0, 0, width, height).data;
 
   const channels = 3;

@@ -562,6 +569,7 @@
     targetImage: `${width}x${height}`,
     tensorImage: `${paddedWidth}x${paddedHeight}`,
     layout,
+    resizeAlgorithm,
     channelOrder,
     normalize,
     mean,

@@ -879,6 +887,23 @@ function buildAreaContributors(sourceSize: number, targetSize: number): ResampleContributor[] {
   });
 }
 
+function buildNearestNeighborContributors(
+  sourceSize: number,
+  targetSize: number,
+): ResampleContributor[] {
+  const scale = sourceSize / Math.max(1, targetSize);
+
+  return Array.from({ length: targetSize }, (_, targetIndex) => {
+    const center = (targetIndex + 0.5) * scale - 0.5;
+    const sourceIndex = clamp(Math.round(center), 0, sourceSize - 1);
+
+    return {
+      offsets: [sourceIndex],
+      weights: [1],
+    };
+  });
+}
+
 function bicubicKernel(distance: number): number {
   const a = -0.5;
   const x = Math.abs(distance);

@@ -911,7 +936,7 @@ function lanczosKernel(distance: number, lobes: number): number {
 function buildKernelContributors(
   sourceSize: number,
   targetSize: number,
-  algorithm: Exclude<DownscaleAlgorithm, "area">,
+  algorithm: Exclude<DownscaleAlgorithm, "area" | "nearest">,
 ): ResampleContributor[] {
   const scale = sourceSize / Math.max(1, targetSize);
   const filterScale = Math.max(1, scale);

@@ -951,6 +976,10 @@ function buildResampleContributors(
   targetSize: number,
   algorithm: DownscaleAlgorithm,
 ): ResampleContributor[] {
+  if (algorithm === "nearest") {
+    return buildNearestNeighborContributors(sourceSize, targetSize);
+  }
+
   if (algorithm === "area") {
     return buildAreaContributors(sourceSize, targetSize);
   }

@@ -958,7 +987,7 @@
   return buildKernelContributors(sourceSize, targetSize, algorithm);
 }
 
-export async function downscaleCanvas(
+export async function resizeCanvas(
   sourceCanvas: HTMLCanvasElement,
   targetWidth: number,
   targetHeight: number,

@@ -993,10 +1022,15 @@ export async function downscaleCanvas(
       for (let weightIndex = 0; weightIndex < contributors.offsets.length; weightIndex += 1) {
         const sourceOffset = sourceRowOffset + contributors.offsets[weightIndex] * 4;
         const weight = contributors.weights[weightIndex];
-        …
-        …
-        …
-        …
+        const sourceAlpha = sourcePixels[sourceOffset + 3];
+        const alphaWeight = (sourceAlpha / 255) * weight;
+
+        // Filter in premultiplied-alpha space so transparent edges do not bleed
+        // dark or fully-saturated colors into neighboring pixels.
+        temp[outputOffset] += sourcePixels[sourceOffset] * alphaWeight;
+        temp[outputOffset + 1] += sourcePixels[sourceOffset + 1] * alphaWeight;
+        temp[outputOffset + 2] += sourcePixels[sourceOffset + 2] * alphaWeight;
+        temp[outputOffset + 3] += sourceAlpha * weight;
       }
     }

@@ -1027,10 +1061,18 @@ export async function downscaleCanvas(
         alpha += temp[sourceOffset + 3] * weight;
       }
 
-      …
-      …
-      …
-      …
+      const alphaByte = clamp(Math.round(alpha), 0, 255);
+      if (alpha > 1e-8) {
+        const unpremultiply = 255 / alpha;
+        outputPixels[outputOffset] = clamp(Math.round(red * unpremultiply), 0, 255);
+        outputPixels[outputOffset + 1] = clamp(Math.round(green * unpremultiply), 0, 255);
+        outputPixels[outputOffset + 2] = clamp(Math.round(blue * unpremultiply), 0, 255);
+      } else {
+        outputPixels[outputOffset] = 0;
+        outputPixels[outputOffset + 1] = 0;
+        outputPixels[outputOffset + 2] = 0;
+      }
+      outputPixels[outputOffset + 3] = alphaByte;
     }
 
     if (shouldYield && y > 0 && y % 32 === 0) {

@@ -1050,6 +1092,15 @@
   return outputCanvas;
 }
 
+export async function downscaleCanvas(
+  sourceCanvas: HTMLCanvasElement,
+  targetWidth: number,
+  targetHeight: number,
+  algorithm: DownscaleAlgorithm,
+): Promise<HTMLCanvasElement> {
+  return resizeCanvas(sourceCanvas, targetWidth, targetHeight, algorithm);
+}
+
 export async function tensorToPreview(
   tensor: ort.Tensor | undefined,
   cropInfo: PreprocessResult["outputCrop"] | null = null,
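Nearest neighbor slots into the existing contributor machinery as the degenerate case: one source offset with weight 1 per target pixel. A standalone sketch of the index mapping (the `ResampleContributor` shape follows the diff):

interface ResampleContributor {
  offsets: number[];
  weights: number[];
}

const clamp = (value: number, min: number, max: number): number =>
  Math.min(max, Math.max(min, value));

// Map each target pixel's center back into source coordinates and round to
// the closest source index; no filtering, so hard edges survive intact.
function nearestContributors(sourceSize: number, targetSize: number): ResampleContributor[] {
  const scale = sourceSize / Math.max(1, targetSize);

  return Array.from({ length: targetSize }, (_, targetIndex) => {
    const center = (targetIndex + 0.5) * scale - 0.5;
    const sourceIndex = clamp(Math.round(center), 0, sourceSize - 1);
    return { offsets: [sourceIndex], weights: [1] };
  });
}

console.log(nearestContributors(8, 4).map((c) => c.offsets[0])); // [1, 3, 5, 7]

The other change in this file is the switch to premultiplied-alpha filtering: RGB samples are weighted by alpha / 255 before accumulation, and the output pass divides by the accumulated alpha (255 / alpha) to unpremultiply, zeroing fully transparent pixels. Without this, a transparent-black neighbor would pull resized edge pixels toward black.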
src/lib/types.ts
CHANGED

@@ -1,6 +1,6 @@
 import type * as ort from "onnxruntime-web";
 
-export type DownscaleAlgorithm = "lanczos" | "area" | "bicubic";
+export type DownscaleAlgorithm = "lanczos" | "area" | "bicubic" | "nearest";
 
 export interface ModelConfigEntry {
   id: string;

@@ -78,6 +78,7 @@ export interface InspectorControls {
   width: string;
   height: string;
   outputScale: string;
+  preResizeAlgorithm: DownscaleAlgorithm;
   outputScaleAlgorithm: DownscaleAlgorithm;
   optLevel: "all" | "extended" | "basic" | "disabled";
   webgpuLayout: "NCHW" | "NHWC";