|
|
(function () { |
|
|
'use strict'; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Manages camera enumeration, permission prompting, stream lifecycle, and
 * the <video> element used for the live preview.
 */
class CameraManager {
  constructor() {
    // Currently active MediaStream, or null when no camera is running.
    this.currentStream = null;
    // [{ id, label, groupId }] for every 'videoinput' device found.
    this.availableCameras = [];
    // deviceId of the camera to use; defaults to the first one enumerated.
    this.selectedCameraId = null;
    // The <video> element the stream is attached to.
    this.videoElement = null;

    // Portrait-first resolution (ideal 480x640) tried before the fallback.
    this.preferredConstraints = {
      width: { ideal: 480, min: 240 },
      height: { ideal: 640, min: 320 }
    };
    // Landscape fallback (ideal 640x480) used when the preferred set fails.
    this.fallbackConstraints = {
      width: { ideal: 640, min: 320 },
      height: { ideal: 480, min: 240 }
    };
  }

  /**
   * Requests camera permission, then enumerates available cameras.
   * @throws {Error} when permission is denied or enumeration fails.
   */
  async initialize() {
    try {
      await this.requestCameraPermission();
      await this.enumerateCameras();
      console.log(`Found ${this.availableCameras.length} camera(s)`);
    } catch (error) {
      console.error('Failed to initialize camera manager:', error);
      throw new Error(`Camera initialization failed: ${error.message}`);
    }
  }

  /**
   * Opens (and immediately closes) a throwaway stream so the browser prompts
   * for permission and subsequent enumerateDevices() returns device labels.
   * @throws {Error} when the user denies access or no camera exists.
   */
  async requestCameraPermission() {
    try {
      const tempStream = await navigator.mediaDevices.getUserMedia({
        video: true
      });
      // Release the probe stream right away; we only needed the permission.
      tempStream.getTracks().forEach(track => track.stop());
      console.log('Camera permission granted');
    } catch (error) {
      throw new Error('Camera permission denied or not available');
    }
  }

  /**
   * Populates this.availableCameras from enumerateDevices() and selects the
   * first camera by default.
   * @throws {Error} when no video input devices are found.
   */
  async enumerateCameras() {
    try {
      const devices = await navigator.mediaDevices.enumerateDevices();
      this.availableCameras = devices
        .filter(device => device.kind === 'videoinput')
        .map((device, index) => ({
          id: device.deviceId,
          // Labels are empty until permission is granted; fall back to an index.
          label: device.label || `Camera ${index + 1}`,
          groupId: device.groupId
        }));
      if (this.availableCameras.length === 0) {
        throw new Error('No cameras found');
      }
      this.selectedCameraId = this.availableCameras[0].id;
    } catch (error) {
      throw new Error(`Failed to enumerate cameras: ${error.message}`);
    }
  }

  /**
   * Returns a shallow copy of the enumerated camera list.
   */
  getAvailableCameras() {
    return [...this.availableCameras];
  }

  /**
   * Starts streaming the chosen camera into the given <video> element,
   * trying the preferred (portrait) resolution before the fallback.
   *
   * @param {HTMLVideoElement} videoElement - target element for the stream.
   * @param {string|null} cameraId - deviceId to use; defaults to the
   *   currently selected camera when null.
   * @returns {Promise<{width, height, rotated, constraints, cameraId, cameraLabel}>}
   * @throws {Error} when the stream cannot be started or metadata never loads.
   */
  async startCamera(videoElement, cameraId = null) {
    try {
      // Only one stream at a time.
      if (this.currentStream) {
        this.stopCamera();
      }

      const targetCameraId = cameraId || this.selectedCameraId;
      this.selectedCameraId = targetCameraId;
      this.videoElement = videoElement;

      let stream = null;
      let usedConstraints = null;
      try {
        const constraints = {
          video: {
            deviceId: { exact: targetCameraId },
            ...this.preferredConstraints
          }
        };
        console.log('Trying preferred resolution (480x640)...');
        stream = await navigator.mediaDevices.getUserMedia(constraints);
        usedConstraints = this.preferredConstraints;
      } catch (error) {
        console.log('Preferred resolution failed, trying fallback (640x480)...');
        const constraints = {
          video: {
            deviceId: { exact: targetCameraId },
            ...this.fallbackConstraints
          }
        };
        stream = await navigator.mediaDevices.getUserMedia(constraints);
        usedConstraints = this.fallbackConstraints;
      }

      videoElement.srcObject = stream;
      this.currentStream = stream;

      // Wait for metadata so videoWidth/videoHeight are populated.
      // FIX: the 10 s timeout is now cleared once the promise settles, so it
      // can no longer fire a stray late rejection after success.
      await new Promise((resolve, reject) => {
        let timerId = null;
        const onLoaded = () => {
          clearTimeout(timerId);
          resolve();
        };
        const onError = err => {
          clearTimeout(timerId);
          reject(err);
        };
        videoElement.addEventListener('loadedmetadata', onLoaded, { once: true });
        videoElement.addEventListener('error', onError, { once: true });
        timerId = setTimeout(() => reject(new Error('Video load timeout')), 10000);
      });

      const videoWidth = videoElement.videoWidth;
      const videoHeight = videoElement.videoHeight;
      console.log(`Video stream started: ${videoWidth}x${videoHeight}`);

      // Display rotation is intentionally disabled; the constant is kept only
      // for the `rotated` field of the returned stream info.
      const shouldRotateDisplay = false;
      const isPortrait = videoHeight > videoWidth;
      const videoWrapper = videoElement.closest('.video-wrapper');

      // Since rotation is disabled, always clear any stale rotation class.
      videoElement.classList.remove('rotate-90ccw');

      if (videoWrapper) {
        if (isPortrait) {
          videoWrapper.classList.add('portrait');
          console.log('Applied portrait aspect ratio (3:4)');
        } else {
          videoWrapper.classList.remove('portrait');
        }
      }

      return {
        width: videoWidth,
        height: videoHeight,
        rotated: shouldRotateDisplay,
        constraints: usedConstraints,
        cameraId: targetCameraId,
        cameraLabel: this.getCameraLabel(targetCameraId)
      };
    } catch (error) {
      console.error('Failed to start camera:', error);
      throw new Error(`Camera start failed: ${error.message}`);
    }
  }

  /**
   * Stops all tracks of the current stream and resets the video element's
   * rotation/aspect classes.
   */
  stopCamera() {
    if (this.currentStream) {
      this.currentStream.getTracks().forEach(track => {
        track.stop();
        console.log(`Stopped ${track.kind} track`);
      });
      this.currentStream = null;
    }
    if (this.videoElement) {
      this.videoElement.srcObject = null;
      this.videoElement.classList.remove('rotate-90ccw');
      const videoWrapper = this.videoElement.closest('.video-wrapper');
      if (videoWrapper) {
        videoWrapper.classList.remove('portrait');
      }
    }
    console.log('Camera stopped');
  }

  /**
   * Restarts the stream on a different camera, reusing the current
   * video element.
   * @throws {Error} when no video element has been attached yet.
   */
  async switchCamera(cameraId) {
    if (!this.videoElement) {
      throw new Error('No video element available');
    }
    return await this.startCamera(this.videoElement, cameraId);
  }

  /**
   * Returns the human-readable label for a deviceId, or 'Unknown Camera'.
   */
  getCameraLabel(cameraId) {
    const camera = this.availableCameras.find(cam => cam.id === cameraId);
    return camera ? camera.label : 'Unknown Camera';
  }

  /**
   * Returns info about the active camera, or null when nothing is selected
   * or no video element is attached.
   */
  getCurrentCameraInfo() {
    if (!this.selectedCameraId || !this.videoElement) {
      return null;
    }
    return {
      id: this.selectedCameraId,
      label: this.getCameraLabel(this.selectedCameraId),
      width: this.videoElement.videoWidth,
      height: this.videoElement.videoHeight,
      isRotated: this.videoElement.classList.contains('rotate-90ccw')
    };
  }

  /**
   * True while at least one track of the current stream is still live.
   */
  isActive() {
    return this.currentStream !== null && this.currentStream.getTracks().some(track => track.readyState === 'live');
  }

  /**
   * Returns the attached <video> element (or null).
   */
  getVideoElement() {
    return this.videoElement;
  }

  /**
   * Fills a <select> element with the enumerated cameras, pre-selecting
   * the currently chosen one. Shows a disabled placeholder when empty.
   */
  populateCameraSelect(selectElement) {
    selectElement.innerHTML = '';
    if (this.availableCameras.length === 0) {
      const option = document.createElement('option');
      option.value = '';
      option.textContent = 'No cameras available';
      option.disabled = true;
      selectElement.appendChild(option);
      return;
    }
    this.availableCameras.forEach(camera => {
      const option = document.createElement('option');
      option.value = camera.id;
      option.textContent = camera.label;
      selectElement.appendChild(option);
    });
    if (this.selectedCameraId) {
      selectElement.value = this.selectedCameraId;
    }
  }

  /**
   * Registers a callback invoked with the refreshed camera list whenever
   * devices are plugged/unplugged. On enumeration failure the callback
   * receives an empty array.
   */
  onDeviceChange(callback) {
    navigator.mediaDevices.addEventListener('devicechange', async () => {
      console.log('Camera devices changed');
      try {
        await this.enumerateCameras();
        callback(this.availableCameras);
      } catch (error) {
        console.error('Error handling device change:', error);
        callback([]);
      }
    });
  }

  /**
   * Stops the camera and clears all cached state.
   */
  dispose() {
    this.stopCamera();
    this.availableCameras = [];
    this.selectedCameraId = null;
    this.videoElement = null;
    console.log('Camera manager disposed');
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Static helpers for canvas image manipulation and for pre/post-processing
 * frames for the segmentation model (ImageNet normalization, CHW layout,
 * 2-class argmax mask, overlay drawing).
 */
class ImageUtils {
  /**
   * Creates an offscreen canvas of the given pixel size.
   */
  static createCanvas(width, height) {
    const canvas = document.createElement('canvas');
    canvas.width = width;
    canvas.height = height;
    return canvas;
  }

  /**
   * Rotates ImageData by a quarter turn, swapping width and height.
   * NOTE(review): translate(height, 0) + rotate(+90°) maps (x, y) to
   * (height - y, x), which is a CLOCKWISE quarter turn despite the CCW
   * name — confirm intended direction before relying on it.
   */
  static rotateImage90CCW(imageData) {
    const { width, height } = imageData;
    const rotatedCanvas = this.createCanvas(height, width);
    const ctx = rotatedCanvas.getContext('2d');

    // putImageData ignores the transform matrix, so stage the pixels on a
    // temporary canvas and rotate that with drawImage instead.
    const tempCanvas = this.createCanvas(width, height);
    const tempCtx = tempCanvas.getContext('2d');
    tempCtx.putImageData(imageData, 0, 0);

    ctx.translate(height, 0);
    ctx.rotate(Math.PI / 2);
    ctx.drawImage(tempCanvas, 0, 0);
    return ctx.getImageData(0, 0, height, width);
  }

  /**
   * Resamples ImageData to targetWidth x targetHeight using the canvas's
   * built-in (bilinear-style) scaling.
   */
  static resizeImage(imageData, targetWidth, targetHeight) {
    const { width, height } = imageData;

    const sourceCanvas = this.createCanvas(width, height);
    const sourceCtx = sourceCanvas.getContext('2d');
    sourceCtx.putImageData(imageData, 0, 0);

    const targetCanvas = this.createCanvas(targetWidth, targetHeight);
    const targetCtx = targetCanvas.getContext('2d');
    targetCtx.drawImage(sourceCanvas, 0, 0, width, height, 0, 0, targetWidth, targetHeight);
    return targetCtx.getImageData(0, 0, targetWidth, targetHeight);
  }

  /**
   * Grabs the current frame of a playing <video> as ImageData.
   */
  static getVideoFrame(video) {
    const canvas = this.createCanvas(video.videoWidth, video.videoHeight);
    const ctx = canvas.getContext('2d');
    ctx.drawImage(video, 0, 0);
    return ctx.getImageData(0, 0, video.videoWidth, video.videoHeight);
  }

  /**
   * Converts RGBA ImageData to a Float32Array in CHW layout, normalized
   * with the standard ImageNet per-channel mean/std. Alpha is discarded.
   * @returns {Float32Array} length 3 * width * height.
   */
  static normalizeImageData(imageData) {
    const { width, height, data } = imageData;
    // Hoisted loop invariant: size of one channel plane.
    const planeSize = width * height;
    const normalizedData = new Float32Array(3 * planeSize);

    // ImageNet normalization constants (RGB order).
    const mean = [0.485, 0.456, 0.406];
    const std = [0.229, 0.224, 0.225];

    let pixelIndex = 0;
    for (let i = 0; i < data.length; i += 4) {
      const r = data[i] / 255.0;
      const g = data[i + 1] / 255.0;
      const b = data[i + 2] / 255.0;
      // CHW: one contiguous plane per channel.
      normalizedData[pixelIndex] = (r - mean[0]) / std[0];
      normalizedData[pixelIndex + planeSize] = (g - mean[1]) / std[1];
      normalizedData[pixelIndex + 2 * planeSize] = (b - mean[2]) / std[2];
      pixelIndex++;
    }
    return normalizedData;
  }

  /**
   * Full preprocessing pipeline for a video frame: grab -> center-crop to
   * 3:4 -> resize to model input size -> ImageNet-normalize to CHW floats.
   * @throws rethrows any failure after logging it.
   */
  static preprocessVideoFrame(video, targetWidth, targetHeight) {
    try {
      let imageData = this.getVideoFrame(video);

      // Match the model's 3:4 portrait aspect before resizing.
      imageData = this.cropCenter3to4(imageData);

      if (imageData.width !== targetWidth || imageData.height !== targetHeight) {
        imageData = this.resizeImage(imageData, targetWidth, targetHeight);
      }
      return this.normalizeImageData(imageData);
    } catch (error) {
      console.error('Error preprocessing video frame:', error);
      throw error;
    }
  }

  /**
   * Wraps normalized CHW data in a plain tensor descriptor (NCHW dims).
   */
  static createInputTensor(normalizedData, height, width) {
    return {
      data: normalizedData,
      dims: [1, 3, height, width],
      type: 'float32'
    };
  }

  /**
   * Converts raw model logits into a binary mask. The output layout is
   * [1, 2, H, W]: plane 0 holds background logits, plane 1 card logits;
   * per-pixel argmax yields 255 (card) or 0 (background).
   * @returns {Uint8Array} length height * width.
   */
  static processModelOutput(modelOutput, height, width) {
    // Hoisted loop invariant: size of one logit plane.
    const planeSize = height * width;
    const mask = new Uint8Array(planeSize);
    for (let i = 0; i < planeSize; i++) {
      const backgroundLogit = modelOutput[i];
      const cardLogit = modelOutput[i + planeSize];
      mask[i] = cardLogit > backgroundLogit ? 255 : 0;
    }
    return mask;
  }

  /**
   * Draws the segmentation mask onto targetCanvas as a semi-transparent
   * cyan overlay, scaled to the canvas size.
   * Note: the shouldRotate parameter is currently force-disabled below,
   * so the rotation branch is never taken.
   */
  static drawSegmentationOverlay(mask, width, height, targetCanvas, shouldRotate = false) {
    const ctx = targetCanvas.getContext('2d');

    // Paint the mask into its own canvas: cyan at 50% alpha where the mask
    // is set, fully transparent elsewhere.
    const maskCanvas = this.createCanvas(width, height);
    const maskCtx = maskCanvas.getContext('2d');
    const maskImageData = maskCtx.createImageData(width, height);
    for (let i = 0; i < mask.length; i++) {
      const pixelIndex = i * 4;
      if (mask[i] > 0) {
        maskImageData.data[pixelIndex] = 0;
        maskImageData.data[pixelIndex + 1] = 255;
        maskImageData.data[pixelIndex + 2] = 255;
        maskImageData.data[pixelIndex + 3] = 128;
      } else {
        maskImageData.data[pixelIndex + 3] = 0;
      }
    }
    maskCtx.putImageData(maskImageData, 0, 0);

    ctx.clearRect(0, 0, targetCanvas.width, targetCanvas.height);

    // Rotation intentionally disabled (overrides the parameter); the
    // rotate branch is kept for when the feature is re-enabled.
    shouldRotate = false;
    if (shouldRotate) {
      ctx.save();
      ctx.translate(targetCanvas.width, 0);
      ctx.rotate(Math.PI / 2);
      ctx.drawImage(maskCanvas, 0, 0, height, width, 0, 0, targetCanvas.height, targetCanvas.width);
      ctx.restore();
    } else {
      ctx.drawImage(maskCanvas, 0, 0, width, height, 0, 0, targetCanvas.width, targetCanvas.height);
    }
  }

  /**
   * True when a landscape stream would need rotating for display.
   */
  static shouldRotateCamera(width, height) {
    return width > height;
  }

  /**
   * True only for the exact 640x480 landscape stream the model expects
   * to be rotated before inference.
   */
  static shouldRotateForModel(width, height) {
    return width === 640 && height === 480;
  }

  /**
   * Center-crops ImageData to a 3:4 (portrait) aspect ratio, trimming
   * whichever dimension is too large.
   */
  static cropCenter3to4(imageData) {
    const { width, height } = imageData;

    let cropWidth, cropHeight;
    if (width / height > 3 / 4) {
      // Too wide: keep full height, trim the sides.
      cropHeight = height;
      cropWidth = Math.floor(height * 3 / 4);
    } else {
      // Too tall: keep full width, trim top and bottom.
      cropWidth = width;
      cropHeight = Math.floor(width * 4 / 3);
    }

    const startX = Math.floor((width - cropWidth) / 2);
    const startY = Math.floor((height - cropHeight) / 2);

    const sourceCanvas = this.createCanvas(width, height);
    const sourceCtx = sourceCanvas.getContext('2d');
    sourceCtx.putImageData(imageData, 0, 0);

    const targetCanvas = this.createCanvas(cropWidth, cropHeight);
    const targetCtx = targetCanvas.getContext('2d');
    targetCtx.drawImage(sourceCanvas, startX, startY, cropWidth, cropHeight, 0, 0, cropWidth, cropHeight);
    return targetCtx.getImageData(0, 0, cropWidth, cropHeight);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Loads the ONNX card-segmentation model via onnxruntime-web (`ort` global)
 * and runs per-frame inference, with GPU provider detection/fallback
 * (webgpu -> webgl -> wasm) and rolling timing statistics.
 */
class ModelInference {
  constructor() {
    // ort.InferenceSession once loaded.
    this.session = null;
    this.isModelLoaded = false;
    // Guards against overlapping runInference() calls.
    this.isInferring = false;
    this.modelPath = 'models/card_segmentation_fp16.onnx';

    // Fixed model input geometry (portrait 240x320) and class count.
    this.inputHeight = 320;
    this.inputWidth = 240;
    this.numClasses = 2;

    // Capability/provider bookkeeping filled in during initialize().
    this.accelerationInfo = {
      activeProvider: 'unknown',
      availableProviders: [],
      gpuInfo: null,
      supportsWebGL: false,
      supportsWebGPU: false
    };

    // Rolling inference timing statistics.
    this.inferenceStats = {
      totalInferences: 0,
      totalTime: 0,
      averageTime: 0,
      lastInferenceTime: 0
    };
  }

  /**
   * Configures onnxruntime-web, detects GPU capabilities, and loads the
   * model with the best available execution provider.
   * @returns {Promise<boolean>} true on success.
   * @throws {Error} when no provider can load the model.
   */
  async initialize() {
    try {
      console.log('Initializing ONNX Runtime with GPU acceleration...');
      ort.env.wasm.wasmPaths = 'https://cdn.jsdelivr.net/npm/onnxruntime-web@1.22.0/dist/';
      ort.env.wasm.numThreads = 1;

      await this.detectGPUCapabilities();

      const executionProviders = this.getOptimalExecutionProviders();
      console.log('Attempting to load model with providers:', executionProviders);
      console.log(`Loading model from: ${this.modelPath}`);

      this.session = await this.loadModelWithFallback(executionProviders);
      console.log(`Model loaded successfully with provider: ${this.accelerationInfo.activeProvider}`);
      this.isModelLoaded = true;

      this.logModelInfo();
      this.logAccelerationInfo();
      return true;
    } catch (error) {
      console.error('Failed to initialize model:', error);
      this.isModelLoaded = false;
      throw new Error(`Model initialization failed: ${error.message}`);
    }
  }

  /**
   * Probes WebGL and WebGPU support and records GPU vendor info.
   */
  async detectGPUCapabilities() {
    console.log('Detecting GPU capabilities...');
    this.accelerationInfo.supportsWebGL = this.checkWebGLSupport();
    this.accelerationInfo.supportsWebGPU = await this.checkWebGPUSupport();
    if (this.accelerationInfo.supportsWebGL) {
      this.accelerationInfo.gpuInfo = this.getWebGLInfo();
    }
    console.log('GPU Capabilities:', {
      WebGL: this.accelerationInfo.supportsWebGL,
      WebGPU: this.accelerationInfo.supportsWebGPU,
      GPU: this.accelerationInfo.gpuInfo
    });
  }

  /**
   * True when a WebGL (or experimental-webgl) context can be created.
   */
  checkWebGLSupport() {
    try {
      const canvas = document.createElement('canvas');
      const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl');
      return !!gl;
    } catch (error) {
      console.warn('WebGL check failed:', error);
      return false;
    }
  }

  /**
   * True when navigator.gpu exists and an adapter can be obtained.
   */
  async checkWebGPUSupport() {
    try {
      if (!navigator.gpu) {
        return false;
      }
      const adapter = await navigator.gpu.requestAdapter();
      return !!adapter;
    } catch (error) {
      console.warn('WebGPU check failed:', error);
      return false;
    }
  }

  /**
   * Reads GPU vendor/renderer strings from a WebGL context (unmasked when
   * the debug_renderer_info extension is available). Returns null on failure.
   */
  getWebGLInfo() {
    try {
      const canvas = document.createElement('canvas');
      const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl');
      if (!gl) return null;
      const debugInfo = gl.getExtension('WEBGL_debug_renderer_info');
      return {
        vendor: debugInfo ? gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL) : gl.getParameter(gl.VENDOR),
        renderer: debugInfo ? gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL) : gl.getParameter(gl.RENDERER),
        version: gl.getParameter(gl.VERSION),
        shadingLanguageVersion: gl.getParameter(gl.SHADING_LANGUAGE_VERSION)
      };
    } catch (error) {
      console.warn('Failed to get WebGL info:', error);
      return null;
    }
  }

  /**
   * Builds the provider preference order: webgpu, then webgl, then wasm.
   * FIX: availableProviders is rebuilt (not appended to) so repeated calls
   * no longer accumulate duplicate entries.
   */
  getOptimalExecutionProviders() {
    const providers = [];
    if (this.accelerationInfo.supportsWebGPU) {
      providers.push('webgpu');
    }
    if (this.accelerationInfo.supportsWebGL) {
      providers.push('webgl');
    }
    // CPU/WASM always works as the last resort.
    providers.push('wasm');
    this.accelerationInfo.availableProviders = [...providers];
    return providers;
  }

  /**
   * Tries each execution provider in order until one loads the model,
   * recording per-provider errors on accelerationInfo.
   * @throws {Error} when every provider fails.
   */
  async loadModelWithFallback(executionProviders) {
    const baseOptions = {
      enableMemPattern: false,
      enableCpuMemArena: false,
      graphOptimizationLevel: 'all'
    };

    for (const provider of executionProviders) {
      try {
        console.log(`Attempting to load model with ${provider} provider...`);
        const options = {
          ...baseOptions,
          executionProviders: [provider]
        };
        const session = await ort.InferenceSession.create(this.modelPath, options);
        this.accelerationInfo.activeProvider = provider;
        console.log(`✓ Successfully loaded with ${provider} provider`);
        return session;
      } catch (error) {
        const errorMsg = error.message;
        // Unsupported-operator failures are expected for GPU providers; log
        // them distinctly before falling through to the next provider.
        if (errorMsg.includes('cannot resolve operator')) {
          const opMatch = errorMsg.match(/operator '(\w+)'/);
          const operatorName = (opMatch && opMatch[1]) || 'unknown';
          console.warn(`✗ ${provider.toUpperCase()} provider doesn't support operator '${operatorName}' - falling back to next provider`);
        } else {
          console.warn(`✗ Failed to load with ${provider} provider:`, errorMsg);
        }
        // Keep the error for diagnostics, e.g. accelerationInfo.webglError.
        this.accelerationInfo[`${provider}Error`] = errorMsg;
        continue;
      }
    }
    throw new Error('Failed to load model with any execution provider');
  }

  /**
   * Logs the detected providers and GPU details to the console.
   */
  logAccelerationInfo() {
    console.log('=== Acceleration Information ===');
    console.log(`Active Provider: ${this.accelerationInfo.activeProvider}`);
    console.log(`Available Providers: ${this.accelerationInfo.availableProviders.join(', ')}`);
    console.log(`WebGL Support: ${this.accelerationInfo.supportsWebGL}`);
    console.log(`WebGPU Support: ${this.accelerationInfo.supportsWebGPU}`);
    if (this.accelerationInfo.gpuInfo) {
      console.log('GPU Information:');
      console.log(`  Vendor: ${this.accelerationInfo.gpuInfo.vendor}`);
      console.log(`  Renderer: ${this.accelerationInfo.gpuInfo.renderer}`);
      console.log(`  Version: ${this.accelerationInfo.gpuInfo.version}`);
    }
    console.log('===============================');
  }

  /**
   * Returns a shallow copy of the acceleration/provider info.
   */
  getAccelerationInfo() {
    return {
      ...this.accelerationInfo
    };
  }

  /**
   * Logs the session's input/output tensor names (and dims/type where the
   * runtime exposes metadata).
   */
  logModelInfo() {
    if (!this.session) return;
    console.log('=== Model Information ===');
    console.log('Input tensors:');
    this.session.inputNames.forEach((name, index) => {
      try {
        if (this.session.inputs && this.session.inputs[index]) {
          const input = this.session.inputs[index];
          console.log(`  ${name}: ${input.dims} (${input.type})`);
        } else {
          console.log(`  ${name}: metadata not available`);
        }
      } catch (error) {
        console.log(`  ${name}: metadata not available`);
      }
    });
    console.log('Output tensors:');
    this.session.outputNames.forEach((name, index) => {
      try {
        if (this.session.outputs && this.session.outputs[index]) {
          const output = this.session.outputs[index];
          console.log(`  ${name}: ${output.dims} (${output.type})`);
        } else {
          console.log(`  ${name}: metadata not available`);
        }
      } catch (error) {
        console.log(`  ${name}: metadata not available`);
      }
    });
    console.log('========================');
  }

  /**
   * Runs one inference on pre-normalized CHW float data.
   * @param {Float32Array} inputData - length 3 * inputHeight * inputWidth.
   * @returns {Promise<Float32Array|null>} raw output logits, or null when a
   *   previous inference is still in flight (call skipped, not queued).
   * @throws {Error} when the model is not loaded or inference fails.
   */
  async runInference(inputData) {
    if (!this.isModelLoaded || !this.session) {
      throw new Error('Model not loaded. Call initialize() first.');
    }
    if (this.isInferring) {
      console.warn('Inference already in progress, skipping...');
      return null;
    }
    try {
      this.isInferring = true;
      const startTime = performance.now();

      const inputTensor = new ort.Tensor('float32', inputData, [1, 3, this.inputHeight, this.inputWidth]);
      const inputName = this.session.inputNames[0];
      const results = await this.session.run({
        [inputName]: inputTensor
      });
      const outputName = this.session.outputNames[0];
      const outputTensor = results[outputName];

      const endTime = performance.now();
      const inferenceTime = endTime - startTime;
      this.updateInferenceStats(inferenceTime);
      console.log(`Inference completed in ${inferenceTime.toFixed(2)}ms`);
      return outputTensor.data;
    } catch (error) {
      console.error('Inference failed:', error);
      throw new Error(`Inference failed: ${error.message}`);
    } finally {
      // Always release the in-flight guard, even on failure.
      this.isInferring = false;
    }
  }

  /**
   * End-to-end per-frame pipeline: preprocess the video frame, run
   * inference, and convert logits to a binary mask.
   * @returns {Promise<{mask, shouldRotateBack, stats}|null>} null when an
   *   inference was skipped because one was already running.
   * @throws {Error} when the model/video is not ready or inference fails.
   */
  async processVideoFrame(video) {
    try {
      if (!this.isModelLoaded) {
        throw new Error('Model not initialized');
      }

      const videoWidth = video.videoWidth;
      const videoHeight = video.videoHeight;
      if (videoWidth === 0 || videoHeight === 0) {
        throw new Error('Video not ready');
      }

      // Whether the overlay should be rotated back for a 640x480 stream.
      const shouldRotateForModel = ImageUtils.shouldRotateForModel(videoWidth, videoHeight);

      const preprocessedData = ImageUtils.preprocessVideoFrame(video, this.inputWidth, this.inputHeight);

      const modelOutput = await this.runInference(preprocessedData);
      if (!modelOutput) {
        return null;
      }

      const mask = ImageUtils.processModelOutput(modelOutput, this.inputHeight, this.inputWidth);
      return {
        mask: mask,
        shouldRotateBack: shouldRotateForModel,
        stats: this.getInferenceStats()
      };
    } catch (error) {
      console.error('Error processing video frame:', error);
      throw error;
    }
  }

  /**
   * Folds one inference duration (ms) into the rolling statistics.
   */
  updateInferenceStats(inferenceTime) {
    this.inferenceStats.totalInferences++;
    this.inferenceStats.totalTime += inferenceTime;
    this.inferenceStats.averageTime = this.inferenceStats.totalTime / this.inferenceStats.totalInferences;
    this.inferenceStats.lastInferenceTime = inferenceTime;
  }

  /**
   * Returns the rolling stats plus an instantaneous FPS derived from the
   * last inference time (0 before the first inference).
   */
  getInferenceStats() {
    return {
      ...this.inferenceStats,
      fps: this.inferenceStats.lastInferenceTime > 0 ? 1000 / this.inferenceStats.lastInferenceTime : 0
    };
  }

  /**
   * Resets all rolling statistics to zero.
   */
  resetStats() {
    this.inferenceStats = {
      totalInferences: 0,
      totalTime: 0,
      averageTime: 0,
      lastInferenceTime: 0
    };
  }

  /**
   * True when the model is loaded and no inference is in flight.
   */
  isReady() {
    return this.isModelLoaded && !this.isInferring;
  }

  /**
   * Returns the model's fixed input geometry and load state.
   */
  getModelSpecs() {
    return {
      inputHeight: this.inputHeight,
      inputWidth: this.inputWidth,
      numClasses: this.numClasses,
      isLoaded: this.isModelLoaded
    };
  }

  /**
   * Releases the ONNX session and resets load/inference flags.
   */
  dispose() {
    if (this.session) {
      this.session.release();
      this.session = null;
    }
    this.isModelLoaded = false;
    this.isInferring = false;
    console.log('Model resources disposed');
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class CardSegmentationApp { |
|
|
constructor() {
  // Subsystem managers: camera/stream handling and ONNX model inference.
  this.cameraManager = new CameraManager();
  this.modelInference = new ModelInference();

  // Cache of DOM element references, filled by setupDOMElements().
  this.elements = {};

  // High-level application state flags.
  this.state = {
    isInitialized: false,
    isModelLoaded: false,
    isCameraActive: false,
    isInferenceRunning: false,
    currentError: null
  };

  // requestAnimationFrame id of the running inference loop (null when idle).
  this.inferenceLoopId = null;

  // Bookkeeping for the on-screen FPS display.
  this.performanceStats = {
    frameCount: 0,
    lastFpsUpdate: 0,
    fps: 0
  };
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Boots the application: wires the DOM, initializes the camera manager and
 * the model, and reveals the camera-selection UI. Errors are shown in the
 * error panel (with retry) rather than rethrown.
 */
async init() {
  try {
    console.log('Initializing Card Segmentation App...');
    // Wire DOM references and handlers first so the loading/error panels
    // are usable during the async setup below.
    this.setupDOMElements();
    this.setupEventListeners();

    this.showLoading('Initializing cameras...');
    // Request permission, enumerate devices, then fill the dropdown.
    await this.cameraManager.initialize();
    this.populateCameraDropdown();

    this.showLoading('Loading AI model...');
    await this.modelInference.initialize();

    // Surface which execution provider (webgpu/webgl/wasm) was chosen.
    this.updateAccelerationInfo();

    this.hideLoading();
    this.showCameraSelection();
    this.state.isInitialized = true;
    this.state.isModelLoaded = true;
    console.log('Application initialized successfully');
  } catch (error) {
    console.error('Application initialization failed:', error);
    this.showError('Failed to initialize application', error.message);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
setupDOMElements() { |
|
|
this.elements = { |
|
|
|
|
|
cameraSelection: document.getElementById('cameraSelection'), |
|
|
videoContainer: document.getElementById('videoContainer'), |
|
|
loadingIndicator: document.getElementById('loadingIndicator'), |
|
|
errorDisplay: document.getElementById('errorDisplay'), |
|
|
|
|
|
cameraSelect: document.getElementById('cameraSelect'), |
|
|
startCamera: document.getElementById('startCamera'), |
|
|
|
|
|
videoElement: document.getElementById('videoElement'), |
|
|
overlayCanvas: document.getElementById('overlayCanvas'), |
|
|
toggleInference: document.getElementById('toggleInference'), |
|
|
switchCamera: document.getElementById('switchCamera'), |
|
|
|
|
|
resolutionInfo: document.getElementById('resolutionInfo'), |
|
|
fpsInfo: document.getElementById('fpsInfo'), |
|
|
inferenceStatus: document.getElementById('inferenceStatus'), |
|
|
accelerationInfo: document.getElementById('accelerationInfo'), |
|
|
|
|
|
loadingText: document.getElementById('loadingText'), |
|
|
errorMessage: document.getElementById('errorMessage'), |
|
|
retryButton: document.getElementById('retryButton') |
|
|
}; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Attaches all UI and lifecycle event handlers. Assumes setupDOMElements()
 * has already populated this.elements.
 */
setupEventListeners() {
  // Enable the start button only when a camera is actually selected.
  this.elements.cameraSelect.addEventListener('change', e => {
    this.elements.startCamera.disabled = !e.target.value;
  });
  this.elements.startCamera.addEventListener('click', () => {
    this.startCamera();
  });

  // Start/stop the inference loop.
  this.elements.toggleInference.addEventListener('click', () => {
    this.toggleInference();
  });
  // "Switch camera" returns to the camera-selection screen.
  this.elements.switchCamera.addEventListener('click', () => {
    this.showCameraSelection();
  });

  // Retry re-runs the full init sequence after an error.
  this.elements.retryButton.addEventListener('click', () => {
    this.hideError();
    this.init();
  });

  // Resize the overlay canvas whenever the video's dimensions become known.
  this.elements.videoElement.addEventListener('loadedmetadata', () => {
    this.setupOverlayCanvas();
  });

  // Keep the dropdown in sync when cameras are plugged/unplugged.
  this.cameraManager.onDeviceChange(() => {
    this.populateCameraDropdown();
  });

  // Release camera/model resources when the page is closed.
  window.addEventListener('beforeunload', () => {
    this.cleanup();
  });
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
populateCameraDropdown() { |
|
|
this.cameraManager.populateCameraSelect(this.elements.cameraSelect); |
|
|
this.elements.startCamera.disabled = this.elements.cameraSelect.value === ''; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Starts streaming the camera chosen in the dropdown into the video
 * element, then swaps the UI from selection to the live view. Failures are
 * shown in the error panel rather than rethrown.
 */
async startCamera() {
  try {
    const selectedCameraId = this.elements.cameraSelect.value;
    if (!selectedCameraId) {
      throw new Error('No camera selected');
    }
    this.showLoading('Starting camera...');

    // Returns { width, height, rotated, constraints, cameraId, cameraLabel }.
    const streamInfo = await this.cameraManager.startCamera(this.elements.videoElement, selectedCameraId);

    // Show the achieved resolution and size the overlay to match.
    this.updateResolutionInfo(streamInfo);
    this.setupOverlayCanvas();

    this.hideLoading();
    this.hideCameraSelection();
    this.showVideoContainer();
    this.state.isCameraActive = true;
    console.log('Camera started successfully:', streamInfo);
  } catch (error) {
    console.error('Failed to start camera:', error);
    this.showError('Failed to start camera', error.message);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
setupOverlayCanvas() { |
|
|
const video = this.elements.videoElement; |
|
|
const canvas = this.elements.overlayCanvas; |
|
|
if (video.videoWidth && video.videoHeight) { |
|
|
canvas.width = video.videoWidth; |
|
|
canvas.height = video.videoHeight; |
|
|
|
|
|
|
|
|
if (video.classList.contains('rotate-90ccw')) { |
|
|
canvas.classList.add('rotate-90ccw'); |
|
|
} else { |
|
|
canvas.classList.remove('rotate-90ccw'); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
toggleInference() { |
|
|
if (this.state.isInferenceRunning) { |
|
|
this.stopInference(); |
|
|
} else { |
|
|
this.startInference(); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
startInference() { |
|
|
if (!this.state.isModelLoaded || !this.state.isCameraActive) { |
|
|
this.showError('Cannot start inference', 'Camera or model not ready'); |
|
|
return; |
|
|
} |
|
|
this.state.isInferenceRunning = true; |
|
|
this.elements.toggleInference.textContent = 'Stop Detection'; |
|
|
this.elements.toggleInference.className = 'btn btn-danger'; |
|
|
this.updateInferenceStatus('Detection: Running'); |
|
|
|
|
|
|
|
|
this.performanceStats.frameCount = 0; |
|
|
this.performanceStats.lastFpsUpdate = performance.now(); |
|
|
|
|
|
|
|
|
this.runInferenceLoop(); |
|
|
console.log('Inference started'); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
stopInference() { |
|
|
this.state.isInferenceRunning = false; |
|
|
this.elements.toggleInference.textContent = 'Start Detection'; |
|
|
this.elements.toggleInference.className = 'btn btn-success'; |
|
|
this.updateInferenceStatus('Detection: Off'); |
|
|
|
|
|
|
|
|
if (this.inferenceLoopId) { |
|
|
cancelAnimationFrame(this.inferenceLoopId); |
|
|
this.inferenceLoopId = null; |
|
|
} |
|
|
|
|
|
|
|
|
const ctx = this.elements.overlayCanvas.getContext('2d'); |
|
|
ctx.clearRect(0, 0, this.elements.overlayCanvas.width, this.elements.overlayCanvas.height); |
|
|
console.log('Inference stopped'); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async runInferenceLoop() { |
|
|
if (!this.state.isInferenceRunning) { |
|
|
return; |
|
|
} |
|
|
try { |
|
|
|
|
|
const result = await this.modelInference.processVideoFrame(this.elements.videoElement); |
|
|
if (result && result.mask) { |
|
|
|
|
|
ImageUtils.drawSegmentationOverlay(result.mask, this.modelInference.inputWidth, this.modelInference.inputHeight, this.elements.overlayCanvas, result.shouldRotateBack); |
|
|
|
|
|
|
|
|
this.updatePerformanceStats(result.stats); |
|
|
} |
|
|
} catch (error) { |
|
|
console.error('Inference loop error:', error); |
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
if (this.state.isInferenceRunning) { |
|
|
this.inferenceLoopId = requestAnimationFrame(() => this.runInferenceLoop()); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
updatePerformanceStats(inferenceStats) { |
|
|
this.performanceStats.frameCount++; |
|
|
const now = performance.now(); |
|
|
|
|
|
|
|
|
if (now - this.performanceStats.lastFpsUpdate >= 1000) { |
|
|
this.performanceStats.fps = this.performanceStats.frameCount; |
|
|
this.performanceStats.frameCount = 0; |
|
|
this.performanceStats.lastFpsUpdate = now; |
|
|
|
|
|
|
|
|
this.elements.fpsInfo.textContent = `FPS: ${this.performanceStats.fps}`; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
updateResolutionInfo(streamInfo) { |
|
|
const resText = `Resolution: ${streamInfo.width}x${streamInfo.height}${streamInfo.rotated ? ' (rotated)' : ''}`; |
|
|
this.elements.resolutionInfo.textContent = resText; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
updateInferenceStatus(status) { |
|
|
this.elements.inferenceStatus.textContent = status; |
|
|
|
|
|
|
|
|
this.elements.inferenceStatus.className = 'status-inactive'; |
|
|
if (status.includes('Running')) { |
|
|
this.elements.inferenceStatus.className = 'status-active'; |
|
|
} else if (status.includes('Processing')) { |
|
|
this.elements.inferenceStatus.className = 'status-processing'; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
updateAccelerationInfo() { |
|
|
if (!this.elements.accelerationInfo) return; |
|
|
const accelerationInfo = this.modelInference.getAccelerationInfo(); |
|
|
|
|
|
|
|
|
let accelerationText = `Acceleration: ${accelerationInfo.activeProvider.toUpperCase()}`; |
|
|
|
|
|
|
|
|
if (accelerationInfo.gpuInfo && accelerationInfo.activeProvider !== 'wasm') { |
|
|
const gpu = accelerationInfo.gpuInfo; |
|
|
const vendor = gpu.vendor.includes('Google') ? gpu.renderer.split(' ')[0] : gpu.vendor; |
|
|
accelerationText += ` (${vendor})`; |
|
|
} |
|
|
|
|
|
|
|
|
if (accelerationInfo.activeProvider === 'wasm') { |
|
|
if (accelerationInfo.webglError && accelerationInfo.webglError.includes('cannot resolve operator')) { |
|
|
accelerationText += ' (GPU unsupported operators)'; |
|
|
} else if (accelerationInfo.supportsWebGL || accelerationInfo.supportsWebGPU) { |
|
|
accelerationText += ' (GPU provider failed)'; |
|
|
} else { |
|
|
accelerationText += ' (No GPU support)'; |
|
|
} |
|
|
} |
|
|
this.elements.accelerationInfo.textContent = accelerationText; |
|
|
|
|
|
|
|
|
this.elements.accelerationInfo.className = 'acceleration-info'; |
|
|
if (accelerationInfo.activeProvider === 'webgpu') { |
|
|
this.elements.accelerationInfo.classList.add('acceleration-webgpu'); |
|
|
} else if (accelerationInfo.activeProvider === 'webgl') { |
|
|
this.elements.accelerationInfo.classList.add('acceleration-webgl'); |
|
|
} else { |
|
|
this.elements.accelerationInfo.classList.add('acceleration-wasm'); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
showLoading() { |
|
|
let message = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 'Loading...'; |
|
|
this.elements.loadingText.textContent = message; |
|
|
this.elements.loadingIndicator.style.display = 'block'; |
|
|
this.elements.cameraSelection.style.display = 'none'; |
|
|
this.elements.videoContainer.style.display = 'none'; |
|
|
this.elements.errorDisplay.style.display = 'none'; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Hides the loading indicator; other panels are left untouched.
 */
hideLoading() {
this.elements.loadingIndicator.style.display = 'none';
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
showCameraSelection() { |
|
|
this.stopInference(); |
|
|
this.cameraManager.stopCamera(); |
|
|
this.state.isCameraActive = false; |
|
|
this.elements.cameraSelection.style.display = 'block'; |
|
|
this.elements.videoContainer.style.display = 'none'; |
|
|
this.elements.errorDisplay.style.display = 'none'; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Hides the camera selection panel.
 */
hideCameraSelection() {
this.elements.cameraSelection.style.display = 'none';
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Reveals the live video container and hides any visible error panel.
 */
showVideoContainer() {
this.elements.videoContainer.style.display = 'block';
this.elements.errorDisplay.style.display = 'none';
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
showError(title, message) { |
|
|
this.elements.errorMessage.innerHTML = `<strong>${title}</strong><br>${message}`; |
|
|
this.elements.errorDisplay.style.display = 'block'; |
|
|
this.elements.loadingIndicator.style.display = 'none'; |
|
|
this.elements.cameraSelection.style.display = 'none'; |
|
|
this.elements.videoContainer.style.display = 'none'; |
|
|
this.state.currentError = { |
|
|
title, |
|
|
message |
|
|
}; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Hides the error panel and clears the recorded error state.
 */
hideError() {
this.elements.errorDisplay.style.display = 'none';
this.state.currentError = null;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
cleanup() { |
|
|
console.log('Cleaning up application...'); |
|
|
this.stopInference(); |
|
|
this.cameraManager.dispose(); |
|
|
this.modelInference.dispose(); |
|
|
this.state = { |
|
|
isInitialized: false, |
|
|
isModelLoaded: false, |
|
|
isCameraActive: false, |
|
|
isInferenceRunning: false, |
|
|
currentError: null |
|
|
}; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
// Bootstrap: construct and initialize the app once the DOM is ready, and
// expose the instance globally for debugging from the browser console.
document.addEventListener('DOMContentLoaded', () => {
  const application = new CardSegmentationApp();
  application.init();
  window.cardSegmentationApp = application;
});
|
|
|
|
|
})(); |
|
|
|
|
|
|