|
|
<!DOCTYPE html>
<html lang="en">
<head>
  <!-- Charset must come first in <head> -->
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>ONNX Model Analyzer & Loader</title>
  <!-- Third-party CDN assets: Tailwind (utility CSS), Feather (SVG icons), anime.js (animations).
       NOTE(review): no SRI integrity/crossorigin attributes on these CDN scripts — consider adding them. -->
  <script src="https://cdn.tailwindcss.com"></script>
  <script src="https://cdn.jsdelivr.net/npm/feather-icons/dist/feather.min.js"></script>
  <script src="https://cdn.jsdelivr.net/npm/animejs@3.2.1/lib/anime.min.js"></script>
  <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap" rel="stylesheet">
|
|
<style>
  /* Design tokens shared across the stylesheet */
  :root {
    --primary: #6366f1;
    --primary-dark: #4f46e5;  /* NOTE(review): appears unused in this stylesheet */
    --secondary: #10b981;
    --dark: #0f172a;          /* NOTE(review): appears unused; matches body gradient start color */
    --light: #f8fafc;
  }

  /* Dark slate gradient background with light text for the whole app */
  body {
    font-family: 'Inter', sans-serif;
    background: linear-gradient(135deg, #0f172a 0%, #1e293b 100%);
    color: var(--light);
    min-height: 100vh;
  }

  /* Frosted-glass card used for model-type tiles and info panels */
  .model-card {
    transition: all 0.3s ease;
    border: 1px solid rgba(99, 102, 241, 0.2);
    backdrop-filter: blur(10px);
    background: rgba(30, 41, 59, 0.6);
  }

  /* Lift + glow on hover */
  .model-card:hover {
    transform: translateY(-5px);
    box-shadow: 0 20px 25px -5px rgba(0, 0, 0, 0.3), 0 10px 10px -5px rgba(0, 0, 0, 0.2);
    border-color: rgba(99, 102, 241, 0.5);
  }

  /* Dashed drop zone for the .onnx file */
  .upload-area {
    border: 2px dashed rgba(99, 102, 241, 0.5);
    transition: all 0.3s ease;
  }

  /* .dragover is toggled from JS while a file is dragged over the zone */
  .upload-area:hover, .upload-area.dragover {
    border-color: var(--primary);
    background: rgba(99, 102, 241, 0.1);
  }

  /* Thin track for progress indicators */
  .progress-bar {
    height: 6px;
    border-radius: 3px;
    background: rgba(255, 255, 255, 0.1);
    overflow: hidden;
  }

  /* Fill element; width is driven from JS (0%–100%) */
  .progress-fill {
    height: 100%;
    background: linear-gradient(90deg, var(--primary), var(--secondary));
    width: 0%;
    transition: width 0.4s ease;
  }

  /* Console-style log panel */
  .terminal {
    background: rgba(0, 0, 0, 0.7);
    font-family: monospace;
    font-size: 14px;
    line-height: 1.5;
  }

  .glow-text {
    text-shadow: 0 0 10px rgba(99, 102, 241, 0.7);
  }

  /* Soft opacity pulse for "busy" indicators */
  .pulse {
    animation: pulse 2s infinite;
  }

  @keyframes pulse {
    0% { opacity: 0.6; }
    50% { opacity: 1; }
    100% { opacity: 0.6; }
  }

  /* Entrance animation: sections start hidden/offset... */
  .fade-in {
    opacity: 0;
    transform: translateY(20px);
  }

  /* ...and JS adds .visible (staggered) to slide them in */
  .fade-in.visible {
    opacity: 1;
    transform: translateY(0);
    transition: opacity 0.6s ease, transform 0.6s ease;
  }
</style>
|
|
</head> |
|
|
<body class="min-h-screen"> |
|
|
|
|
|
<!-- Site header: brand mark, primary nav (desktop only), hamburger (mobile).
     NOTE(review): nav links are href="#" placeholders — point them at real anchors or make them buttons.
     NOTE(review): this <h1> plus the hero <h1> gives the page two h1 elements;
     consider demoting the brand to a div or the hero heading to h2.
     NOTE(review): the mobile menu button has no accessible name and no wired-up handler. -->
<header class="py-6 px-4 sm:px-6 lg:px-8 border-b border-slate-800">
  <div class="max-w-7xl mx-auto flex justify-between items-center">
    <div class="flex items-center space-x-3">
      <div class="w-10 h-10 rounded-lg bg-indigo-600 flex items-center justify-center">
        <i data-feather="cpu" class="text-white"></i>
      </div>
      <h1 class="text-2xl font-bold">ONNX<span class="text-indigo-400">Web</span></h1>
    </div>
    <nav class="hidden md:flex space-x-8">
      <a href="#" class="text-slate-300 hover:text-white transition">Models</a>
      <a href="#" class="text-slate-300 hover:text-white transition">Analyzer</a>
      <a href="#" class="text-slate-300 hover:text-white transition">Documentation</a>
    </nav>
    <button class="md:hidden text-slate-300">
      <i data-feather="menu"></i>
    </button>
  </div>
</header>
|
|
|
|
|
|
|
|
<main class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8 py-12"> |
|
|
|
|
|
<!-- Hero: headline, value proposition, and two CTAs.
     #loadModelBtn is wired in JS to open the hidden file input. -->
<section class="text-center mb-16 fade-in">
  <h1 class="text-4xl md:text-6xl font-bold mb-6">
    Universal <span class="text-indigo-400">ONNX</span> Model Loader
  </h1>
  <p class="text-xl text-slate-300 max-w-3xl mx-auto mb-10">
    Analyze, load, and run ONNX models directly in your browser with WebGPU acceleration and Web Workers for optimal performance.
  </p>
  <div class="flex flex-col sm:flex-row justify-center gap-4">
    <button id="loadModelBtn" class="px-8 py-3 bg-indigo-600 hover:bg-indigo-700 rounded-lg font-medium transition flex items-center justify-center">
      <i data-feather="upload" class="mr-2"></i> Load ONNX Model
    </button>
    <!-- NOTE(review): this button has no id/handler; if it should navigate to docs, it belongs in an <a href>. -->
    <button class="px-8 py-3 bg-slate-800 hover:bg-slate-700 rounded-lg font-medium transition flex items-center justify-center">
      <i data-feather="book" class="mr-2"></i> Documentation
    </button>
  </div>
</section>
|
|
|
|
|
<!-- Model-type picker: three clickable cards. JS reads data-model-type on click
     (selectModelType) and then reveals the upload section.
     NOTE(review): the cards are click-only divs — not reachable by keyboard;
     consider role="button" + tabindex="0" + a keydown handler, or real <button>s. -->
<section id="modelTypeSection" class="mb-16 fade-in">
  <div class="text-center mb-8">
    <h2 class="text-3xl font-bold mb-4">Choose Your Model Type</h2>
    <p class="text-slate-300">Select the type of media processing model you want to load</p>
  </div>

  <div class="grid grid-cols-1 md:grid-cols-3 gap-6">
    <!-- Image models card -->
    <div class="model-card rounded-xl p-6 cursor-pointer hover:border-indigo-400" data-model-type="image">
      <div class="text-center">
        <div class="w-16 h-16 rounded-full bg-gradient-to-br from-purple-500 to-pink-500 flex items-center justify-center mx-auto mb-4">
          <i data-feather="image" class="text-white w-8 h-8"></i>
        </div>
        <h3 class="text-xl font-semibold mb-2">Image Models</h3>
        <p class="text-slate-400 text-sm mb-4">Generation, transformation, and analysis</p>
        <div class="space-y-2 text-xs text-slate-500">
          <div>🖼️ Image → Image (Style Transfer, Super Resolution)</div>
          <div>📝 Text → Image (DALL-E, Stable Diffusion)</div>
          <div>🏷️ Image → Text (Image Captioning, OCR)</div>
        </div>
      </div>
    </div>

    <!-- Audio models card -->
    <div class="model-card rounded-xl p-6 cursor-pointer hover:border-emerald-400" data-model-type="audio">
      <div class="text-center">
        <div class="w-16 h-16 rounded-full bg-gradient-to-br from-emerald-500 to-teal-500 flex items-center justify-center mx-auto mb-4">
          <i data-feather="mic" class="text-white w-8 h-8"></i>
        </div>
        <h3 class="text-xl font-semibold mb-2">Audio Models</h3>
        <p class="text-slate-400 text-sm mb-4">Speech, music, and audio processing</p>
        <div class="space-y-2 text-xs text-slate-500">
          <div>🎵 Audio → Audio (Enhancement, Conversion)</div>
          <div>🗣️ Text → Audio (Text-to-Speech)</div>
          <div>📝 Audio → Text (Speech Recognition)</div>
        </div>
      </div>
    </div>

    <!-- Text models card -->
    <div class="model-card rounded-xl p-6 cursor-pointer hover:border-amber-400" data-model-type="text">
      <div class="text-center">
        <div class="w-16 h-16 rounded-full bg-gradient-to-br from-amber-500 to-orange-500 flex items-center justify-center mx-auto mb-4">
          <i data-feather="type" class="text-white w-8 h-8"></i>
        </div>
        <h3 class="text-xl font-semibold mb-2">Text Models</h3>
        <p class="text-slate-400 text-sm mb-4">Language processing and generation</p>
        <div class="space-y-2 text-xs text-slate-500">
          <div>💬 Text → Text (Translation, Summarization)</div>
          <div>🖼️ Text → Image (Image Generation)</div>
          <div>🎵 Text → Audio (Voice Synthesis)</div>
        </div>
      </div>
    </div>
  </div>
</section>
|
|
|
|
|
|
|
|
<!-- Upload zone (hidden until a model type is chosen). The title/subtitle are
     rewritten per model type from JS; clicks and drag-and-drop on .upload-area
     forward to the hidden #modelFileInput.
     NOTE(review): the clickable .upload-area div is not keyboard-accessible —
     a visible <label for="modelFileInput"> or a real button would fix that. -->
<section id="uploadSection" class="mb-16 fade-in hidden">
  <div class="text-center mb-6">
    <h3 class="text-2xl font-semibold mb-2" id="uploadTitle">Upload ONNX Model</h3>
    <p class="text-slate-400" id="uploadSubtitle">Drag & drop your .onnx file here or click to browse</p>
  </div>

  <div class="upload-area rounded-2xl p-12 text-center cursor-pointer transition-all duration-300">
    <div class="max-w-md mx-auto">
      <div class="w-16 h-16 rounded-full bg-indigo-900/30 flex items-center justify-center mx-auto mb-6">
        <i data-feather="cloud-upload" class="text-indigo-400 w-8 h-8"></i>
      </div>
      <p class="text-sm text-slate-500">Supports all major ONNX formats with automatic requirement detection</p>
    </div>
    <!-- Hidden picker; only .onnx files are selectable -->
    <input type="file" id="modelFileInput" accept=".onnx" class="hidden">
  </div>
</section>
|
|
|
|
|
<!-- Model analysis panel (hidden until a model file is loaded). All the "-"
     placeholder values (modelName, inputCount, modelSize, ...) are filled in
     from JS after the .onnx file is parsed. -->
<section id="analysisSection" class="hidden mb-16 fade-in">
  <div class="bg-slate-800/50 backdrop-blur rounded-2xl p-6 mb-8">
    <div class="flex items-center justify-between mb-6">
      <h2 class="text-2xl font-bold">Model Analysis</h2>
      <div class="flex items-center space-x-2">
        <span class="px-3 py-1 bg-green-900/30 text-green-400 rounded-full text-sm">Ready</span>
      </div>
    </div>

    <div class="grid grid-cols-1 md:grid-cols-3 gap-6 mb-8">
      <!-- Card 1: model identity -->
      <div class="model-card rounded-xl p-6">
        <div class="flex items-center mb-4">
          <div class="w-10 h-10 rounded-lg bg-indigo-900/30 flex items-center justify-center mr-3">
            <i data-feather="info" class="text-indigo-400"></i>
          </div>
          <h3 class="font-semibold">Model Info</h3>
        </div>
        <div class="space-y-3">
          <div>
            <p class="text-slate-400 text-sm">Name</p>
            <p id="modelName" class="font-medium">-</p>
          </div>
          <div>
            <p class="text-slate-400 text-sm">Version</p>
            <p id="modelVersion" class="font-medium">-</p>
          </div>
          <div>
            <p class="text-slate-400 text-sm">IR Version</p>
            <p id="irVersion" class="font-medium">-</p>
          </div>
        </div>
      </div>

      <!-- Card 2: graph requirements -->
      <div class="model-card rounded-xl p-6">
        <div class="flex items-center mb-4">
          <div class="w-10 h-10 rounded-lg bg-emerald-900/30 flex items-center justify-center mr-3">
            <i data-feather="cpu" class="text-emerald-400"></i>
          </div>
          <h3 class="font-semibold">Requirements</h3>
        </div>
        <div class="space-y-3">
          <div>
            <p class="text-slate-400 text-sm">Inputs</p>
            <p id="inputCount" class="font-medium">-</p>
          </div>
          <div>
            <p class="text-slate-400 text-sm">Outputs</p>
            <p id="outputCount" class="font-medium">-</p>
          </div>
          <div>
            <p class="text-slate-400 text-sm">Ops</p>
            <p id="opCount" class="font-medium">-</p>
          </div>
        </div>
      </div>

      <!-- Card 3: runtime/performance facts.
           NOTE(review): "WebGPU Support: Yes" and "Workers: 4" are hard-coded
           defaults in the markup — verify JS overwrites them with real values. -->
      <div class="model-card rounded-xl p-6">
        <div class="flex items-center mb-4">
          <div class="w-10 h-10 rounded-lg bg-amber-900/30 flex items-center justify-center mr-3">
            <i data-feather="zap" class="text-amber-400"></i>
          </div>
          <h3 class="font-semibold">Performance</h3>
        </div>
        <div class="space-y-3">
          <div>
            <p class="text-slate-400 text-sm">Size</p>
            <p id="modelSize" class="font-medium">-</p>
          </div>
          <div>
            <p class="text-slate-400 text-sm">WebGPU Support</p>
            <p id="webgpuSupport" class="font-medium text-emerald-400">Yes</p>
          </div>
          <div>
            <p class="text-slate-400 text-sm">Workers</p>
            <p id="workerCount" class="font-medium">4</p>
          </div>
        </div>
      </div>
    </div>

    <!-- Analysis progress; width of #analysisProgress is driven from JS -->
    <div class="progress-bar mb-6">
      <div id="analysisProgress" class="progress-fill"></div>
    </div>

    <div class="flex flex-wrap gap-3">
      <button id="runModelBtn" class="px-6 py-2 bg-indigo-600 hover:bg-indigo-700 rounded-lg font-medium transition flex items-center">
        <i data-feather="play" class="mr-2"></i> Run Model
      </button>
      <button id="exportConfigBtn" class="px-6 py-2 bg-slate-700 hover:bg-slate-600 rounded-lg font-medium transition flex items-center">
        <i data-feather="download" class="mr-2"></i> Export Config
      </button>
      <button id="detailedAnalysisBtn" class="px-6 py-2 bg-slate-700 hover:bg-slate-600 rounded-lg font-medium transition flex items-center">
        <i data-feather="bar-chart-2" class="mr-2"></i> Detailed Analysis
      </button>
    </div>
  </div>
</section>
|
|
|
|
|
|
|
|
<!-- Inference workspace (hidden until the user clicks "Run Model").
     Left card: input configuration (type, source, capture widgets, preprocessing).
     Right card: results (timing, output, downloads, log).
     Most inner panels start hidden and are toggled from JS depending on the
     selected input type/source. -->
<section id="inferenceSection" class="hidden fade-in">
  <div class="grid grid-cols-1 lg:grid-cols-2 gap-8">

    <!-- ============ Input configuration card ============ -->
    <div class="model-card rounded-2xl p-6">
      <h2 class="text-2xl font-bold mb-6">Input Configuration</h2>

      <!-- Input type: image / audio / text (delegated click handler in JS) -->
      <div class="mb-6">
        <label class="block text-slate-300 mb-3">Input Type</label>
        <div class="grid grid-cols-3 gap-2" id="inputTypeGrid">
          <button class="input-type-btn px-3 py-2 bg-slate-700 hover:bg-slate-600 rounded-lg text-sm transition flex items-center justify-center" data-type="image">
            <i data-feather="image" class="w-4 h-4 mr-2"></i> Image
          </button>
          <button class="input-type-btn px-3 py-2 bg-slate-700 hover:bg-slate-600 rounded-lg text-sm transition flex items-center justify-center" data-type="audio">
            <i data-feather="mic" class="w-4 h-4 mr-2"></i> Audio
          </button>
          <button class="input-type-btn px-3 py-2 bg-slate-700 hover:bg-slate-600 rounded-lg text-sm transition flex items-center justify-center" data-type="text">
            <i data-feather="type" class="w-4 h-4 mr-2"></i> Text
          </button>
        </div>
      </div>

      <!-- Input source: upload / camera / microphone / typed text.
           The microphone button starts hidden; JS shows the sources relevant
           to the chosen input type. -->
      <div class="mb-6" id="inputSourceSection">
        <label class="block text-slate-300 mb-2">Input Source</label>
        <div class="grid grid-cols-2 gap-3" id="inputSourceGrid">
          <button class="input-source-btn px-4 py-3 bg-slate-700 hover:bg-slate-600 rounded-lg text-left transition" data-source="upload">
            <div class="flex items-center">
              <i data-feather="upload" class="mr-2"></i>
              <span>Upload File</span>
            </div>
          </button>
          <button class="input-source-btn px-4 py-3 bg-slate-700 hover:bg-slate-600 rounded-lg text-left transition" data-source="camera">
            <div class="flex items-center">
              <i data-feather="camera" class="mr-2"></i>
              <span>Camera</span>
            </div>
          </button>
          <button class="input-source-btn px-4 py-3 bg-slate-700 hover:bg-slate-600 rounded-lg text-left transition hidden" data-source="microphone">
            <div class="flex items-center">
              <i data-feather="mic" class="mr-2"></i>
              <span>Microphone</span>
            </div>
          </button>
          <button class="input-source-btn px-4 py-3 bg-slate-700 hover:bg-slate-600 rounded-lg text-left transition" data-source="text">
            <div class="flex items-center">
              <i data-feather="edit-3" class="mr-2"></i>
              <span>Enter Text</span>
            </div>
          </button>
        </div>
      </div>

      <!-- Free-text input (shown for the "text" source) -->
      <div class="mb-6 hidden" id="textInputArea">
        <label class="block text-slate-300 mb-2">Enter Text</label>
        <textarea id="textInput" class="w-full p-3 bg-slate-800 border border-slate-600 rounded-lg text-white placeholder-slate-400 resize-none" rows="4" placeholder="Enter your text here..."></textarea>
      </div>

      <!-- Preview of a chosen file; #previewContent is filled from JS -->
      <div class="mb-6 hidden" id="filePreview">
        <label class="block text-slate-300 mb-2">Selected File</label>
        <div id="previewContent" class="bg-slate-800 rounded-lg p-4"></div>
      </div>

      <!-- Auto-play / loop toggles for media inputs (Tailwind "peer" switches) -->
      <div class="mb-6 hidden" id="mediaControls">
        <label class="block text-slate-300 mb-3">Media Controls</label>
        <div class="space-y-3">
          <div class="flex items-center justify-between">
            <span>Auto-play</span>
            <label class="relative inline-flex items-center cursor-pointer">
              <input type="checkbox" id="autoPlayToggle" class="sr-only peer">
              <div class="w-11 h-6 bg-gray-200 peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-blue-300 dark:peer-focus:ring-blue-800 rounded-full peer dark:bg-gray-700 peer-checked:after:translate-x-full peer-checked:after:border-white after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-gray-300 after:border after:rounded-full after:h-5 after:w-5 after:transition-all dark:border-gray-600 peer-checked:bg-blue-600"></div>
            </label>
          </div>
          <div class="flex items-center justify-between">
            <span>Loop</span>
            <label class="relative inline-flex items-center cursor-pointer">
              <input type="checkbox" id="loopToggle" class="sr-only peer" checked>
              <div class="w-11 h-6 bg-gray-200 peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-blue-300 dark:peer-focus:ring-blue-800 rounded-full peer dark:bg-gray-700 peer-checked:after:translate-x-full peer-checked:after:border-white after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-gray-300 after:border after:rounded-full after:h-5 after:w-5 after:transition-all dark:border-gray-600 peer-checked:bg-blue-600"></div>
            </label>
          </div>
        </div>
      </div>

      <!-- Live camera preview; the <video> id doubles as the getUserMedia sink in JS -->
      <div class="mb-6 hidden" id="cameraSection">
        <label class="block text-slate-300 mb-2">Camera Stream</label>
        <video id="cameraStream" class="w-full max-h-48 bg-black rounded-lg" autoplay muted playsinline></video>
        <div class="flex space-x-2 mt-3">
          <button id="captureBtn" class="px-4 py-2 bg-indigo-600 hover:bg-indigo-700 rounded-lg text-sm transition flex items-center">
            <i data-feather="camera" class="mr-2"></i>Capture
          </button>
          <button id="stopCameraBtn" class="px-4 py-2 bg-red-600 hover:bg-red-700 rounded-lg text-sm transition flex items-center">
            <i data-feather="stop-circle" class="mr-2"></i>Stop
          </button>
        </div>
      </div>

      <!-- Microphone recording UI with a live input-level meter -->
      <div class="mb-6 hidden" id="microphoneSection">
        <label class="block text-slate-300 mb-2">Audio Recording</label>
        <div id="audioLevel" class="bg-slate-800 rounded-lg p-4">
          <div class="flex items-center justify-between mb-2">
            <span class="text-sm text-slate-400">Recording Level</span>
            <span id="audioLevelValue" class="text-sm text-slate-300">0%</span>
          </div>
          <div class="h-2 bg-slate-700 rounded-full overflow-hidden">
            <div id="audioLevelBar" class="h-full bg-emerald-400 rounded-full transition-all duration-200" style="width: 0%"></div>
          </div>
        </div>
        <div class="flex space-x-2 mt-3">
          <button id="startRecordingBtn" class="px-4 py-2 bg-emerald-600 hover:bg-emerald-700 rounded-lg text-sm transition flex items-center">
            <i data-feather="mic" class="mr-2"></i>Start Recording
          </button>
          <button id="stopRecordingBtn" class="px-4 py-2 bg-red-600 hover:bg-red-700 rounded-lg text-sm transition flex items-center">
            <i data-feather="square" class="mr-2"></i>Stop
          </button>
        </div>
      </div>

      <!-- Snapshot taken from the camera (drawn onto the canvas from JS) -->
      <div class="mb-6 hidden" id="capturedImageSection">
        <label class="block text-slate-300 mb-2">Captured Image</label>
        <canvas id="capturedCanvas" class="w-full max-h-48 border border-slate-600 rounded-lg"></canvas>
      </div>

      <!-- Playback of the finished microphone recording -->
      <div class="mb-6 hidden" id="recordedAudioSection">
        <label class="block text-slate-300 mb-2">Recorded Audio</label>
        <audio id="recordedAudio" class="w-full" controls></audio>
      </div>

      <!-- Preprocessing checkboxes; options are injected from JS per input type -->
      <div class="mb-6" id="preprocessingSection">
        <label class="block text-slate-300 mb-2">Preprocessing</label>
        <div class="space-y-3" id="preprocessingOptions">
        </div>
      </div>

      <button class="w-full py-3 bg-indigo-600 hover:bg-indigo-700 rounded-lg font-medium transition flex items-center justify-center" id="executeBtn">
        <i data-feather="play" class="mr-2"></i> Execute Inference
      </button>
    </div>

    <!-- ============ Output results card ============ -->
    <div class="model-card rounded-2xl p-6">
      <h2 class="text-2xl font-bold mb-6">Output Results</h2>

      <!-- Inference timing + progress (values written from JS) -->
      <div class="mb-6">
        <div class="flex justify-between items-center mb-3">
          <span class="text-slate-300">Inference Time</span>
          <span class="font-mono" id="inferenceTime">-</span>
        </div>
        <div class="progress-bar">
          <div class="progress-fill" id="outputProgress" style="width: 0%"></div>
        </div>
      </div>

      <!-- Rendered model output (image/audio/text depending on the model) -->
      <div class="mb-6" id="outputDisplay">
        <h3 class="font-semibold mb-3">Output</h3>
        <div id="outputContent" class="bg-slate-800 rounded-lg p-4 min-h-48 flex items-center justify-center">
          <p class="text-slate-400">Output will appear here after inference</p>
        </div>
      </div>

      <!-- Download/share actions, revealed once results exist.
           NOTE(review): these two buttons have no ids or handlers wired yet. -->
      <div class="mb-6 hidden" id="downloadOptions">
        <h3 class="font-semibold mb-3">Download Results</h3>
        <div class="grid grid-cols-2 gap-3">
          <button class="px-4 py-2 bg-emerald-600 hover:bg-emerald-700 rounded-lg text-sm transition flex items-center justify-center">
            <i data-feather="download" class="mr-2"></i> Download
          </button>
          <button class="px-4 py-2 bg-slate-600 hover:bg-slate-500 rounded-lg text-sm transition flex items-center justify-center">
            <i data-feather="share-2" class="mr-2"></i> Share
          </button>
        </div>
      </div>

      <!-- Scrolling log; logMessage() in JS appends to #modelLog -->
      <div>
        <h3 class="font-semibold mb-3">Model Log</h3>
        <div class="terminal rounded-lg p-4 h-48 overflow-auto" id="modelLog">
          <pre class="text-green-400">[INFO] Model loaded successfully
[INFO] WebGPU backend initialized
[INFO] Ready for inference...</pre>
        </div>
      </div>
    </div>
  </div>
</section>
|
|
|
|
|
|
|
|
<!-- Model comparison (hidden by default); result cards are injected into
     #comparisonGrid from JS when multiple models have been run. -->
<section id="comparisonSection" class="hidden mb-16 fade-in">
  <div class="text-center mb-8">
    <h2 class="text-3xl font-bold mb-4">Model Comparison</h2>
    <p class="text-slate-300">Compare results from multiple models</p>
  </div>

  <div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-6" id="comparisonGrid">
  </div>
</section>
|
|
</main> |
|
|
|
|
|
|
|
|
<!-- Footer: brand mark, social links, tagline.
     NOTE(review): the icon-only social links have no accessible names (add
     aria-label) and point at "#" placeholders. -->
<footer class="border-t border-slate-800 py-8 mt-16">
  <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8">
    <div class="flex flex-col md:flex-row justify-between items-center">
      <div class="flex items-center space-x-3 mb-4 md:mb-0">
        <div class="w-8 h-8 rounded-lg bg-indigo-600 flex items-center justify-center">
          <i data-feather="cpu" class="text-white w-4 h-4"></i>
        </div>
        <span class="font-semibold">ONNXWeb</span>
      </div>
      <div class="flex space-x-6">
        <a href="#" class="text-slate-400 hover:text-white transition">
          <i data-feather="github"></i>
        </a>
        <a href="#" class="text-slate-400 hover:text-white transition">
          <i data-feather="twitter"></i>
        </a>
        <a href="#" class="text-slate-400 hover:text-white transition">
          <i data-feather="book"></i>
        </a>
      </div>
    </div>
    <div class="mt-6 text-center text-slate-500 text-sm">
      <p>Universal ONNX Model Loader for Browser-based Inference with WebGPU Acceleration</p>
      <p class="mt-2">Supports all major frameworks: PyTorch, TensorFlow, Keras, Scikit-learn</p>
    </div>
  </div>
</footer>
|
|
<script> |
|
|
|
|
|
// ---- Application state ----
let currentModelType = null;         // 'image' | 'audio' | 'text' — set by selectModelType()
let currentInputType = 'image';      // active inference input type
let modelInfo = {};                  // filled after the .onnx file is analyzed

// ---- Media capture state ----
let cameraStream = null;             // MediaStream from getUserMedia (video)
let microphoneStream = null;         // MediaStream from getUserMedia (audio)
let mediaRecorder = null;            // MediaRecorder wrapping microphoneStream
let recordedChunks = [];             // Blob chunks collected while recording
let audioContext = null;             // AudioContext used for level metering
let audioAnalyzer = null;            // AnalyserNode fed by the microphone stream
let audioLevelInterval = null;       // setInterval id for the level-meter loop
let currentCapturedImage = null;     // data URL of the last captured camera frame
let currentRecordedAudioBlob = null; // Blob of the last completed recording

// Render all <i data-feather="..."> placeholders as inline SVG icons.
// Runs immediately: the script tag sits at the end of <body>, after the markup.
feather.replace();
|
|
|
|
|
// ---- Core layout DOM references ----
const modelTypeCards = document.querySelectorAll('[data-model-type]'); // the three category cards
const uploadSection = document.getElementById('uploadSection');
const uploadTitle = document.getElementById('uploadTitle');
const uploadSubtitle = document.getElementById('uploadSubtitle');
const uploadArea = document.querySelector('.upload-area');             // drag-and-drop zone
const modelFileInput = document.getElementById('modelFileInput');      // hidden <input type="file">
const loadModelBtn = document.getElementById('loadModelBtn');
const analysisSection = document.getElementById('analysisSection');
const inferenceSection = document.getElementById('inferenceSection');
const comparisonSection = document.getElementById('comparisonSection');
const runModelBtn = document.getElementById('runModelBtn');
const executeBtn = document.getElementById('executeBtn');
|
|
|
|
|
|
|
// ---- Input-configuration DOM references ----
const inputTypeGrid = document.getElementById('inputTypeGrid');           // image/audio/text buttons
const inputSourceGrid = document.getElementById('inputSourceGrid');       // upload/camera/mic/text buttons
const inputSourceSection = document.getElementById('inputSourceSection');
const textInputArea = document.getElementById('textInputArea');
const filePreview = document.getElementById('filePreview');
const previewContent = document.getElementById('previewContent');
const mediaControls = document.getElementById('mediaControls');
const preprocessingSection = document.getElementById('preprocessingSection');
const preprocessingOptions = document.getElementById('preprocessingOptions');
|
|
|
|
|
|
|
|
// ---- Media capture DOM references ----
const cameraSection = document.getElementById('cameraSection');
const microphoneSection = document.getElementById('microphoneSection');
const capturedImageSection = document.getElementById('capturedImageSection');
const recordedAudioSection = document.getElementById('recordedAudioSection');
// The <video> element showing the live camera preview. Renamed from
// `cameraStream`: that const collided with the module-level
// `let cameraStream` (the MediaStream), which is a SyntaxError that
// prevented this entire script from parsing.
const cameraVideo = document.getElementById('cameraStream');
const audioLevel = document.getElementById('audioLevel');           // level-meter container
const audioLevelBar = document.getElementById('audioLevelBar');     // meter fill (width set from JS)
const audioLevelValue = document.getElementById('audioLevelValue'); // meter percentage text
const capturedCanvas = document.getElementById('capturedCanvas');   // snapshot target
const recordedAudio = document.getElementById('recordedAudio');     // playback <audio>
const captureBtn = document.getElementById('captureBtn');
const stopCameraBtn = document.getElementById('stopCameraBtn');
const startRecordingBtn = document.getElementById('startRecordingBtn');
const stopRecordingBtn = document.getElementById('stopRecordingBtn');
const autoPlayToggle = document.getElementById('autoPlayToggle');
const loopToggle = document.getElementById('loopToggle');
|
|
|
|
|
|
|
|
// ---- Output-panel DOM references ----
const inferenceTime = document.getElementById('inferenceTime');     // timing readout
const outputProgress = document.getElementById('outputProgress');   // progress fill
const outputDisplay = document.getElementById('outputDisplay');
const outputContent = document.getElementById('outputContent');     // where results are rendered
const downloadOptions = document.getElementById('downloadOptions');
const modelLog = document.getElementById('modelLog');               // terminal-style log panel
|
|
|
|
|
|
|
|
// Staggered entrance: reveal each .fade-in section 300 ms after the previous one.
document.addEventListener('DOMContentLoaded', function() {
    const fadeElements = document.querySelectorAll('.fade-in');
    fadeElements.forEach((el, index) => {
        setTimeout(() => {
            el.classList.add('visible');
        }, 300 * index);
    });
});
|
|
|
|
|
|
|
|
// Clicking a category card selects that model type and opens the upload flow.
modelTypeCards.forEach(card => {
    card.addEventListener('click', () => {
        const modelType = card.dataset.modelType;
        selectModelType(modelType);
    });
});
|
|
|
|
|
/**
 * Record the chosen model category, highlight its card, reveal the upload
 * section, and swap the upload copy to match the category.
 *
 * @param {'image'|'audio'|'text'} modelType - category from the card's data attribute
 */
function selectModelType(modelType) {
    currentModelType = modelType;

    // Accent border per category (mirrors each card's hover color).
    const highlightFor = {
        image: 'border-indigo-400',
        audio: 'border-emerald-400',
        text: 'border-amber-400'
    };

    modelTypeCards.forEach(card => {
        card.classList.remove('border-indigo-400', 'border-emerald-400', 'border-amber-400');
        if (card.dataset.modelType === modelType) {
            card.classList.add(highlightFor[modelType]);
        }
    });

    // Bring the (previously hidden) upload zone into view.
    uploadSection.classList.remove('hidden');
    uploadSection.scrollIntoView({ behavior: 'smooth' });

    // Category-specific copy for the upload panel.
    const copy = {
        image: {
            title: 'Upload Image Model (.onnx)',
            subtitle: 'Support for image generation, transformation, and analysis models'
        },
        audio: {
            title: 'Upload Audio Model (.onnx)',
            subtitle: 'Support for speech recognition, TTS, and audio processing models'
        },
        text: {
            title: 'Upload Text Model (.onnx)',
            subtitle: 'Support for language models, translation, and text generation'
        }
    };

    uploadTitle.textContent = copy[modelType].title;
    uploadSubtitle.textContent = copy[modelType].subtitle;

    logMessage(`Selected ${modelType} model type`);
}
|
|
|
|
|
|
|
|
// ---- Upload interactions ----

// Clicking anywhere in the drop zone opens the hidden file picker.
uploadArea.addEventListener('click', () => {
    modelFileInput.click();
});

// Highlight the drop zone while a file is dragged over it.
uploadArea.addEventListener('dragover', (e) => {
    e.preventDefault(); // required so the browser allows a drop here
    uploadArea.classList.add('dragover');
});

uploadArea.addEventListener('dragleave', () => {
    uploadArea.classList.remove('dragover');
});

// Accept a dropped file; only the first file is used.
uploadArea.addEventListener('drop', (e) => {
    e.preventDefault();
    uploadArea.classList.remove('dragover');

    if (e.dataTransfer.files.length) {
        handleFileUpload(e.dataTransfer.files[0]);
    }
});

// File chosen via the native picker.
modelFileInput.addEventListener('change', (e) => {
    if (e.target.files.length) {
        handleFileUpload(e.target.files[0]);
    }
});

// Hero "Load ONNX Model" button doubles as a file-picker trigger.
loadModelBtn.addEventListener('click', () => {
    modelFileInput.click();
});
|
|
|
|
|
|
|
|
/**
 * Request camera access and show the live preview.
 *
 * Stores the MediaStream in the module-level `cameraStream` (so stopCamera()
 * and captureImage() can use it) and attaches it to the preview <video>.
 * On failure, logs and alerts the user.
 */
async function startCamera() {
    try {
        cameraStream = await navigator.mediaDevices.getUserMedia({
            video: { width: 640, height: 480 }
        });
        // Bug fix: the original assigned the stream to itself
        // (`cameraStream.srcObject = cameraStream`) so the preview element
        // never received it. Look the <video> up by id to avoid depending
        // on a module-level element binding.
        const videoEl = document.getElementById('cameraStream');
        if (videoEl) {
            videoEl.srcObject = cameraStream;
        }
        cameraSection.classList.remove('hidden');
        logMessage('Camera started successfully');
    } catch (error) {
        console.error('Error accessing camera:', error);
        logMessage('Error: Could not access camera - ' + error.message);
        alert('Could not access camera. Please check permissions.');
    }
}
|
|
|
|
|
/**
 * Stop all camera tracks, detach the preview <video>, and hide the camera UI.
 *
 * Bug fix: the original nulled the `cameraStream` variable and *then*
 * dereferenced it (`cameraStream.srcObject = null`), which always threw a
 * TypeError. The preview element is now looked up by id and cleared safely.
 */
function stopCamera() {
    if (cameraStream) {
        cameraStream.getTracks().forEach(track => track.stop());
        cameraStream = null;
        const videoEl = document.getElementById('cameraStream');
        if (videoEl) {
            videoEl.srcObject = null;
        }
        cameraSection.classList.add('hidden');
        logMessage('Camera stopped');
    }
}
|
|
|
|
|
/**
 * Snapshot the current camera frame onto the capture canvas and keep a
 * JPEG data URL of it (in `currentCapturedImage`) for use as inference input.
 */
function captureImage() {
    if (!cameraStream) return;

    // Bug fix: drawImage() cannot take a MediaStream — it needs the <video>
    // element that is playing the stream.
    const videoEl = document.getElementById('cameraStream');
    if (!videoEl) return;

    const canvas = capturedCanvas;
    const ctx = canvas.getContext('2d');
    canvas.width = 640;  // matches the getUserMedia constraints in startCamera()
    canvas.height = 480;

    ctx.drawImage(videoEl, 0, 0, canvas.width, canvas.height);
    currentCapturedImage = canvas.toDataURL('image/jpeg');
    capturedImageSection.classList.remove('hidden');

    logMessage('Image captured from camera');
}
|
|
|
|
|
|
|
|
/**
 * Request microphone access, wire up a live level meter (Web Audio
 * AnalyserNode), and start recording with MediaRecorder.
 *
 * Data chunks accumulate in `recordedChunks`; when the recorder stops they
 * are assembled into a Blob (`currentRecordedAudioBlob`) and exposed through
 * the <audio> preview element. On failure, logs and alerts the user.
 */
async function startRecording() {
    try {
        microphoneStream = await navigator.mediaDevices.getUserMedia({
            audio: true
        });

        // Level metering: route the mic stream through an AnalyserNode.
        audioContext = new (window.AudioContext || window.webkitAudioContext)();
        const source = audioContext.createMediaStreamSource(microphoneStream);
        audioAnalyzer = audioContext.createAnalyser();
        audioAnalyzer.fftSize = 256; // 128 frequency bins — enough for a meter
        source.connect(audioAnalyzer);

        mediaRecorder = new MediaRecorder(microphoneStream);
        recordedChunks = [];

        mediaRecorder.ondataavailable = (event) => {
            if (event.data.size > 0) {
                recordedChunks.push(event.data);
            }
        };

        mediaRecorder.onstop = () => {
            // Bug fix: label the blob with the recorder's actual container
            // type (typically audio/webm or audio/ogg). It was hard-coded to
            // 'audio/wav', which browsers' MediaRecorder does not produce,
            // so downloads/consumers saw a mislabeled file.
            const mimeType = (mediaRecorder && mediaRecorder.mimeType) || 'audio/webm';
            const blob = new Blob(recordedChunks, { type: mimeType });
            currentRecordedAudioBlob = blob;
            recordedAudio.src = URL.createObjectURL(blob);
            recordedAudioSection.classList.remove('hidden');
            logMessage('Audio recording completed');
        };

        mediaRecorder.start();
        microphoneSection.classList.remove('hidden');
        startAudioLevelMonitoring();

        logMessage('Recording started');
    } catch (error) {
        console.error('Error accessing microphone:', error);
        logMessage('Error: Could not access microphone - ' + error.message);
        alert('Could not access microphone. Please check permissions.');
    }
}
|
|
|
|
|
/**
 * Poll the analyser node every 100 ms and reflect the average frequency
 * magnitude in the level meter: width/percentage text update each tick,
 * and the bar turns red above 70%, green otherwise.
 * The interval id is kept in `audioLevelInterval` so stopRecording() can clear it.
 */
function startAudioLevelMonitoring() {
    const bins = new Uint8Array(audioAnalyzer.frequencyBinCount);

    audioLevelInterval = setInterval(() => {
        audioAnalyzer.getByteFrequencyData(bins);

        // Mean magnitude across all bins, normalised to 0–100%.
        let total = 0;
        for (const v of bins) {
            total += v;
        }
        const average = total / bins.length;
        const percentage = Math.min((average / 255) * 100, 100);

        audioLevelBar.style.width = percentage + '%';
        audioLevelValue.textContent = Math.round(percentage) + '%';

        // Color cue: loud input shows red, normal input shows green.
        const loud = percentage > 70;
        audioLevelBar.classList.toggle('bg-red-400', loud);
        audioLevelBar.classList.toggle('bg-emerald-400', !loud);
    }, 100);
}
|
|
|
|
|
function stopRecording() {
    // Halt any in-progress capture and release all audio resources.
    if (mediaRecorder?.state === 'recording') {
        mediaRecorder.stop();
    }

    // Stop the microphone tracks so the browser's recording indicator clears.
    if (microphoneStream) {
        for (const track of microphoneStream.getTracks()) {
            track.stop();
        }
        microphoneStream = null;
    }

    // Tear down the Web Audio graph used by the level meter.
    if (audioContext) {
        audioContext.close();
        audioContext = null;
    }

    // Cancel the level-meter polling loop.
    if (audioLevelInterval) {
        clearInterval(audioLevelInterval);
        audioLevelInterval = null;
    }

    microphoneSection.classList.add('hidden');
    logMessage('Recording stopped');
}
|
|
|
|
|
|
|
|
// Delegate clicks on the input-type grid to selectInputType.
inputTypeGrid.addEventListener('click', (e) => {
    const btn = e.target.closest('.input-type-btn');
    if (!btn) return;
    selectInputType(btn.dataset.type);
});
|
|
|
|
|
function selectInputType(inputType) {
    // Record the chosen type; other handlers read currentInputType.
    currentInputType = inputType;

    // Highlight only the matching type button (toggle with a force flag is
    // equivalent to remove + conditional add).
    document.querySelectorAll('.input-type-btn').forEach((btn) => {
        btn.classList.toggle('bg-indigo-600', btn.dataset.type === inputType);
    });

    // Refresh the dependent UI panels for this type.
    updateInputSources(inputType);
    updatePreprocessingOptions(inputType);

    logMessage(`Selected ${inputType} input type`);
}
|
|
|
|
|
|
|
|
// Delegate clicks on the input-source grid to selectInputSource.
inputSourceGrid.addEventListener('click', (e) => {
    const btn = e.target.closest('.input-source-btn');
    if (!btn) return;
    selectInputSource(btn.dataset.source);
});
|
|
|
|
|
function selectInputSource(source) {
    // Tear down hardware capture that no longer matches the chosen source.
    if (source !== 'camera') stopCamera();
    if (source !== 'microphone') stopRecording();

    // Highlight only the active source button.
    document.querySelectorAll('.input-source-btn').forEach((btn) => {
        btn.classList.toggle('bg-indigo-600', btn.dataset.source === source);
    });

    // Show only the panels relevant to this source; media controls depend on
    // the input TYPE (audio/video), not the source.
    textInputArea.classList.toggle('hidden', source !== 'text');
    filePreview.classList.toggle('hidden', source !== 'upload');
    mediaControls.classList.toggle('hidden', !['audio', 'video'].includes(currentInputType));
    cameraSection.classList.toggle('hidden', source !== 'camera');
    microphoneSection.classList.toggle('hidden', source !== 'microphone');

    // The camera starts immediately; the microphone waits for the explicit
    // record button, so no action is needed for it here.
    if (source === 'camera') {
        startCamera();
    }

    logMessage(`Selected ${source} input source`);
}
|
|
|
|
|
function updateInputSources(inputType) {
    // Show only the source buttons valid for the given input type, then
    // auto-select the first one.
    const sourceButtons = document.querySelectorAll('.input-source-btn');

    // Hide everything first, then reveal the valid subset.
    sourceButtons.forEach(btn => btn.classList.add('hidden'));

    const visibleSources = {
        image: ['upload', 'camera'],
        audio: ['upload', 'microphone'],
        text: ['upload', 'text']
    };

    // Fall back to an empty list so an unknown type cannot crash with
    // "Cannot read properties of undefined (reading 'forEach')".
    const sources = visibleSources[inputType] || [];
    sources.forEach(source => {
        const btn = document.querySelector(`[data-source="${source}"]`);
        if (btn) btn.classList.remove('hidden');
    });

    // Default to the first available source ('upload' in every mapping).
    if (sources.length > 0) {
        selectInputSource(sources[0]);
    }
}
|
|
|
|
|
function updatePreprocessingOptions(inputType) {
    // Render the preprocessing toggle list for the given input type.
    // Each id becomes the checkbox's data-preprocess hook.
    const options = {
        image: [
            { id: 'resize', label: 'Resize to 224x224', checked: true },
            { id: 'normalize', label: 'Normalize (0-1)', checked: true },
            // Fixed: id was the non-ASCII '裁剪', inconsistent with every
            // other ASCII id used as a data-preprocess attribute value.
            { id: 'crop', label: 'Center Crop', checked: false },
            { id: 'rotate', label: 'Random Rotate', checked: false },
            { id: 'flip', label: 'Horizontal Flip', checked: false }
        ],
        audio: [
            { id: 'resample', label: 'Resample to 16kHz', checked: true },
            { id: 'normalize', label: 'Normalize Audio', checked: true },
            { id: 'denoise', label: 'Noise Reduction', checked: false },
            { id: 'trim', label: 'Trim Silence', checked: false },
            { id: 'augment', label: 'Data Augmentation', checked: false }
        ],
        text: [
            { id: 'tokenize', label: 'Tokenize Text', checked: true },
            { id: 'lowercase', label: 'Convert to Lowercase', checked: false },
            { id: 'remove_punct', label: 'Remove Punctuation', checked: false },
            { id: 'stop_words', label: 'Remove Stop Words', checked: false },
            { id: 'stem', label: 'Stemming', checked: false }
        ]
    };

    // Unknown types render an empty list rather than crashing.
    const selectedOptions = options[inputType] || [];
    preprocessingOptions.innerHTML = selectedOptions.map(option => `
        <div class="flex items-center justify-between p-3 bg-slate-800 rounded-lg">
            <span>${option.label}</span>
            <label class="relative inline-flex items-center cursor-pointer">
                <input type="checkbox" class="sr-only peer" ${option.checked ? 'checked' : ''} data-preprocess="${option.id}">
                <div class="w-11 h-6 bg-gray-200 peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-blue-300 dark:peer-focus:ring-blue-800 rounded-full peer dark:bg-gray-700 peer-checked:after:translate-x-full peer-checked:after:border-white after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-gray-300 after:border after:rounded-full after:h-5 after:w-5 after:transition-all dark:border-gray-600 peer-checked:bg-blue-600"></div>
            </label>
        </div>
    `).join('');
}
|
|
|
|
|
|
|
|
function handleFileUpload(file) {
    // Accept the extension case-insensitively so "MODEL.ONNX" works too
    // (the previous endsWith('.onnx') check rejected upper-case names).
    if (!/\.onnx$/i.test(file.name)) {
        alert('Please upload a valid ONNX file (.onnx)');
        return;
    }

    logMessage(`Loading model: ${file.name}`);

    analysisSection.classList.remove('hidden');

    // Animate a simulated analysis progress bar; once it reaches 100% the
    // analysis panel is populated and the inference UI is revealed.
    const progressBar = document.getElementById('analysisProgress');
    let width = 0;
    const interval = setInterval(() => {
        if (width >= 100) {
            clearInterval(interval);
            populateAnalysisData(file);
            showInferenceSection();
        } else {
            width += 10;
            progressBar.style.width = width + '%';
        }
    }, 100);

    analysisSection.scrollIntoView({ behavior: 'smooth' });
}
|
|
|
|
|
|
|
|
function handleMediaFileUpload(file) {
    // Render a preview for an uploaded media file that matches the
    // currently selected input type; mismatched files are ignored.
    const sizeLabel = `${file.name} (${(file.size / 1024).toFixed(1)} KB)`;

    if (currentInputType === 'image' && file.type.startsWith('image/')) {
        const reader = new FileReader();
        reader.onload = (e) => {
            previewContent.innerHTML = `
                <img src="${e.target.result}" class="w-full max-h-48 object-contain rounded" alt="Preview">
                <p class="text-sm text-slate-400 mt-2">${sizeLabel}</p>
            `;
            filePreview.classList.remove('hidden');
        };
        reader.readAsDataURL(file);
        return;
    }

    if (currentInputType === 'audio' && file.type.startsWith('audio/')) {
        const url = URL.createObjectURL(file);
        previewContent.innerHTML = `
            <audio controls class="w-full">
                <source src="${url}" type="${file.type}">
                Your browser does not support the audio element.
            </audio>
            <p class="text-sm text-slate-400 mt-2">${sizeLabel}</p>
        `;
        filePreview.classList.remove('hidden');
    }
}
|
|
|
|
|
|
|
|
// Route a newly chosen file: ONNX models go to analysis; any other file is
// treated as media input when an image/audio input type is active.
modelFileInput.addEventListener('change', (e) => {
    const file = e.target.files[0];
    if (!file) return;

    // Case-insensitive extension match, consistent with the validation in
    // handleFileUpload (plain endsWith('.onnx') missed '.ONNX').
    if (/\.onnx$/i.test(file.name)) {
        handleFileUpload(file);
    } else if (['image', 'audio'].includes(currentInputType)) {
        handleMediaFileUpload(file);
    }
});
|
|
|
|
|
|
|
|
function populateAnalysisData(file) {
    // Build the (simulated) model metadata shown in the analysis panel.
    modelInfo = {
        // Strip only a TRAILING ".onnx" (any case). A plain
        // replace('.onnx', '') removed the first occurrence anywhere,
        // mangling names like "my.onnx.backup.onnx".
        name: file.name.replace(/\.onnx$/i, ''),
        version: '1.0.0',
        irVersion: '8',
        size: (file.size / (1024 * 1024)).toFixed(2) + ' MB',
        type: currentModelType,
        inputType: currentInputType
    };

    // Push the metadata into the static info fields.
    document.getElementById('modelName').textContent = modelInfo.name;
    document.getElementById('modelVersion').textContent = modelInfo.version;
    document.getElementById('irVersion').textContent = modelInfo.irVersion;
    document.getElementById('modelSize').textContent = modelInfo.size;

    updateModelSpecificInfo();

    logMessage(`Model analysis complete: ${modelInfo.type} model with ${modelInfo.inputType} input/output`);
}
|
|
|
|
|
function updateModelSpecificInfo() {
    // Simulated per-type capability ranges shown in the analysis panel.
    const typeConfigs = {
        image: {
            inputs: '1-4',
            outputs: '1-3',
            ops: '50-200',
            description: 'Image processing model'
        },
        audio: {
            inputs: '1-2',
            outputs: '1-2',
            ops: '30-150',
            description: 'Audio processing model'
        },
        text: {
            inputs: '1-2',
            outputs: '1-2',
            ops: '100-500',
            description: 'Text processing model'
        }
    };

    // Unknown model types fall back to the image profile.
    const { inputs, outputs, ops, description } =
        typeConfigs[currentModelType] || typeConfigs.image;

    document.getElementById('inputCount').textContent = inputs;
    document.getElementById('outputCount').textContent = outputs;
    document.getElementById('opCount').textContent = ops;

    logMessage(`Model capabilities: ${description}`);
}
|
|
|
|
|
function showInferenceSection() {
    // Map the loaded model's type to the single input type it accepts;
    // anything unrecognized defaults to image.
    let inputTypes;
    if (currentModelType === 'text') {
        inputTypes = ['text'];
    } else if (currentModelType === 'audio') {
        inputTypes = ['audio'];
    } else {
        inputTypes = ['image'];
    }

    // Only show the type buttons the model supports.
    document.querySelectorAll('.input-type-btn').forEach((btn) => {
        const supported = inputTypes.includes(btn.dataset.type);
        btn.style.display = supported ? 'flex' : 'none';
    });

    // Pre-select the (only) supported type.
    selectInputType(inputTypes[0]);

    inferenceSection.classList.remove('hidden');
    inferenceSection.scrollIntoView({ behavior: 'smooth' });

    logMessage('Ready for inference - configure input and execute');
}
|
|
|
|
|
|
|
|
// Run the (simulated) inference pipeline. try/finally guarantees the button
// is re-enabled even if a step throws — previously any error left the button
// permanently disabled with a spinner.
executeBtn.addEventListener('click', async () => {
    executeBtn.disabled = true;
    executeBtn.innerHTML = '<i data-feather="loader" class="mr-2 animate-spin"></i>Processing...';

    const startTime = Date.now();

    try {
        logMessage('Starting inference...');

        const inputData = await getInputData();
        logMessage('Input data prepared for inference');

        // Simulated pipeline stages; each advances the progress bar.
        const steps = [
            'Preprocessing input data...',
            'Loading model weights...',
            'Executing inference...',
            'Post-processing results...'
        ];

        for (let i = 0; i < steps.length; i++) {
            logMessage(steps[i]);
            await new Promise(resolve => setTimeout(resolve, 500));
            outputProgress.style.width = ((i + 1) / steps.length * 100) + '%';
        }

        const endTime = Date.now();
        inferenceTime.textContent = (endTime - startTime) + 'ms';

        await displayResults();

        logMessage('Inference completed successfully');
    } catch (error) {
        console.error('Inference failed:', error);
        logMessage('Error: Inference failed - ' + error.message);
    } finally {
        executeBtn.disabled = false;
        executeBtn.innerHTML = '<i data-feather="play" class="mr-2"></i>Execute Inference';
    }
});
|
|
|
|
|
async function getInputData() {
    // The highlighted source button marks the currently active input source.
    const source = document.querySelector('.input-source-btn.bg-indigo-600')?.dataset.source;

    if (source === 'upload') {
        // A non-.onnx file in the picker is treated as media input.
        const file = modelFileInput.files[0];
        if (file && !file.name.endsWith('.onnx')) {
            return { type: currentInputType, file: file };
        }
    } else if (source === 'camera') {
        if (currentCapturedImage) {
            return { type: 'image', data: currentCapturedImage };
        }
    } else if (source === 'microphone') {
        if (currentRecordedAudioBlob) {
            return { type: 'audio', blob: currentRecordedAudioBlob };
        }
    } else if (source === 'text') {
        const text = document.getElementById('textInput').value;
        return { type: 'text', data: text };
    }

    // No usable input for the active source.
    return null;
}
|
|
|
|
|
// Render the inference output panel for the active input type. The output is
// simulated: image/audio echo the captured/uploaded input (or a placeholder),
// text shows canned generated content. Comments only — logic unchanged.
async function displayResults() {
    const inputData = await getInputData();
    // Output type mirrors the input type for this demo pipeline.
    const outputType = currentInputType;
    let outputHTML = '';

    switch (outputType) {
        case 'image':
            // Prefer the camera capture (data URL); fall back to an uploaded
            // file via an object URL. NOTE(review): this object URL is never
            // revoked — confirm whether that matters for long sessions.
            let imageSource = '';
            if (inputData?.type === 'image') {
                if (inputData.data) {
                    imageSource = inputData.data;
                } else if (inputData.file) {
                    imageSource = URL.createObjectURL(inputData.file);
                }
            }

            // With no input image, show a gradient placeholder card instead.
            outputHTML = `
                <div class="text-center">
                    ${imageSource ? `<img src="${imageSource}" class="w-full max-h-64 object-contain rounded-lg mx-auto mb-4" alt="Processed Image">` : `
                    <div class="w-full h-64 bg-gradient-to-br from-purple-400 to-pink-400 rounded-lg mx-auto mb-4 flex items-center justify-center">
                        <i data-feather="image" class="text-white w-16 h-16"></i>
                    </div>`}
                    <p class="text-slate-300">Processed Image Output</p>
                    <p class="text-sm text-slate-400 mt-2">224x224 RGB</p>
                </div>
            `;
            downloadOptions.classList.remove('hidden');
            break;

        case 'audio':
            // Build a playable <audio> element from the recorded blob or an
            // uploaded file; autoplay/loop follow the UI toggles.
            let audioElement = '';
            if (inputData?.type === 'audio') {
                const audioUrl = inputData.blob ? URL.createObjectURL(inputData.blob) : URL.createObjectURL(inputData.file);
                audioElement = `<audio controls class="w-full mb-4" ${autoPlayToggle.checked ? 'autoplay' : ''} ${loopToggle.checked ? 'loop' : ''}>
                    <source src="${audioUrl}" type="audio/wav">
                    Your browser does not support the audio element.
                </audio>`;
            }

            // With no input audio, render a decorative fake waveform of 20
            // random-height bars.
            outputHTML = `
                <div class="text-center">
                    ${audioElement || `
                    <div class="bg-slate-700 rounded-lg p-6 mb-4">
                        <div class="flex items-center justify-center mb-4">
                            <i data-feather="play" class="text-emerald-400 w-8 h-8 mr-2"></i>
                            <span class="text-slate-300">Generated Audio</span>
                        </div>
                        <div class="h-24 bg-slate-600 rounded flex items-end justify-center space-x-1 p-2">
                            ${Array.from({length: 20}, (_, i) =>
                                `<div class="bg-emerald-400 w-2 rounded-t" style="height: ${Math.random() * 80 + 20}%"></div>`
                            ).join('')}
                        </div>
                    </div>`}
                    <p class="text-slate-300">Processed Audio Output</p>
                    <p class="text-sm text-slate-400 mt-2">44.1kHz, 16-bit</p>
                </div>
            `;
            downloadOptions.classList.remove('hidden');
            break;

        case 'text':
            // Echo the input text alongside canned "generated" output.
            // NOTE(review): inputText is interpolated into innerHTML
            // unescaped — user-supplied markup would be rendered; confirm
            // whether sanitization is needed here.
            const inputText = inputData?.data || 'Sample text for processing';
            outputHTML = `
                <div class="text-left">
                    <div class="bg-slate-800 rounded-lg p-4 mb-4">
                        <p class="text-slate-300 mb-2"><strong>Input:</strong> ${inputText}</p>
                        <hr class="border-slate-600 my-3">
                        <p class="text-slate-300"><strong>Output:</strong> This is a sample generated text output from the ONNX model. The text processing model has successfully processed the input "${inputText}" and generated meaningful content based on the model capabilities and preprocessing options selected.</p>
                    </div>
                    <div class="flex items-center space-x-4">
                        <button class="px-3 py-1 bg-slate-700 hover:bg-slate-600 rounded text-sm" onclick="navigator.clipboard.writeText(this.closest('.text-left').querySelector('p:last-child').textContent)">
                            <i data-feather="copy" class="w-4 h-4 mr-1 inline"></i>Copy
                        </button>
                        <button class="px-3 py-1 bg-slate-700 hover:bg-slate-600 rounded text-sm">
                            <i data-feather="share-2" class="w-4 h-4 mr-1 inline"></i>Share
                        </button>
                    </div>
                </div>
            `;
            downloadOptions.classList.remove('hidden');
            break;
    }

    outputContent.innerHTML = outputHTML;
    // Re-run feather so the data-feather icons injected above are rendered.
    feather.replace();

    outputDisplay.classList.remove('hidden');
}
|
|
|
|
|
|
|
|
// Wire the camera / microphone control buttons to their handlers.
captureBtn.addEventListener('click', captureImage);
stopCameraBtn.addEventListener('click', stopCamera);
startRecordingBtn.addEventListener('click', startRecording);
stopRecordingBtn.addEventListener('click', stopRecording);
|
|
|
|
|
|
|
|
// Delegate clicks inside the download panel; only buttons whose visible
// text contains "Download" trigger an export.
downloadOptions.addEventListener('click', (e) => {
    const button = e.target.closest('button');
    if (button && button.textContent.includes('Download')) {
        downloadResults();
    }
});
|
|
|
|
|
function downloadResults() {
    // Export the current result as a file download, keyed by the active
    // input type. Each case is block-scoped: the original switch declared
    // consts directly in the shared switch scope, which is fragile if two
    // cases ever reuse a name.
    const outputType = currentInputType;

    switch (outputType) {
        case 'image': {
            if (currentCapturedImage) {
                const link = document.createElement('a');
                link.href = currentCapturedImage;
                link.download = 'processed_image.jpg';
                link.click();
            }
            break;
        }
        case 'audio': {
            if (currentRecordedAudioBlob) {
                const link = document.createElement('a');
                link.href = URL.createObjectURL(currentRecordedAudioBlob);
                link.download = 'processed_audio.wav';
                link.click();
            }
            break;
        }
        case 'text': {
            // Guard: the output panel may not contain a paragraph yet, and
            // querySelector would return null (the original crashed here).
            const paragraph = outputContent.querySelector('p:last-child');
            if (paragraph) {
                const blob = new Blob([paragraph.textContent], { type: 'text/plain' });
                const link = document.createElement('a');
                link.href = URL.createObjectURL(blob);
                link.download = 'processed_text.txt';
                link.click();
            }
            break;
        }
    }

    logMessage('Results downloaded successfully');
}
|
|
|
|
|
|
|
|
function logMessage(message) {
    // Append a timestamped line to the on-page log.
    // The message is inserted via textContent, not innerHTML: callers pass
    // user-controlled strings (file names, error messages), and the original
    // innerHTML interpolation allowed HTML injection into the page.
    const timestamp = new Date().toLocaleTimeString();
    const logEntry = document.createElement('div');
    logEntry.className = 'mb-1';

    const timeSpan = document.createElement('span');
    timeSpan.className = 'text-slate-500';
    timeSpan.textContent = `[${timestamp}]`;

    const messageSpan = document.createElement('span');
    messageSpan.className = 'text-green-400';
    messageSpan.textContent = message;

    logEntry.append(timeSpan, ' ', messageSpan);
    modelLog.appendChild(logEntry);
    // Keep the newest entry visible.
    modelLog.scrollTop = modelLog.scrollHeight;
}
|
|
|
|
|
|
|
|
// "Run model" jumps straight to the inference section (the handler takes no
// arguments, so the listener can reference it directly).
runModelBtn.addEventListener('click', showInferenceSection);
|
|
|
|
|
// Detailed analysis is a stub: log the action and explain via alert.
const detailedAnalysisBtn = document.getElementById('detailedAnalysisBtn');
detailedAnalysisBtn.addEventListener('click', () => {
    logMessage('Detailed analysis opened');
    alert('Detailed analysis would show model architecture, layer breakdown, and performance metrics');
});
|
|
|
|
|
|
|
|
// Seed the preprocessing panel for the default input type at page load.
updatePreprocessingOptions(currentInputType);
|
|
|
|
|
|
|
|
// Give every button a subtle hover scale animation via anime.js.
const animateButtonScale = (target, scale) => {
    anime({
        targets: target,
        scale: scale,
        duration: 200,
        easing: 'easeInOutQuad'
    });
};

document.querySelectorAll('button').forEach((button) => {
    button.addEventListener('mouseenter', () => animateButtonScale(button, 1.05));
    button.addEventListener('mouseleave', () => animateButtonScale(button, 1));
});
|
|
|
|
|
|
|
|
// "Load model" proxies to the hidden file input's picker dialog.
loadModelBtn.addEventListener('click', () => modelFileInput.click());
|
|
|
|
|
|
|
|
// Release camera and microphone hardware when the page is closed, so the
// browser's capture indicators do not linger.
window.addEventListener('beforeunload', () => {
    stopCamera();
    stopRecording();
});
|
|
</script> |
|
|
</body> |
|
|
</html> |
|
|
|