<!--
  mangamagicfixer-wand / index.html
  Author: Alirezamw3
  Known issue (translated from Persian): "it does not erase the English texts in step 3"
  Commit: f3dd78f (verified)
-->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>MangaMagic Translator</title>
<script src="https://cdn.tailwindcss.com"></script>
<!-- feather-icons (deduplicated: was previously loaded from both unpkg and jsDelivr) -->
<script src="https://cdn.jsdelivr.net/npm/feather-icons/dist/feather.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/tesseract.js@4/dist/tesseract.min.js"></script>
<style>
.dropzone {
border: 2px dashed #94a3b8;
transition: all 0.3s ease;
}
.dropzone.active {
border-color: #6366f1;
background-color: #e0e7ff;
}
#outputCanvas {
max-width: 100%;
height: auto;
box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
}
.progress-bar {
height: 4px;
transition: width 0.3s ease;
}
</style>
</head>
<body class="bg-gray-50 min-h-screen">
<div class="container mx-auto px-4 py-8">
<header class="text-center mb-12">
<h1 class="text-4xl font-bold text-indigo-600 mb-2">MangaMagic Translator Wand ✨</h1>
<p class="text-lg text-gray-600">One-click manga translation with AI-powered tools</p>
</header>
<div class="max-w-4xl mx-auto bg-white rounded-xl shadow-md overflow-hidden p-6">
<div class="grid md:grid-cols-2 gap-8">
<!-- Input Section -->
<div>
<h2 class="text-xl font-semibold text-gray-800 mb-4">Upload Manga Page</h2>
<div id="dropzone" class="dropzone rounded-lg p-8 text-center cursor-pointer mb-4">
<i data-feather="upload" class="w-12 h-12 mx-auto text-gray-400 mb-3"></i>
<p class="text-gray-500 mb-2">Drag & drop your manga image here</p>
<p class="text-sm text-gray-400">or click to browse files (JPEG, PNG)</p>
<input type="file" id="fileInput" accept="image/*" class="hidden">
</div>
<div class="space-y-4">
<div>
<label class="block text-sm font-medium text-gray-700 mb-1">Processing Options</label>
<div class="flex items-center space-x-4">
<label class="inline-flex items-center">
<input type="checkbox" id="upscaleCheck" checked class="rounded text-indigo-600">
<span class="ml-2 text-gray-700">2x Upscale</span>
</label>
<label class="inline-flex items-center">
<input type="checkbox" id="cleanCheck" checked class="rounded text-indigo-600">
<span class="ml-2 text-gray-700">Clean Text Areas</span>
</label>
</div>
</div>
<div class="pt-2">
<button id="processBtn" disabled class="w-full py-3 px-4 bg-indigo-600 hover:bg-indigo-700 text-white font-medium rounded-lg transition duration-200 flex items-center justify-center">
<i data-feather="wand" class="w-5 h-5 mr-2"></i>
Process & Translate
</button>
</div>
</div>
</div>
<!-- Output Section -->
<div>
<h2 class="text-xl font-semibold text-gray-800 mb-4">Translated Result</h2>
<div id="progressContainer" class="hidden mb-4">
<div class="flex justify-between text-sm text-gray-600 mb-1">
<span id="progressStatus">Processing image...</span>
<span id="progressPercent">0%</span>
</div>
<div class="w-full bg-gray-200 rounded-full h-2">
<div id="progressBar" class="progress-bar bg-indigo-600 rounded-full h-2" style="width: 0%"></div>
</div>
</div>
<div id="outputContainer" class="hidden">
<canvas id="outputCanvas" class="rounded-lg border border-gray-200"></canvas>
<div class="mt-4 flex justify-end">
<button id="downloadBtn" class="py-2 px-4 bg-white border border-indigo-600 text-indigo-600 hover:bg-indigo-50 rounded-lg transition duration-200 flex items-center">
<i data-feather="download" class="w-4 h-4 mr-2"></i>
Download
</button>
</div>
</div>
<div id="emptyState" class="flex flex-col items-center justify-center py-12 bg-gray-50 rounded-lg">
<i data-feather="image" class="w-12 h-12 text-gray-300 mb-4"></i>
<p class="text-gray-500">Your translated manga will appear here</p>
</div>
</div>
</div>
</div>
<div class="mt-12 bg-white rounded-xl shadow-md overflow-hidden p-6">
<h2 class="text-xl font-semibold text-gray-800 mb-4">How It Works</h2>
<div class="grid md:grid-cols-4 gap-6">
<div class="text-center p-4">
<div class="bg-indigo-100 w-12 h-12 rounded-full flex items-center justify-center mx-auto mb-3">
<i data-feather="upload" class="text-indigo-600"></i>
</div>
<h3 class="font-medium text-gray-800 mb-1">Upload</h3>
<p class="text-sm text-gray-600">Upload your manga page image</p>
</div>
<div class="text-center p-4">
<div class="bg-indigo-100 w-12 h-12 rounded-full flex items-center justify-center mx-auto mb-3">
<i data-feather="maximize" class="text-indigo-600"></i>
</div>
<h3 class="font-medium text-gray-800 mb-1">Enhance</h3>
<p class="text-sm text-gray-600">Image upscaling for better OCR</p>
</div>
<div class="text-center p-4">
<div class="bg-indigo-100 w-12 h-12 rounded-full flex items-center justify-center mx-auto mb-3">
<i data-feather="globe" class="text-indigo-600"></i>
</div>
<h3 class="font-medium text-gray-800 mb-1">Translate</h3>
<p class="text-sm text-gray-600">AI-powered text translation</p>
</div>
<div class="text-center p-4">
<div class="bg-indigo-100 w-12 h-12 rounded-full flex items-center justify-center mx-auto mb-3">
<i data-feather="image" class="text-indigo-600"></i>
</div>
<h3 class="font-medium text-gray-800 mb-1">Render</h3>
<p class="text-sm text-gray-600">Natural-looking text placement</p>
</div>
</div>
</div>
</div>
<script>
// Render the <i data-feather="..."> placeholders into inline SVG icons.
feather.replace();
// Global variables
let originalImage = null; // HTMLImageElement of the uploaded page (set by handleFile)
let translatedImage = null; // HTMLImageElement of the rendered result (set by the process handler)
// DOM elements — cached once; all handlers below close over these references.
const dropzone = document.getElementById('dropzone');
const fileInput = document.getElementById('fileInput');
const processBtn = document.getElementById('processBtn');
const outputCanvas = document.getElementById('outputCanvas');
const outputContainer = document.getElementById('outputContainer');
const emptyState = document.getElementById('emptyState');
const progressContainer = document.getElementById('progressContainer');
const progressBar = document.getElementById('progressBar');
const progressStatus = document.getElementById('progressStatus');
const progressPercent = document.getElementById('progressPercent');
const downloadBtn = document.getElementById('downloadBtn');
const upscaleCheck = document.getElementById('upscaleCheck');
const cleanCheck = document.getElementById('cleanCheck');
// Event listeners
// Single click handler for the dropzone. The original code registered TWO
// 'click' listeners, each calling fileInput.click(), so one user click
// opened the file picker twice. Merged here: restore the idle UI when no
// file has been chosen yet, then open the picker exactly once.
dropzone.addEventListener('click', () => {
  if (!fileInput.files.length) {
    // Reset to the original idle state if no file is selected.
    dropzone.innerHTML = `
      <i data-feather="upload" class="w-12 h-12 mx-auto text-gray-400 mb-3"></i>
      <p class="text-gray-500 mb-2">Drag & drop your manga image here</p>
      <p class="text-sm text-gray-400">or click to browse files (JPEG, PNG)</p>
    `;
    // innerHTML just removed the hidden <input>; re-attach it so the
    // dropzone keeps the structure it had in the initial markup.
    dropzone.appendChild(fileInput);
    feather.replace();
  }
  fileInput.click();
});
dropzone.addEventListener('dragover', (e) => {
  // preventDefault is required or the browser will navigate to the file.
  e.preventDefault();
  dropzone.classList.add('active');
});
dropzone.addEventListener('dragleave', () => {
  dropzone.classList.remove('active');
});
dropzone.addEventListener('drop', (e) => {
  e.preventDefault();
  dropzone.classList.remove('active');
  if (e.dataTransfer.files.length) {
    handleFile(e.dataTransfer.files[0]);
  }
});
fileInput.addEventListener('change', () => {
  if (fileInput.files.length) {
    handleFile(fileInput.files[0]);
  }
});
// Main pipeline: upscale → OCR → clean → translate → render.
processBtn.addEventListener('click', async () => {
  if (!originalImage) return;
  // Reset and show the progress UI. A previous failed run leaves the bar
  // red and at an arbitrary width, so both are cleared here.
  progressContainer.classList.remove('hidden');
  progressBar.style.backgroundColor = '';
  progressStatus.textContent = "Processing image...";
  progressBar.style.width = "10%";
  processBtn.disabled = true; // prevent re-entrant runs while processing
  try {
    // Step 1: Upscale if selected (helps OCR on small scans).
    let workingImage = originalImage;
    if (upscaleCheck.checked) {
      progressStatus.textContent = "Upscaling image (2x)...";
      workingImage = await upscaleImage(workingImage);
      progressBar.style.width = "30%";
    }
    // Step 2: OCR text detection.
    progressStatus.textContent = "Detecting text areas...";
    const { data: ocrResult } = await performOCR(workingImage);
    progressBar.style.width = "50%";
    // Step 3: erase the detected text from the artwork (optional).
    let cleanedImage = workingImage;
    if (cleanCheck.checked) {
      progressStatus.textContent = "Cleaning text areas...";
      cleanedImage = await cleanTextAreas(workingImage, ocrResult);
      progressBar.style.width = "70%";
    }
    // Step 4: translate the detected strings.
    progressStatus.textContent = "Translating text...";
    const translatedTexts = await translateTexts(ocrResult);
    progressBar.style.width = "90%";
    // Step 5: draw the translations back onto the cleaned page.
    progressStatus.textContent = "Rendering translated text...";
    translatedImage = await renderTranslation(cleanedImage, ocrResult, translatedTexts);
    progressBar.style.width = "100%";
    // Display result
    displayResult(translatedImage);
    progressStatus.textContent = "Translation complete!";
  } catch (error) {
    console.error("Processing error:", error);
    progressStatus.textContent = "Error: " + error.message;
    progressBar.style.backgroundColor = "#ef4444";
  } finally {
    processBtn.disabled = false; // allow another run regardless of outcome
  }
});
// Download the rendered page as a PNG.
// BUG FIX: translatedImage is an HTMLImageElement, which has no toDataURL
// method — the original `translatedImage.toDataURL('image/png')` always
// threw. renderTranslation() sets .src to a PNG data URL, so use that.
downloadBtn.addEventListener('click', () => {
  if (!translatedImage) return;
  const link = document.createElement('a');
  link.download = 'translated-manga.png';
  link.href = translatedImage.src;
  link.click();
});
// Functions
// Validate and load a user-supplied image file, then enable processing.
// Shows success feedback in the dropzone; alerts on unreadable files.
function handleFile(file) {
  // Accept the common raster MIME types. Gallery apps sometimes report
  // the non-standard 'image/jpg', so it is allowed too.
  const validImageTypes = ['image/jpeg', 'image/png', 'image/jpg', 'image/webp', 'image/gif'];
  if (!validImageTypes.includes(file.type.toLowerCase())) {
    alert('Please select a valid image file (JPEG, PNG, WEBP, GIF)');
    return;
  }
  const reader = new FileReader();
  reader.onload = (e) => {
    const img = new Image();
    img.onload = () => {
      originalImage = img;
      processBtn.disabled = false;
      // SECURITY: escape the file name before interpolating it into markup —
      // a crafted name like `<img onerror=...>.png` must not parse as HTML.
      const safeName = file.name.replace(/[&<>"']/g, (ch) => ({
        '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;'
      }[ch]));
      // Show success feedback to user
      dropzone.innerHTML = `
        <i data-feather="check-circle" class="w-12 h-12 mx-auto text-green-500 mb-3"></i>
        <p class="text-green-600 font-medium mb-1">Image loaded successfully!</p>
        <p class="text-sm text-gray-500">${safeName}</p>
        <p class="text-xs text-gray-400 mt-2">Click to select a different image</p>
      `;
      // innerHTML just removed the hidden <input>; re-attach it so the
      // dropzone keeps its original structure.
      dropzone.appendChild(fileInput);
      feather.replace();
    };
    img.onerror = () => {
      alert('Error loading image. Please try another file.');
    };
    img.src = e.target.result;
  };
  reader.onerror = () => {
    alert('Error reading file. Please try again.');
  };
  reader.readAsDataURL(file);
}
// Show the finished page: hide the placeholder, reveal the output panel,
// and paint the image onto the output canvas at its natural size.
function displayResult(image) {
  emptyState.classList.add('hidden');
  outputContainer.classList.remove('hidden');
  // Resize the canvas to match the image, then draw it from the origin.
  outputCanvas.width = image.width;
  outputCanvas.height = image.height;
  const context = outputCanvas.getContext('2d');
  context.clearRect(0, 0, outputCanvas.width, outputCanvas.height);
  context.drawImage(image, 0, 0);
}
// Double the image's dimensions via canvas resampling and resolve with a
// new HTMLImageElement. This is a placeholder — a production build would
// use a dedicated super-resolution model or API instead.
async function upscaleImage(image) {
  const scratch = document.createElement('canvas');
  scratch.width = image.width * 2;
  scratch.height = image.height * 2;
  const context = scratch.getContext('2d');
  context.imageSmoothingEnabled = true;
  context.drawImage(image, 0, 0, scratch.width, scratch.height);
  return new Promise((resolve) => {
    const upscaled = new Image();
    upscaled.onload = () => resolve(upscaled);
    upscaled.src = scratch.toDataURL('image/png');
  });
}
// Run Tesseract.js OCR over the (preprocessed) page and return its result.
// BUG FIX: the language was 'jpn' only, so Latin-script/English text was
// never detected — and therefore never erased in the cleaning step (the
// exact defect reported in the commit note). 'jpn+eng' recognizes both.
async function performOCR(image) {
  progressStatus.textContent = "Initializing OCR engine...";
  // Preprocess image (contrast boost) for better OCR accuracy.
  const processedImage = await preprocessImageForOCR(image);
  return await Tesseract.recognize(
    processedImage.src,
    'jpn+eng', // Japanese dialogue plus English SFX/titles
    {
      logger: (m) => {
        if (m.status) {
          progressStatus.textContent = `OCR: ${m.status}`;
        }
      },
      // Treat the page as a single block of text (manga bubbles/panels).
      tessedit_pageseg_mode: Tesseract.PSM.SINGLE_BLOCK,
      // NOTE: the previous tessedit_char_whitelist listed only ASCII letters
      // and punctuation — if honored it would have excluded every kana and
      // kanji glyph — so it has been removed.
      tessedit_ocr_engine_mode: Tesseract.OEM.LSTM_ONLY
    }
  );
}
// Boost contrast so text strokes stand out for OCR; returns a new
// HTMLImageElement containing the enhanced copy.
async function preprocessImageForOCR(image) {
  const canvas = document.createElement('canvas');
  canvas.width = image.width;
  canvas.height = image.height;
  const ctx = canvas.getContext('2d');
  // Draw the original, then enhance its pixels in place.
  ctx.drawImage(image, 0, 0);
  const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
  const data = imageData.data;
  // Simple linear contrast stretch around the midpoint (128) on each
  // channel; alpha (every 4th byte) is left untouched.
  const factor = 1.5;
  for (let i = 0; i < data.length; i += 4) {
    data[i] = Math.min(255, Math.max(0, (data[i] - 128) * factor + 128)); // R
    data[i + 1] = Math.min(255, Math.max(0, (data[i + 1] - 128) * factor + 128)); // G
    data[i + 2] = Math.min(255, Math.max(0, (data[i + 2] - 128) * factor + 128)); // B
  }
  ctx.putImageData(imageData, 0, 0);
  // BUG FIX: register onload/onerror BEFORE assigning src so the load event
  // cannot fire (and be missed) before the listener is attached.
  const processed = new Image();
  const loaded = new Promise((resolve, reject) => {
    processed.onload = () => resolve();
    processed.onerror = reject;
  });
  processed.src = canvas.toDataURL('image/png');
  await loaded;
  return processed;
}
// Erase every OCR-detected word from the artwork by painting its bounding
// box with a colour sampled just outside the box (usually the bubble
// background). Returns a new HTMLImageElement of the cleaned page.
async function cleanTextAreas(image, ocrResult) {
  const canvas = document.createElement('canvas');
  canvas.width = image.width;
  canvas.height = image.height;
  const ctx = canvas.getContext('2d');
  // Draw original image
  ctx.drawImage(image, 0, 0);
  if (ocrResult.words && ocrResult.words.length > 0) {
    // Small margin so anti-aliased glyph edges are erased too.
    const PAD = 2;
    ocrResult.words.forEach((word) => {
      const { x0, y0, x1, y1 } = word.bbox;
      // Sample one pixel above-left of the box as the background colour.
      // NOTE(review): a single pixel can land on line art; averaging a
      // small patch would be more robust — confirm against real pages.
      const sampleX = Math.max(0, x0 - 5);
      const sampleY = Math.max(0, y0 - 5);
      const px = ctx.getImageData(sampleX, sampleY, 1, 1).data;
      ctx.fillStyle = `rgb(${px[0]}, ${px[1]}, ${px[2]})`;
      ctx.fillRect(x0 - PAD, y0 - PAD, (x1 - x0) + 2 * PAD, (y1 - y0) + 2 * PAD);
    });
  }
  // BUG FIX: register onload/onerror BEFORE assigning src so the load event
  // cannot fire before the listener is attached.
  const cleaned = new Image();
  const loaded = new Promise((resolve, reject) => {
    cleaned.onload = () => resolve();
    cleaned.onerror = reject;
  });
  cleaned.src = canvas.toDataURL('image/png');
  await loaded;
  return cleaned;
}
// Translate the OCR-detected words (Japanese → Persian) via the unofficial
// Google Translate endpoint. Returns [{ text, bbox }] aligned with
// ocrResult.words, or [] when nothing was detected. On any failure the
// original text is returned wrapped in a Persian marker.
// NOTE: for production use a proper translation API with an API key.
async function translateTexts(ocrResult) {
  const detectedTexts = ocrResult.words
    .map((word) => word.text)
    .filter((text) => text.trim().length > 0);
  if (detectedTexts.length === 0) {
    return [];
  }
  try {
    // Build the request URL with the URL API instead of string
    // concatenation so every parameter is encoded correctly.
    const url = new URL('https://translate.googleapis.com/translate_a/single');
    url.searchParams.set('client', 'gtx');
    url.searchParams.set('sl', 'ja');
    url.searchParams.set('tl', 'fa');
    url.searchParams.set('dt', 't');
    // Join texts for batch translation (better for context).
    url.searchParams.set('q', detectedTexts.join(' '));
    const response = await fetch(url);
    // BUG FIX: the original never checked the HTTP status, so a 4xx/5xx
    // body was parsed as a translation.
    if (!response.ok) {
      throw new Error(`Translation request failed: HTTP ${response.status}`);
    }
    const data = await response.json();
    const translatedText = data[0].map((item) => item[0]).join(' ');
    // Split once outside the loop (the original re-split per word: O(n^2)).
    // Simple positional mapping — a real app would need proper alignment.
    const words = translatedText.split(' ');
    return ocrResult.words.map((word, index) => ({
      text: words[index % words.length] || word.text,
      bbox: word.bbox
    }));
  } catch (error) {
    console.error('Translation error:', error);
    // Fallback: return original text with a Persian marker.
    return ocrResult.words.map((word) => ({
      text: `[ترجمه: ${word.text}]`,
      bbox: word.bbox
    }));
  }
}
// Draw each translated string inside its source bounding box on top of the
// (cleaned) page image; resolves with a new HTMLImageElement of the result.
async function renderTranslation(image, ocrResult, translations) {
  const canvas = document.createElement('canvas');
  canvas.width = image.width;
  canvas.height = image.height;
  const ctx = canvas.getContext('2d');
  // Draw original image (cleaned)
  ctx.drawImage(image, 0, 0);
  ctx.fillStyle = '#000000';
  ctx.textAlign = 'left';
  translations.forEach((item) => {
    // Skip blank strings: nothing to draw, and a zero-length text would
    // make the width-based font-size division yield Infinity.
    if (!item.text || item.text.trim().length === 0) return;
    const { x0, y0, x1, y1 } = item.bbox;
    const width = x1 - x0;
    const height = y1 - y0;
    // Scale the font so the string roughly fits the box; clamp to >= 8px so
    // long strings in small boxes stay legible.
    const fontSize = Math.max(8, Math.min(height * 0.7, width / (item.text.length * 0.6)));
    ctx.font = `bold ${fontSize}px Arial`;
    // Simple baseline placement — would need smarter layout for real use.
    ctx.fillText(item.text, x0, y0 + height * 0.8);
  });
  // Register onload/onerror BEFORE assigning src so the load event cannot
  // fire before the listener is attached.
  const result = new Image();
  const loaded = new Promise((resolve, reject) => {
    result.onload = () => resolve();
    result.onerror = reject;
  });
  result.src = canvas.toDataURL('image/png');
  await loaded;
  return result;
}
</script>
</body>
</html>