import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.0';

// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------
// NOTE(review): 'q4_k_m' is a llama.cpp/GGUF quantization name; transformers.js
// dtype values are typically 'q4', 'q8', 'fp16', 'q4f16' — confirm this string
// is accepted by @huggingface/transformers v3, otherwise loading may fail.
const MODEL_ID = 'Xenova/gemma-2b-it'; // A small, efficient LLM for text generation
const DTYPE = 'q4_k_m'; // Quantized for speed in browser

let generator = null;     // text-generation pipeline; set once initModel() resolves
let isGenerating = false; // true while a generation run is in flight

// UI Elements
const generateBtn = document.getElementById('generate-btn');
const stopBtn = document.getElementById('stop-btn');
const outputDiv = document.getElementById('output');
const statusText = document.getElementById('status-text');
const progressBar = document.getElementById('progress-bar');

/**
 * Load the text-generation pipeline and reflect progress in the status UI.
 * On failure, leaves `generator` null and shows an error message instead of
 * throwing, so the rest of the page stays usable.
 */
async function initModel() {
  statusText.textContent = 'Loading AI Model... (First time may take 30s)';
  try {
    // Create the text generation pipeline.
    generator = await pipeline('text-generation', MODEL_ID, {
      dtype: DTYPE,
      device: 'webgpu', // Try WebGPU first, fallback to wasm
    });
    statusText.textContent = 'Model Ready';
    progressBar.style.width = '100%';
    console.log('Model loaded successfully');
  } catch (error) {
    console.error('Model loading failed:', error);
    statusText.textContent = 'Error loading model. Please refresh.';
    outputDiv.textContent = 'Failed to load AI engine. Check console for details.';
  }
}

// Start initialization immediately.
initModel();

// Handle Generation
generateBtn.addEventListener('click', async () => {
  if (!generator) {
    alert('Model is still loading. Please wait.');
    return;
  }
  // FIX: ignore clicks while a run is already in flight — the original set
  // `isGenerating` but never checked it, allowing overlapping generations.
  if (isGenerating) return;

  const product = document.getElementById('product').value;
  const audience = document.getElementById('audience').value;
  const feature = document.getElementById('feature').value;

  if (!product || !audience || !feature) {
    alert('Please fill in all fields to generate copy.');
    return;
  }

  // Construct the prompt (string content preserved byte-for-byte).
  const prompt = ` Write a catchy Facebook ad post for ${product}. Target audience: ${audience}. Key feature to highlight: ${feature}. Use emojis and a persuasive tone. Keep it short. 
Output: `;

  isGenerating = true;
  generateBtn.style.display = 'none';
  stopBtn.style.display = 'inline-block';
  outputDiv.textContent = '';
  statusText.textContent = 'Generating...';
  progressBar.style.width = '0%';

  try {
    // NOTE(review): `callback_function(beams)` is the transformers.js v2
    // streaming API; v3 streams via a TextStreamer instance passed as
    // `streamer`. Confirm this callback still fires under v3.8.0.
    const output = await generator(prompt, {
      max_new_tokens: 150,
      do_sample: true,
      temperature: 0.7,
      top_k: 50,
      callback_function: (beams) => {
        // Stream partial text into the output area as tokens arrive.
        const decodedText = beams[0].text;
        outputDiv.textContent = decodedText;
        // Simple visual progress estimation: characters vs. 150-token budget.
        const progress = Math.min((decodedText.length / 150) * 100, 100);
        progressBar.style.width = `${progress}%`;
      },
    });

    // Strip the echoed prompt from the model output.
    // FIX: slice when the output starts with the prompt verbatim (the common
    // case — replace(prompt.trim(), '') could leave stray leading whitespace
    // or miss entirely); keep the original replace() as a fallback.
    const finalText = output[0].generated_text;
    outputDiv.textContent = finalText.startsWith(prompt)
      ? finalText.slice(prompt.length)
      : finalText.replace(prompt.trim(), '');
    statusText.textContent = 'Generation Complete';
    progressBar.style.width = '100%';
  } catch (err) {
    console.error(err);
    outputDiv.textContent = 'Error generating text.';
    statusText.textContent = 'Error';
  } finally {
    // Always restore the buttons, even on error.
    isGenerating = false;
    generateBtn.style.display = 'inline-block';
    stopBtn.style.display = 'none';
  }
});

// Stop Generation (optional implementation for advanced control).
// FIX: the original listener was `('click', () {` — the missing `=>` is a
// syntax error that prevented the ENTIRE module from parsing.
stopBtn.addEventListener('click', () => {
  if (generator && isGenerating) {
    // Note: Stopping mid-generation in transformers.js requires specific abort
    // controllers, which are complex in the simple pipeline API.
    // For this demo, we simply reset the UI.
    isGenerating = false;
    statusText.textContent = 'Stopped';
    generateBtn.style.display = 'inline-block';
    stopBtn.style.display = 'none';
  }
});

// Form Handling (Mock).
// FIX: optional chaining so pages without a .contact-form element do not
// throw a TypeError at load time.
document.querySelector('.contact-form')?.addEventListener('submit', (e) => {
  e.preventDefault();
  alert('Thanks! We will contact you shortly.');
});