AEUPH committed on
Commit
458037e
·
verified ·
1 Parent(s): 56c08e4

Upload 12 files

Browse files
Files changed (12) hide show
  1. README (1).md +12 -0
  2. api.py +239 -0
  3. app.js +1057 -0
  4. app.py +249 -0
  5. architecture-diagram.js +96 -0
  6. gitattributes +35 -0
  7. index (1).html +459 -0
  8. quantum-viz.js +327 -0
  9. requirements.txt +11 -0
  10. simulator.js +726 -0
  11. style.css +28 -0
  12. styles.css +803 -0
README (1).md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: artistic-2.0
3
+ title: Quantum-Enhanced WAN 2.1 Video Generation System
4
+ sdk: static
5
+ emoji: 🚀
6
+ colorFrom: indigo
7
+ colorTo: indigo
8
+ pinned: true
9
+ thumbnail: >-
10
+ https://cdn-uploads.huggingface.co/production/uploads/63a41856de134926a2b51191/qebqqRHAFmhFg8AspSmp_.jpeg
11
+ short_description: '## Features - **Hybrid Quantum-Classical Architecture:**'
12
+ ---
api.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Blueprint, request, jsonify
2
+ import base64
3
+ from PIL import Image
4
+ import io
5
+ import torch
6
+ from transformers import CLIPProcessor, CLIPModel
7
+ from mlc_llm import ChatModule
8
+ import threading
9
+ import os
10
+
11
# Create a Blueprint for API routes.
# BUG FIX: the original line was `app.register_blueprint(api_bp, url_prefix='/api')`,
# but neither `app` nor `api_bp` exists at this point in this module, so importing
# api.py raised NameError. The blueprint must be *defined* here; registration
# belongs in app.py: `app.register_blueprint(api_bp, url_prefix='/api')`.
api_bp = Blueprint('api', __name__)
13
+
14
# Global instances for models (will be initialized in app.py)
# NOTE(review): these names appear to be rebound from outside this module at
# startup — presumably via `api.clip_model = ...` in app.py; verify against app.py.
clip_processor = None
clip_model = None
mlc_chat_module = None
# Serializes access to mlc_chat_module across Flask's request-handling threads
# (every generate/reset in this file happens under this lock).
mlc_lock = threading.Lock()
19
+
20
@api_bp.route('/health')
def health_check():
    """Liveness probe: report backend status plus LLM/CLIP load state as JSON."""
    payload = {
        "status": "Quantum-Enhanced WAN 2.1 Backend is running!",
        "llm_status": "loaded" if mlc_chat_module else "not loaded (check logs)",
        "clip_status": "loaded" if clip_model else "not loaded (check logs)",
    }
    return jsonify(payload)
29
+
30
@api_bp.route('/embed_image', methods=['POST'])
def embed_image():
    """Handle image embedding requests.

    Expects a JSON body with an 'image', 'image_url', or 'image_data' field
    holding either a data URL ("data:image/...;base64,....") or raw base64.
    Returns the L2-normalized CLIP image embedding as a JSON list.

    Responses:
        200 -- {"embeddings": [...], "shape": [...], "success": true}
        400 -- bad/missing JSON or undecodable image data
        500 -- CLIP model not loaded, or an unexpected failure
    """
    if clip_processor is None or clip_model is None:
        return jsonify({"error": "CLIP model not loaded. Check server logs for details."}), 500

    try:
        # FIX: get_json() without silent=True aborts on a malformed body,
        # bypassing our clean 400 path. Parse leniently instead.
        data = request.get_json(silent=True)
        if not data:
            return jsonify({"error": "Invalid JSON data"}), 400

        image_data_url = data.get('image') or data.get('image_url') or data.get('image_data')

        if not image_data_url:
            return jsonify({"error": "No image data provided. Expected 'image', 'image_url', or 'image_data' field."}), 400

        # Handle data URL format; anything without a comma is treated as raw base64.
        if ',' in image_data_url:
            _header, encoded = image_data_url.split(",", 1)
        else:
            encoded = image_data_url

        # Decode and load the image; force RGB so CLIP always sees 3 channels.
        image_bytes = base64.b64decode(encoded)
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")

        inputs = clip_processor(images=image, return_tensors="pt")
        if torch.cuda.is_available():
            inputs = {k: v.to("cuda") for k, v in inputs.items()}

        with torch.no_grad():
            image_features = clip_model.get_image_features(**inputs)

        # L2-normalize embeddings and convert to plain Python lists for JSON.
        image_embeddings = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
        embeddings_list = image_embeddings.squeeze().cpu().tolist()

        return jsonify({
            "embeddings": embeddings_list,
            # Convert torch.Size to a plain list so the payload is explicit
            # JSON (a tuple subclass happens to serialize, but don't rely on it).
            "shape": list(image_embeddings.shape),
            "success": True
        }), 200

    except ValueError as ve:
        # b64decode / PIL raise ValueError for malformed input -> client error.
        print(f"Value error embedding image: {ve}")
        return jsonify({"error": f"Invalid image data format: {str(ve)}"}), 400
    except Exception as e:
        print(f"Error embedding image: {e}")
        import traceback
        traceback.print_exc()
        return jsonify({"error": f"Failed to embed image: {str(e)}"}), 500
82
+
83
@api_bp.route('/chat/completions', methods=['POST'])
def chat_completions_endpoint():
    """Generate a single chat completion.

    Expects JSON with a required 'prompt' and an optional 'system_message'.
    Returns {"completion": <text>} on success.
    """
    if mlc_chat_module is None:
        return jsonify({"error": "LLM model not loaded. Check server logs for details."}), 500

    # BUG FIX: the original called data.get(...) directly on the result of
    # request.get_json(); with a missing or malformed JSON body that produced
    # a 500 (AttributeError on None) instead of the intended 400 below.
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt")
    system_message = data.get("system_message", "You are a creative AI assistant for video generation.")

    if not prompt:
        return jsonify({"error": "Prompt is required"}), 400

    try:
        full_prompt = f"{system_message}\nUser: {prompt}"

        # Serialize access to the shared chat module across request threads;
        # reset_chat() clears history so each request is independent.
        with mlc_lock:
            mlc_chat_module.reset_chat()
            response = mlc_chat_module.generate(full_prompt)

        return jsonify({"completion": response}), 200
    except Exception as e:
        print(f"Error getting chat completion: {e}")
        return jsonify({"error": f"Failed to get chat completion: {str(e)}"}), 500
106
+
107
@api_bp.route('/generate_frame_guidance', methods=['POST'])
def generate_frame_guidance():
    """Return LLM-generated transformation instructions for the next frame.

    This endpoint provides LLM guidance for the frontend's quantum diffusion.
    It does NOT generate the image itself: it embeds the incoming frame with
    CLIP, describes that context to the LLM, and returns the LLM's textual
    transformation instructions (with a deterministic fallback string when
    LLM generation fails).
    """
    if mlc_chat_module is None or clip_processor is None or clip_model is None:
        return jsonify({"error": "One or more AI models not loaded. Check server logs for details."}), 500

    # FIX: guard against a missing/malformed JSON body (previously a 500 via
    # AttributeError on None) — consistent with the other endpoints.
    data = request.get_json(silent=True) or {}
    image_data_url = data.get('image')  # The current frame from the frontend
    prompt = data.get('prompt', 'Quantum interpolation')
    influence = data.get('influence', 5)  # 0-100
    entanglement_depth = data.get('depth', 16)  # For LLM to consider
    frame_number = data.get('frame_number', 0)

    if not image_data_url:
        return jsonify({"error": "No image data provided"}), 400

    try:
        # 1. Get CLIP embeddings for the current frame.
        # FIX: accept raw base64 without a data-URL header, same as /embed_image
        # (previously split(",", 1) raised ValueError -> 500 on such input).
        if ',' in image_data_url:
            _header, encoded = image_data_url.split(",", 1)
        else:
            encoded = image_data_url
        image_bytes = base64.b64decode(encoded)
        input_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")

        clip_inputs = clip_processor(images=input_image, return_tensors="pt")
        if torch.cuda.is_available():
            clip_inputs = {k: v.to("cuda") for k, v in clip_inputs.items()}

        with torch.no_grad():
            image_features = clip_model.get_image_features(**clip_inputs)
        image_embeddings_np = image_features.squeeze().cpu().numpy()
        # Only the first 10 components are shown to the LLM as visual context.
        embedding_snippet = ", ".join(f"{x:.4f}" for x in image_embeddings_np[:10])

        # 2. Use LLM to generate guidance for the next quantum diffusion step.
        # (Removed the original's unused `import math` — only builtins round/max
        # are used below.)
        llm_prompt = (
            f"You are an AI video director for a quantum diffusion system. Your task is to guide the transformation "
            f"of a video frame based on quantum principles and user input. "
            f"Given the current frame's visual context (CLIP features: [{embedding_snippet}...]), "
            f"the user's creative prompt: '{prompt}', "
            f"and the quantum settings (Quantum Influence: {influence}%, Entanglement Depth: {entanglement_depth} layers), "
            f"describe *precisely* how the quantum diffusion effect should transform the current frame into frame {frame_number + 1}. "
            f"Think of these transformations as manipulating a quantum state that manifests visually. "
            f"Higher influence and depth should lead to more pronounced, chaotic, or surreal quantum effects. "
            f"Focus on quantifiable visual parameters, including: "
            f"color shifts (e.g., 'shift red by +{round(influence/5)}', 'hue rotate {round(influence*1.5)}deg'), "
            f"blur (e.g., 'apply gaussian blur radius {max(1, round(influence/10))}'), "
            f"glitch/distortion (e.g., 'pixel displacement x-axis random {max(5, round(influence/5))}px', 'chromatic aberration offset {max(1, round(influence/20))}'), "
            f"zoom/pan (e.g., 'zoom in {1.00 + influence/2000}x, pan right {round(influence/10)}px'), "
            f"pattern overlay (e.g., 'overlay subtle static pattern opacity {influence/200}'), "
            f"motion blur (e.g., 'apply motion blur strength {round(entanglement_depth/2)}'), "
            f"bloom (e.g., 'add bloom strength {influence/100}'), "
            f"noise (e.g., 'add noise amount {influence/50}'), "
            f"vignette (e.g., 'add vignette strength {influence/200}'), "
            f"or specific quantum-themed visual cues (e.g., 'ripple effect', 'add subtle scanlines opacity {influence/200}', 'invert colors'). "
            f"Combine these to create a dynamic, quantum-like visual evolution. Ensure the intensity of effects scales with Influence and Depth. "
            f"Be concise and output only the transformation instructions. "
            f"Example: 'shift blue by +{round(influence/5)}, apply motion blur strength {round(entanglement_depth/2)}, zoom {1.00 + influence/2000}x, add subtle scanlines opacity {influence/200}'.\n"
            f"Transformation Instructions for frame {frame_number + 1}:"
        )

        # LLM failure is non-fatal: fall back to a canned instruction string so
        # the frontend's animation loop keeps running.
        llm_guidance = ""
        try:
            with mlc_lock:
                mlc_chat_module.reset_chat()
                llm_guidance = mlc_chat_module.generate(llm_prompt)
        except Exception as llm_e:
            print(f"LLM guidance generation failed: {llm_e}. Using fallback guidance.")
            llm_guidance = f"apply subtle glitch effect, shift colors slightly based on quantum influence {influence}%."

        print(f"LLM Guidance: {llm_guidance}")

        return jsonify({
            "guidance": llm_guidance,
            "log": (f"Backend provided guidance for frame {frame_number + 1} based on prompt: '{prompt[:50]}...', "
                    f"influence: {influence}, depth: {entanglement_depth}. LLM guidance: '{llm_guidance[:50]}...'.")
        }), 200
    except Exception as e:
        print(f"Error generating frame guidance: {e}")
        return jsonify({"error": f"Failed to generate frame guidance: {str(e)}"}), 500
186
+
187
@api_bp.route('/upload', methods=['POST'])
def upload_file():
    """Accept an image and echo it back to the client as a data URL.

    Two request shapes are supported:
      * multipart FormData with a 'file' part, or
      * a JSON body carrying base64 (or an existing data URL) under
        'image' / 'image_url' / 'image_data'.
    """
    try:
        # Multipart form upload (FormData with a 'file' part).
        if 'file' in request.files:
            upload = request.files['file']
            if upload.filename == '':
                return jsonify({"error": "No selected file"}), 400

            # Re-encode the raw bytes as a data URL the frontend can use directly.
            encoded = base64.b64encode(upload.read()).decode('utf-8')
            mime = upload.content_type or 'image/jpeg'
            return jsonify({
                "message": "File uploaded successfully",
                "image_url": f"data:{mime};base64,{encoded}"
            }), 200

        # JSON body with base64 (or already a data URL).
        if request.is_json:
            payload = request.get_json()
            image_data = payload.get('image') or payload.get('image_url') or payload.get('image_data')

            if not image_data:
                return jsonify({"error": "No image data provided"}), 400

            # Already a data URL: pass through untouched.
            if image_data.startswith('data:image'):
                return jsonify({
                    "message": "Image data received",
                    "image_url": image_data
                }), 200

            # Bare base64: assume JPEG and prepend the data-URL header.
            return jsonify({
                "message": "Image data processed",
                "image_url": f"data:image/jpeg;base64,{image_data}"
            }), 200

        return jsonify({"error": "Invalid request format. Send either FormData with 'file' or JSON with 'image' field"}), 400

    except Exception as e:
        print(f"Error uploading file: {e}")
        import traceback
        traceback.print_exc()
        return jsonify({"error": f"Failed to upload file: {str(e)}"}), 500
app.js ADDED
@@ -0,0 +1,1057 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Advanced Simulation Controller
2
+
3
+ // Dynamically set backend URL for HuggingFace Spaces compatibility
4
+
5
+ class MainSystemSimulator {
6
    constructor() {
        console.log('SystemSimulator initialized');
        // Cached DOM handles; outputCtx is null when the canvas is missing.
        this.logs = document.getElementById('system-logs');
        this.outputCanvas = document.getElementById('output-canvas');
        this.outputCtx = this.outputCanvas ? this.outputCanvas.getContext('2d') : null;

        this.isGenerating = false; // true while startGeneration() is running
        this.sourceImage = null;   // HTMLImageElement set by handleImage()
        this.config = {
            prompt: '',
            influence: 5, // Default 5%
            depth: 16, // Default 16 layers
            method: 'adaptive'
        };

        // Director Mode State
        this.directorMode = true;
        this.movieFrames = []; // Stores ImageBitmaps or DataURLs
        this.accumulatedFrames = 0;

        // Flag to prevent multiple simultaneous API calls
        this.isProcessingImage = false;

        this.init();
    }
31
+
32
+ async callBackendApi(endpoint, data) {
33
+ try {
34
+ // Ensure endpoint starts with /
35
+ //const normalizedEndpoint = endpoint.startsWith('/') ? endpoint : `/${endpoint}`;
36
+ var BACKEND_URL = window.location.origin + '/api/'
37
+ // Construct full URL (backendUrl should NOT have trailing slash)
38
+ const backendUrl = BACKEND_URL;
39
+ const url = `${backendUrl}` + `${endpoint}`;
40
+
41
+ console.log(`Calling API: ${url}`);
42
+
43
+ const response = await fetch(url, {
44
+ method: 'POST',
45
+ headers: {
46
+ 'Content-Type': 'application/json',
47
+ },
48
+ body: JSON.stringify(data),
49
+ });
50
+
51
+ const contentType = response.headers.get('content-type') || '';
52
+
53
+ // Read the response body only once
54
+ const responseText = await response.text();
55
+
56
+
57
+ // Handle non-JSON responses that aren't HTML
58
+ if (!contentType.includes('application/json')) {
59
+ // Use the already read responseText instead of trying to read it again
60
+ const textResponse = responseText; // Fix: use the already read value
61
+ console.error(`Expected JSON, got ${contentType}:`, textResponse);
62
+
63
+ // Try to extract error information from HTML
64
+ let errorMessage = `Expected JSON response, got ${contentType}`;
65
+ try {
66
+ // Check if it's an error page
67
+ if (textResponse.includes('<title>')) {
68
+ const titleMatch = textResponse.match(/<title>(.*?)<\/title>/s);
69
+ if (titleMatch && titleMatch[1]) {
70
+ errorMessage = `Server returned error: ${titleMatch[1]}`;
71
+ }
72
+ }
73
+
74
+ // Check for Flask error details
75
+ if (textResponse.includes('<pre>')) {
76
+ const preMatch = textResponse.match(/<pre>(.*?)<\/pre>/s);
77
+ if (preMatch && preMatch[1]) {
78
+ errorMessage += ` - ${preMatch[1].substring(0, 100)}`;
79
+ }
80
+ }
81
+ } catch (e) {
82
+ console.error('Error parsing HTML response:', e);
83
+ }
84
+
85
+ throw new Error(errorMessage);
86
+ }
87
+
88
+ // Parse JSON response
89
+ let jsonResponse;
90
+ try {
91
+ jsonResponse = JSON.parse(responseText);
92
+ } catch (parseError) {
93
+ console.error('JSON parse error:', parseError);
94
+ throw new Error(`Failed to parse JSON response: ${parseError.message}`);
95
+ }
96
+
97
+ // Check for HTTP error status
98
+ if (!response.ok) {
99
+ const errorMsg = jsonResponse.error || jsonResponse.message || `HTTP ${response.status}: ${response.statusText}`;
100
+ throw new Error(errorMsg);
101
+ }
102
+
103
+ return jsonResponse;
104
+
105
+ } catch (error) {
106
+ // Re-throw with enhanced error message
107
+ // console.error(`Backend API Error (${endpoint}):`, error.message);
108
+
109
+ // Provide helpful debugging info for connection errors
110
+ // if (error.message.includes('Failed to fetch') || error.message.includes('NetworkError')) {
111
+ // const backendUrl = this.backendUrl || BACKEND_URL || window.location.origin;
112
+ // throw new Error(`Cannot connect to backend at ${backendUrl}. Is the server running?`);
113
+ // }
114
+
115
+ throw error;
116
+ }
117
+ }
118
+
119
    /** Wire up all UI handlers, size the canvas, and paint the idle pattern. */
    init() {
        console.log('Initializing SystemSimulator...');
        this.setupListeners();
        this.setupNavigation();
        this.resizeCanvas();
        // Keep the canvas backing store in sync with layout changes.
        window.addEventListener('resize', () => this.resizeCanvas());

        // Initial visual state
        this.drawStaticNoise();
    }
129
+
130
+ setupNavigation() {
131
+ // Setup navigation links
132
+ const navLinks = document.querySelectorAll('.nav-link');
133
+ navLinks.forEach(link => {
134
+ link.addEventListener('click', (e) => {
135
+ e.preventDefault();
136
+ const targetSection = link.dataset.section;
137
+
138
+ // Update active nav link
139
+ navLinks.forEach(l => l.classList.remove('active'));
140
+ link.classList.add('active');
141
+
142
+ // Show target section
143
+ document.querySelectorAll('.section').forEach(section => {
144
+ section.classList.remove('active');
145
+ });
146
+ document.getElementById(targetSection).classList.add('active');
147
+
148
+ // Special handling for architecture section
149
+ if (targetSection === 'architecture' && window.drawArchitectureDiagram) {
150
+ window.drawArchitectureDiagram();
151
+ }
152
+ });
153
+ });
154
+ }
155
+
156
    /**
     * Attach every UI event handler: image upload (click-to-browse, drag/drop,
     * file input), Director Mode controls, the quantum parameter sliders, the
     * visualization tabs, and the start button. Called once from init().
     * Upload paths check this.isProcessingImage so a second upload cannot
     * start while handleImage() is still in flight.
     */
    setupListeners() {
        console.log('Setting up event listeners...');

        // Image Upload Handling
        const dropZone = document.getElementById('drop-zone');
        const fileInput = document.getElementById('image-input');

        if (!dropZone || !fileInput) {
            console.error('Required DOM elements not found');
            return;
        }

        // Click to browse - fixed to prevent double clicks
        dropZone.addEventListener('click', (e) => {
            e.preventDefault(); // Prevent default behavior
            e.stopPropagation(); // Stop event bubbling

            // Check if already processing to prevent multiple calls
            if (this.isProcessingImage) {
                console.log('Already processing image, ignoring click');
                return;
            }

            console.log('Drop zone clicked');
            fileInput.click();
        });

        // Drag and drop
        dropZone.addEventListener('dragover', (e) => {
            e.preventDefault();
            dropZone.classList.add('drag-over');
        });

        dropZone.addEventListener('dragleave', () => {
            dropZone.classList.remove('drag-over');
        });

        dropZone.addEventListener('drop', (e) => {
            e.preventDefault();
            e.stopPropagation();

            // Check if already processing to prevent multiple calls
            if (this.isProcessingImage) {
                console.log('Already processing image, ignoring drop');
                return;
            }

            console.log('File dropped');
            dropZone.classList.remove('drag-over');
            if(e.dataTransfer.files.length) {
                this.handleImage(e.dataTransfer.files[0]);
            }
        });

        // File input change - using 'change' event instead of 'input' to avoid conflicts
        fileInput.addEventListener('change', (e) => {
            e.preventDefault();
            e.stopPropagation();

            // Check if already processing to prevent multiple calls
            if (this.isProcessingImage) {
                console.log('Already processing image, ignoring file input change');
                return;
            }

            console.log('File input changed');
            if(e.target.files.length) {
                this.handleImage(e.target.files[0]);
            }
        });

        // Director Mode Listeners
        const directorToggle = document.getElementById('director-mode-toggle');
        if (directorToggle) {
            directorToggle.addEventListener('change', (e) => {
                console.log('Director mode toggle changed');
                this.directorMode = e.target.checked;
                this.log(`Director Mode: ${this.directorMode ? 'ENABLED' : 'DISABLED'}`, 'info');
            });
        }

        const downloadBtn = document.getElementById('download-btn');
        const resetBtn = document.getElementById('reset-movie-btn');

        if (downloadBtn) {
            downloadBtn.addEventListener('click', (e) => {
                e.preventDefault();
                e.stopPropagation();
                console.log('Download button clicked');
                this.downloadMovie();
            });
        }

        if (resetBtn) {
            resetBtn.addEventListener('click', (e) => {
                e.preventDefault();
                e.stopPropagation();
                console.log('Reset button clicked');
                this.resetMovie();
            });
        }

        // Inputs: sliders mirror their value into a label and this.config.
        const quantumInfluence = document.getElementById('quantum-influence');
        const entanglementDepth = document.getElementById('entanglement-depth');
        const influenceVal = document.getElementById('influence-val');
        const depthVal = document.getElementById('depth-val');

        if (quantumInfluence && influenceVal) {
            quantumInfluence.addEventListener('input', (e) => {
                console.log('Quantum influence changed');
                influenceVal.textContent = `${e.target.value}%`;
                this.config.influence = parseInt(e.target.value);
            });
        }

        if (entanglementDepth && depthVal) {
            entanglementDepth.addEventListener('input', (e) => {
                console.log('Entanglement depth changed');
                depthVal.textContent = e.target.value;
                this.config.depth = parseInt(e.target.value);
            });
        }

        // Tabs: one .viz-tab / .viz-view pair is active at a time.
        document.querySelectorAll('.viz-tab').forEach(tab => {
            tab.addEventListener('click', (e) => {
                e.preventDefault();
                e.stopPropagation();
                console.log('Tab clicked:', tab.dataset.view);
                document.querySelectorAll('.viz-tab').forEach(t => t.classList.remove('active'));
                document.querySelectorAll('.viz-view').forEach(v => v.classList.remove('active'));

                tab.classList.add('active');
                const viewId = `view-${tab.dataset.view}`;
                const viewElement = document.getElementById(viewId);
                if (viewElement) viewElement.classList.add('active');

                // Initialize visualizations if needed
                if (tab.dataset.view === 'circuit' && window.circuitViz) {
                    window.circuitViz.resize();
                } else if (tab.dataset.view === 'state' && window.stateViz) {
                    window.stateViz.resize();
                }
            });
        });

        // Start Button
        const startBtn = document.getElementById('start-btn');
        if (startBtn) {
            startBtn.addEventListener('click', (e) => {
                e.preventDefault();
                e.stopPropagation();
                console.log('Start button clicked');
                this.startGeneration();
            });
        }

        console.log('All event listeners set up');
    }
316
+
317
    /**
     * Load a user-supplied image file: validate its MIME type, read it as a
     * data URL, show a preview, store it in this.sourceImage, then run backend
     * CLIP analysis. this.isProcessingImage stays true from entry until every
     * async step (FileReader -> Image load -> analyzeImageContext) finishes or
     * fails, so the upload listeners reject concurrent uploads.
     * @param {File} file - image file from the file input or a drop event.
     */
    handleImage(file) {
        console.log('Handling image:', file.name);
        if (!file || !file.type.startsWith('image/')) {
            this.log('Invalid file type. Please upload an image.', 'error');
            return;
        }

        // Set processing flag to prevent multiple calls
        this.isProcessingImage = true;

        const reader = new FileReader();
        reader.onload = async (e) => {
            console.log('File reader loaded');

            // Validate data URL format
            const imageDataURL = e.target.result;
            if (!imageDataURL || !imageDataURL.startsWith('data:image')) {
                this.log('Invalid image data format', 'error');
                this.isProcessingImage = false;
                return;
            }

            this.sourceImage = new Image();
            this.sourceImage.onload = async () => {
                console.log('Source image loaded');

                // Show preview
                const preview = document.getElementById('preview-img');
                if (preview) {
                    preview.src = this.sourceImage.src;
                    preview.classList.remove('hidden');
                }

                // Fade out the drop-zone prompt once a preview is shown.
                const dropContent = document.querySelector('.drop-content');
                if (dropContent) dropContent.style.opacity = '0';

                this.log(`Image loaded: ${file.name} (${this.sourceImage.width}x${this.sourceImage.height})`, 'success');

                // Call backend for CLIP analysis
                try {
                    await this.analyzeImageContext(imageDataURL);
                } catch (error) {
                    this.log(`Failed CLIP analysis for ${file.name}: ${error.message}`, 'error');
                    console.error('Full error:', error);
                } finally {
                    // Reset processing flag after all operations are complete
                    this.isProcessingImage = false;
                }
            };

            this.sourceImage.onerror = () => {
                this.log(`Failed to load image: ${file.name}`, 'error');
                this.isProcessingImage = false;
            };

            this.sourceImage.src = imageDataURL;
        };

        reader.onerror = () => {
            this.log(`Failed to read file: ${file.name}`, 'error');
            this.isProcessingImage = false;
        };

        reader.readAsDataURL(file);
    }
382
+
383
+ async analyzeImageContext(imageDataURL) {
384
+ console.log('Analyzing image context');
385
+ this.log('CLIP-Encoder: Sending image for feature extraction...', 'info');
386
+
387
+ try {
388
+ // Validate input
389
+ if (!imageDataURL || !imageDataURL.startsWith('data:image')) {
390
+ throw new Error('Invalid image data URL format');
391
+ }
392
+
393
+ // Try different possible endpoint paths
394
+ let response;
395
+ const endpoints = [
396
+ 'embed_image'
397
+ ];
398
+
399
+
400
+
401
+ let lastError;
402
+ let successfulEndpoint = null;
403
+
404
+ //for (const endpointz of endpoints) {
405
+ const endpoint = "embed_image";
406
+ // try {
407
+ console.log(`Trying endpoint: ${endpoint}`);
408
+ response = await this.callBackendApi(endpoint, { image: imageDataURL });
409
+ successfulEndpoint = endpoint;
410
+ this.log(`Successfully connected to endpoint: ${endpoint}`, 'success');
411
+ // Exit loop if successful
412
+ // }// catch (error) {
413
+ // console.log(`Failed with endpoint ${endpoint}: ${error.message}`);
414
+ // lastError = error;
415
+ // Continue to next endpoint
416
+ // }
417
+ //}
418
+
419
+ if (!response || !successfulEndpoint) {
420
+ // Provide helpful error message
421
+ const errorMsg = lastError ? lastError.message : 'Could not connect to any CLIP endpoint';
422
+ this.log(`CLIP-Encoder: All endpoints failed. ${errorMsg}`, 'error');
423
+
424
+ // Give user troubleshooting tips
425
+ console.error(`
426
+ 🔍 Troubleshooting CLIP Endpoint Connection:
427
+ 1. Check backend is running: ${this.backendUrl || 'http://localhost:5000'}
428
+ 2. Verify Flask blueprint is registered: app.register_blueprint(api_bp, url_prefix='/api')
429
+ 3. Check Flask console for errors
430
+ 4. Verify /api/embed_image route exists in api.py
431
+ 5. Test manually: curl -X POST ${this.backendUrl}/api/embed_image -H "Content-Type: application/json" -d '{"image":"data:image/png;base64,..."}'
432
+ `);
433
+
434
+ throw new Error(`All CLIP endpoints failed. Last error: ${errorMsg}`);
435
+ }
436
+
437
+ // Validate response
438
+ if (!response.embeddings || !Array.isArray(response.embeddings)) {
439
+ throw new Error('Invalid response: missing or malformed embeddings array');
440
+ }
441
+
442
+ const embeddings = response.embeddings;
443
+
444
+ if (embeddings.length === 0) {
445
+ throw new Error('Received empty embeddings array');
446
+ }
447
+
448
+ // Log success with sample of embeddings
449
+ const embeddingPreview = embeddings.slice(0, 3)
450
+ .map(val => val.toFixed(4))
451
+ .join(', ');
452
+
453
+ this.log(`CLIP-Encoder: Extracted feature vector [${embeddingPreview}, ...] (${embeddings.length} dimensions)`, 'success');
454
+
455
+ // Store embeddings for later use
456
+ this.currentEmbeddings = embeddings;
457
+
458
+ return embeddings;
459
+
460
+ } catch (error) {
461
+ this.log(`CLIP-Encoder: Failed to get embeddings. ${error.message}`, 'error');
462
+ console.error('Full error details:', error);
463
+ throw error;
464
+ }
465
+ }
466
+
467
+
468
+
469
+ updateDirectorUI() {
470
+ console.log('Updating director UI');
471
+ const totalFramesElement = document.getElementById('total-frames');
472
+ const downloadBtn = document.getElementById('download-btn');
473
+ const resetBtn = document.getElementById('reset-movie-btn');
474
+
475
+ if (totalFramesElement) totalFramesElement.textContent = `${this.movieFrames.length} FRAMES`;
476
+ if (downloadBtn) downloadBtn.disabled = this.movieFrames.length === 0;
477
+ if (resetBtn) resetBtn.disabled = this.movieFrames.length === 0;
478
+ }
479
+
480
+ resetMovie() {
481
+ console.log('Resetting movie');
482
+ this.movieFrames = [];
483
+ this.updateDirectorUI();
484
+ this.log('Director Mode: Timeline cleared.', 'warn');
485
+ }
486
+
487
+ resizeCanvas() {
488
+ if (!this.outputCanvas) return;
489
+ const rect = this.outputCanvas.parentElement.getBoundingClientRect();
490
+ this.outputCanvas.width = rect.width;
491
+ this.outputCanvas.height = rect.height;
492
+ if (!this.isGenerating) this.drawStaticNoise();
493
+ }
494
+
495
+ log(message, type = 'info') {
496
+ if (!this.logs) return;
497
+
498
+ const div = document.createElement('div');
499
+ div.className = `log-line ${type}`;
500
+ const time = new Date().toLocaleTimeString('en-US', { hour12: false });
501
+ div.innerHTML = `<span class="ts">[${time}]</span> ${message}`;
502
+ this.logs.appendChild(div);
503
+ this.logs.scrollTop = this.logs.scrollHeight;
504
+ }
505
+
506
+ async startGeneration() {
507
+ console.log('Starting generation');
508
+ if (this.isGenerating) {
509
+ console.log('Generation already in progress');
510
+ return;
511
+ }
512
+
513
+ // Disable UI elements during generation
514
+ this.isGenerating = true;
515
+ const startBtn = document.getElementById('start-btn');
516
+ if (startBtn) startBtn.disabled = true;
517
+
518
+ const promptInput = document.getElementById('prompt-input');
519
+ const imageInput = document.getElementById('image-input');
520
+ const quantumInfluence = document.getElementById('quantum-influence');
521
+ const entanglementDepth = document.getElementById('entanglement-depth');
522
+ const samplingMethod = document.getElementById('sampling-method');
523
+
524
+ if (promptInput) promptInput.disabled = true;
525
+ if (imageInput) imageInput.disabled = true;
526
+ if (quantumInfluence) quantumInfluence.disabled = true;
527
+ if (entanglementDepth) entanglementDepth.disabled = true;
528
+ if (samplingMethod) samplingMethod.disabled = true;
529
+
530
+ const generationStatsElement = document.getElementById('generation-stats');
531
+ if (generationStatsElement) generationStatsElement.style.display = 'block';
532
+
533
+ try {
534
+ if (!this.sourceImage) {
535
+ this.log('Error: Source image required for I2V generation.');
536
+ alert("Please upload a source image first.");
537
+ return;
538
+ }
539
+
540
+ const prompt = promptInput ? promptInput.value.trim() || "Quantum interpolation" : "Quantum interpolation";
541
+
542
+ // --- Backend Health Check ---
543
+ this.log('Checking backend availability...', 'info');
544
+ try {
545
+ // Try different possible health endpoints
546
+ let health;
547
+ const healthEndpoints = [
548
+ '/api/health',
549
+ '/health',
550
+ '/api/status',
551
+ '/status',
552
+ '/api/v1/health',
553
+ '/v1/health',
554
+ '/health',
555
+ '/api/healthz'
556
+ ];
557
+
558
+ let lastError;
559
+ for (const endpoint of healthEndpoints) {
560
+ try {
561
+ health = await this.callBackendApi(endpoint, {});
562
+ this.log(`Successfully connected to health endpoint: ${endpoint}`, 'success');
563
+ break; // Exit loop if successful
564
+ } catch (error) {
565
+ console.log(`Failed with health endpoint ${endpoint}: ${error.message}`);
566
+ lastError = error;
567
+ // Continue to next endpoint
568
+ }
569
+ }
570
+
571
+ if (!health) {
572
+ throw lastError || new Error('Could not connect to any health endpoint');
573
+ }
574
+
575
+ this.log(`Backend Status: ${health.status} (LLM: ${health.llm_status}, CLIP: ${health.clip_status})`, 'success');
576
+ if (health.llm_status.includes("not loaded") || health.clip_status.includes("not loaded")) {
577
+ throw new Error("One or more AI models not loaded on backend. Check backend console.");
578
+ }
579
+ } catch (error) {
580
+ this.log(`Backend not available or unhealthy: ${error.message}. Please ensure your Python Flask backend is running.`, 'error');
581
+ alert(`Backend Error: ${error.message}. Please start the backend.`);
582
+ return;
583
+ }
584
+ // --- End Backend Health Check ---
585
+
586
+ this.log(`Initializing I2V pipeline for: "${prompt.substring(0, 30)}..."`, 'info');
587
+
588
+ // Phase 1: Initialization
589
+ await this.phaseInitialization();
590
+
591
+ // Phase 2: Quantum Circuit
592
+ await this.phaseQuantumCircuit();
593
+
594
+ // Phase 3: WebGPU Compute
595
+ await this.phaseWebGPU();
596
+
597
+ // Phase 4: Bridge & Diffusion (Real Emulation)
598
+ await this.phaseRealDiffusion(prompt);
599
+
600
+ this.log('Generation Sequence Complete.', 'success');
601
+ if (generationStatsElement) generationStatsElement.innerHTML = 'GENERATION COMPLETE';
602
+
603
+ // DIRECTOR MODE: PREP NEXT FRAME
604
+ if (this.directorMode && this.movieFrames.length > 0) {
605
+ this.prepareNextContext();
606
+ }
607
+
608
+ } catch (error) {
609
+ this.log(`System Error during generation: ${error.message}`, 'error');
610
+ if (generationStatsElement) generationStatsElement.innerHTML = `ERROR: ${error.message}`;
611
+ console.error(error);
612
+ } finally {
613
+ // Re-enable UI elements
614
+ this.isGenerating = false;
615
+ if (startBtn) startBtn.disabled = false;
616
+ if (promptInput) promptInput.disabled = false;
617
+ if (imageInput) imageInput.disabled = false;
618
+ if (quantumInfluence) quantumInfluence.disabled = false;
619
+ if (entanglementDepth) entanglementDepth.disabled = false;
620
+ if (samplingMethod) samplingMethod.disabled = false;
621
+ }
622
+ }
623
+
624
+ prepareNextContext() {
625
+ if (!this.movieFrames.length) return;
626
+
627
+ // Get the last frame from the movie array
628
+ const lastFrameBitmap = this.movieFrames[this.movieFrames.length - 1];
629
+
630
+ // Create a temp canvas to extract the image
631
+ const canvas = document.createElement('canvas');
632
+ canvas.width = this.outputCanvas.width;
633
+ canvas.height = this.outputCanvas.height;
634
+ const ctx = canvas.getContext('2d');
635
+ ctx.drawImage(lastFrameBitmap, 0, 0);
636
+
637
+ // Convert to Image object for sourceImage
638
+ const newUrl = canvas.toDataURL();
639
+ const nextImg = new Image();
640
+ nextImg.onload = () => {
641
+ console.log('Next context prepared');
642
+ this.sourceImage = nextImg;
643
+ // Update Preview UI
644
+ const preview = document.getElementById('preview-img');
645
+ if (preview) preview.src = newUrl;
646
+ this.log('Director Mode: Context refreshed. Last frame set as input for next sequence.', 'secondary');
647
+ };
648
+ nextImg.src = newUrl;
649
+ }
650
+
651
+ async sleep(ms) {
652
+ return new Promise(r => setTimeout(r, ms));
653
+ }
654
+
655
+ async phaseInitialization() {
656
+ this.log('Phase 1: Initialization');
657
+ await this.sleep(600);
658
+ this.log('Quantizing source image to 512-dim latent space...');
659
+ await this.sleep(800);
660
+ }
661
+
662
+ async phaseQuantumCircuit() {
663
+ this.log('Phase 2: Quantum Circuit');
664
+ this.log(`Constructing ${this.config.depth}-layer quantum circuit...`);
665
+
666
+ // Trigger Viz animation if available globally
667
+ if (window.circuitViz) window.circuitViz.updateVizParameters(this.config.influence, this.config.depth);
668
+
669
+ await this.sleep(1000);
670
+ this.log('Applying Hadamard gates to initialization layer...');
671
+ await this.sleep(400);
672
+ this.log(`Entangling qubits 0-511 with depth ${this.config.depth}...`);
673
+ await this.sleep(800);
674
+ }
675
+
676
+ async phaseWebGPU() {
677
+ this.log('Phase 3: WebGPU Compute');
678
+ this.log('Compiling circuit to WGSL shaders...');
679
+ await this.sleep(600);
680
+ this.log('Injecting quantum noise into CLIP embeddings...');
681
+
682
+ // Simulate intense calculation, trigger stateViz with parameters
683
+ if (window.stateViz) window.stateViz.updateVizParameters(this.config.influence, this.config.depth);
684
+
685
+ // Keep sleep for visual pacing
686
+ for (let i = 0; i < 5; i++) {
687
+ await this.sleep(200);
688
+ }
689
+
690
+ const entropy = (Math.random() * 3 + 0.5).toFixed(4);
691
+ const entropyElement = document.getElementById('entropy-value');
692
+ if (entropyElement) entropyElement.textContent = entropy;
693
+ this.log(`Latent perturbation complete. Entropy: ${entropy}`, 'success');
694
+ }
695
+
696
+ async phaseRealDiffusion(prompt) {
697
+ this.log('Phase 4: Bridge & Diffusion');
698
+ this.log('Starting Frame-by-Frame Quantum Diffusion...');
699
+
700
+ // Switch tab to output to show the magic
701
+ const outputTab = document.querySelector('[data-view="output"]');
702
+ if (outputTab) outputTab.click();
703
+
704
+ // Get initial image data from the source image
705
+ let currentImage = this.sourceImage;
706
+ const totalFrames = 48; // Total frames for the movie
707
+ let currentFrameDataURL = currentImage.src; // Data URL of the current frame
708
+
709
+ for (let frame = 0; frame < totalFrames; frame++) {
710
+ this.log(`Requesting guidance for Frame ${frame + 1}/${totalFrames}...`);
711
+
712
+ const generationStatsElement = document.getElementById('generation-stats');
713
+ if (generationStatsElement) generationStatsElement.innerHTML = `GETTING GUIDANCE FOR FRAME ${frame + 1}/${totalFrames}<br>Quantum-Diffusing...`;
714
+
715
+ // Call backend for LLM guidance on how to transform the current frame
716
+ let guidanceResponse;
717
+ const guidanceEndpoints = [
718
+ '/api/generate_frame_guidance',
719
+ '/generate_frame_guidance',
720
+ '/api/llm/guidance',
721
+ '/llm/guidance',
722
+ '/api/v1/generate_frame_guidance',
723
+ '/v1/generate_frame_guidance',
724
+ '/generate_frame_guidance',
725
+ '/api/generate_frame',
726
+ '/generate_frame',
727
+ '/api/llm',
728
+ '/llm'
729
+ ];
730
+
731
+ let lastError;
732
+ for (const endpoint of guidanceEndpoints) {
733
+ try {
734
+ guidanceResponse = await this.callBackendApi(endpoint, {
735
+ image: currentFrameDataURL,
736
+ prompt: prompt,
737
+ influence: this.config.influence,
738
+ depth: this.config.depth,
739
+ frame_number: frame
740
+ });
741
+ this.log(`Successfully connected to guidance endpoint: ${endpoint}`, 'success');
742
+ break; // Exit loop if successful
743
+ } catch (error) {
744
+ console.log(`Failed with guidance endpoint ${endpoint}: ${error.message}`);
745
+ lastError = error;
746
+ // Continue to next endpoint
747
+ }
748
+ }
749
+
750
+ if (!guidanceResponse) {
751
+ throw lastError || new Error('Could not connect to any guidance endpoint');
752
+ }
753
+
754
+ const llmGuidance = guidanceResponse.guidance;
755
+ this.log(`LLM Guidance (Frame ${frame + 1}): ${llmGuidance.substring(0, 80)}...`);
756
+
757
+ if (generationStatsElement) generationStatsElement.innerHTML = `RENDERING FRAME ${frame + 1}/${totalFrames}<br>Applying Quantum Effects...`;
758
+
759
+ // Render the next frame based on LLM guidance and current image
760
+ const newFrameDataURL = await this.renderFrameTransition(currentImage, this.config.influence, llmGuidance, frame);
761
+
762
+ // Update currentImage for the next iteration
763
+ currentImage = await this.loadImageFromDataURL(newFrameDataURL);
764
+ currentFrameDataURL = newFrameDataURL; // Update dataURL as well
765
+
766
+ // Director Mode: Record Frame
767
+ if (this.directorMode && this.outputCanvas) {
768
+ const bitmap = await createImageBitmap(this.outputCanvas);
769
+ this.movieFrames.push(bitmap);
770
+ this.updateDirectorUI();
771
+ }
772
+
773
+ await this.sleep(50); // Render speed
774
+ }
775
+ }
776
+
777
+ async loadImageFromDataURL(dataURL) {
778
+ return new Promise((resolve, reject) => {
779
+ const img = new Image();
780
+ img.onload = () => resolve(img);
781
+ img.onerror = reject;
782
+ img.src = dataURL;
783
+ });
784
+ }
785
+
786
+ async renderFrameTransition(currentImage, influence, llmGuidance, frameNumber) {
787
+ if (!this.outputCanvas || !this.outputCtx) {
788
+ console.error('Canvas not available for rendering');
789
+ return currentImage.src;
790
+ }
791
+
792
+ const w = this.outputCanvas.width;
793
+ const h = this.outputCanvas.height;
794
+ this.outputCtx.clearRect(0, 0, w, h); // Clear canvas for new frame
795
+
796
+ // Create a temporary canvas to draw the current image and apply effects
797
+ const tempCanvas = document.createElement('canvas');
798
+ tempCanvas.width = w;
799
+ tempCanvas.height = h;
800
+ const tempCtx = tempCanvas.getContext('2d');
801
+
802
+ // Draw the current image, scaled to fit
803
+ const aspectRatio = currentImage.width / currentImage.height;
804
+ let drawWidth = w;
805
+ let drawHeight = h;
806
+ if (w / h > aspectRatio) { // Canvas is wider than image
807
+ drawWidth = h * aspectRatio;
808
+ } else { // Canvas is taller than image
809
+ drawHeight = w / aspectRatio;
810
+ }
811
+ const offsetX = (w - drawWidth) / 2;
812
+ const offsetY = (h - drawHeight) / 2;
813
+ tempCtx.drawImage(currentImage, offsetX, offsetY, drawWidth, drawHeight);
814
+
815
+ // Get ImageData for pixel manipulation
816
+ let imageData = tempCtx.getImageData(0, 0, w, h);
817
+ let data = imageData.data;
818
+
819
+ // --- Parse LLM Guidance and apply effects ---
820
+ const instructions = llmGuidance.toLowerCase().split(',').map(s => s.trim());
821
+ let pixelShiftX = 0;
822
+ let pixelShiftY = 0;
823
+ let colorShiftR = 0;
824
+ let colorShiftG = 0;
825
+ let colorShiftB = 0;
826
+ let blurRadius = 0;
827
+ let zoomFactor = 1;
828
+ let staticOverlayOpacity = 0;
829
+
830
+ for (const instruction of instructions) {
831
+ if (instruction.includes("shift red by")) {
832
+ colorShiftR += parseInt(instruction.match(/by (-?\d+)/)?.[1] || "0");
833
+ } else if (instruction.includes("shift green by")) {
834
+ colorShiftG += parseInt(instruction.match(/by (-?\d+)/)?.[1] || "0");
835
+ } else if (instruction.includes("shift blue by")) {
836
+ colorShiftB += parseInt(instruction.match(/by (-?\d+)/)?.[1] || "0");
837
+ } else if (instruction.includes("pixel displacement x-axis")) {
838
+ pixelShiftX += parseInt(instruction.match(/random (-?\d+)px/)?.[1] || "0");
839
+ } else if (instruction.includes("pixel displacement y-axis")) {
840
+ pixelShiftY += parseInt(instruction.match(/random (-?\d+)px/)?.[1] || "0");
841
+ } else if (instruction.includes("apply gaussian blur radius")) {
842
+ blurRadius = Math.max(blurRadius, parseInt(instruction.match(/radius (\d+)/)?.[1] || "0"));
843
+ } else if (instruction.includes("zoom in")) {
844
+ zoomFactor *= (1 + parseFloat(instruction.match(/zoom in (\d+(\.\d+)?)/)?.[1] || "0"));
845
+ } else if (instruction.includes("zoom out")) {
846
+ zoomFactor /= (1 + parseFloat(instruction.match(/zoom out (\d+(\.\d+)?)/)?.[1] || "0"));
847
+ } else if (instruction.includes("static pattern opacity")) {
848
+ staticOverlayOpacity = Math.max(staticOverlayOpacity, parseFloat(instruction.match(/opacity (\d+(\.\d+)?)/)?.[1] || "0"));
849
+ }
850
+ // Add more parsing for other instructions...
851
+ }
852
+
853
+ // Apply pixel shifts and color changes
854
+ const tempImageData = tempCtx.createImageData(w, h);
855
+ const tempData = tempImageData.data;
856
+
857
+ for (let y = 0; y < h; y++) {
858
+ for (let x = 0; x < w; x++) {
859
+ const originalIndex = (y * w + x) * 4;
860
+
861
+ const shiftedX = (x - pixelShiftX + w) % w;
862
+ const shiftedY = (y - pixelShiftY + h) % h;
863
+ const shiftedIndex = (shiftedY * w + shiftedX) * 4;
864
+
865
+ if (shiftedIndex >= 0 && shiftedIndex < data.length) {
866
+ tempData[originalIndex] = Math.min(255, Math.max(0, data[shiftedIndex] + colorShiftR)); // Red
867
+ tempData[originalIndex + 1] = Math.min(255, Math.max(0, data[shiftedIndex + 1] + colorShiftG)); // Green
868
+ tempData[originalIndex + 2] = Math.min(255, Math.max(0, data[shiftedIndex + 2] + colorShiftB)); // Blue
869
+ tempData[originalIndex + 3] = data[shiftedIndex + 3]; // Alpha
870
+ } else {
871
+ // Handle out-of-bounds pixels (e.g., fill with black or transparent)
872
+ tempData[originalIndex] = 0;
873
+ tempData[originalIndex + 1] = 0;
874
+ tempData[originalIndex + 2] = 0;
875
+ tempData[originalIndex + 3] = 255;
876
+ }
877
+ }
878
+ }
879
+ imageData = tempImageData; // Update imageData with shifted pixels
880
+
881
+ // Apply blur (very basic box blur for performance, Gaussian is complex with pixel data)
882
+ if (blurRadius > 0) {
883
+ const blurredImageData = tempCtx.createImageData(w, h);
884
+ const blurredData = blurredImageData.data;
885
+ for (let y = 0; y < h; y++) {
886
+ for (let x = 0; x < w; x++) {
887
+ let rSum = 0, gSum = 0, bSum = 0, aSum = 0;
888
+ let count = 0;
889
+ for (let ky = -blurRadius; ky <= blurRadius; ky++) {
890
+ for (let kx = -blurRadius; kx <= blurRadius; kx++) {
891
+ const nx = x + kx;
892
+ const ny = y + ky;
893
+ if (nx >= 0 && nx < w && ny >= 0 && ny < h) {
894
+ const idx = (ny * w + nx) * 4;
895
+ rSum += data[idx];
896
+ gSum += data[idx + 1];
897
+ bSum += data[idx + 2];
898
+ aSum += data[idx + 3];
899
+ count++;
900
+ }
901
+ }
902
+ }
903
+ const outputIndex = (y * w + x) * 4;
904
+ blurredData[outputIndex] = rSum / count;
905
+ blurredData[outputIndex + 1] = gSum / count;
906
+ blurredData[outputIndex + 2] = bSum / count;
907
+ blurredData[outputIndex + 3] = aSum / count;
908
+ }
909
+ }
910
+ imageData = blurredImageData;
911
+ }
912
+
913
+ // Apply static overlay
914
+ if (staticOverlayOpacity > 0) {
915
+ for (let i = 0; i < imageData.data.length; i += 4) {
916
+ const staticValue = Math.random() * 255;
917
+ imageData.data[i] = (imageData.data[i] * (1 - staticOverlayOpacity)) + (staticValue * staticOverlayOpacity);
918
+ imageData.data[i+1] = (imageData.data[i+1] * (1 - staticOverlayOpacity)) + (staticValue * staticOverlayOpacity);
919
+ imageData.data[i+2] = (imageData.data[i+2] * (1 - staticOverlayOpacity)) + (staticValue * staticOverlayOpacity);
920
+ }
921
+ }
922
+
923
+ // Draw the processed ImageData back to the temporary canvas
924
+ tempCtx.putImageData(imageData, 0, 0);
925
+
926
+ // Apply zoom (done by redrawing tempCanvas onto outputCanvas)
927
+ const zoomedWidth = w * zoomFactor;
928
+ const zoomedHeight = h * zoomFactor;
929
+ const zoomOffsetX = (w - zoomedWidth) / 2;
930
+ const zoomOffsetY = (h - zoomedHeight) / 2;
931
+
932
+ this.outputCtx.drawImage(tempCanvas, zoomOffsetX, zoomOffsetY, zoomedWidth, zoomedHeight);
933
+
934
+ // Periodically draw circuit overlay if influence is high
935
+ if (influence > 50 && frameNumber % 5 === 0) {
936
+ this.drawCircuitOverlay();
937
+ }
938
+
939
+ // Convert the final rendered canvas state to a DataURL for the next iteration
940
+ return this.outputCanvas.toDataURL();
941
+ }
942
+
943
+ drawCircuitOverlay() {
944
+ if (!this.outputCtx) return;
945
+
946
+ const ctx = this.outputCtx;
947
+ const w = this.outputCanvas.width;
948
+ const h = this.outputCanvas.height;
949
+
950
+ ctx.strokeStyle = 'rgba(0, 240, 255, 0.3)';
951
+ ctx.lineWidth = 1;
952
+ ctx.beginPath();
953
+ const y = Math.random() * h;
954
+ ctx.moveTo(0, y);
955
+ ctx.lineTo(w, y);
956
+ ctx.stroke();
957
+
958
+ ctx.fillStyle = 'rgba(0, 240, 255, 0.5)';
959
+ // Attempt to get a more dynamic font size
960
+ const fontSize = Math.max(10, Math.min(w, h) / 30);
961
+ ctx.font = `${fontSize}px Arial`;
962
+ ctx.fillText(`Q-GATE-${Math.floor(Math.random()*100)}`, 10, y - 5);
963
+ }
964
+
965
+ drawStaticNoise() {
966
+ if (!this.outputCanvas || !this.outputCtx) return;
967
+
968
+ const w = this.outputCanvas.width;
969
+ const h = this.outputCanvas.height;
970
+ const id = this.outputCtx.createImageData(w, h);
971
+ const d = id.data;
972
+
973
+ for (let i = 0; i < d.length; i += 4) {
974
+ const v = Math.random() * 20; // Dark noise
975
+ d[i] = v; d[i+1] = v; d[i+2] = v + 10; d[i+3] = 255;
976
+ }
977
+ this.outputCtx.putImageData(id, 0, 0);
978
+ }
979
+
980
+ async downloadMovie() {
981
+ if (!this.movieFrames.length) return;
982
+
983
+ const btn = document.getElementById('download-btn');
984
+ const originalText = btn.innerHTML;
985
+ if (btn) {
986
+ btn.disabled = true;
987
+ btn.innerHTML = 'RENDER...';
988
+ }
989
+
990
+ this.log('Starting Movie Rendering...', 'info');
991
+
992
+ try {
993
+ // Create a hidden canvas for playback
994
+ const canvas = document.createElement('canvas');
995
+ canvas.width = this.outputCanvas.width;
996
+ canvas.height = this.outputCanvas.height;
997
+ const ctx = canvas.getContext('2d');
998
+
999
+ // Setup MediaRecorder
1000
+ const stream = canvas.captureStream(30); // 30 FPS
1001
+ const mimeType = MediaRecorder.isTypeSupported('video/webm;codecs=vp9')
1002
+ ? 'video/webm;codecs=vp9'
1003
+ : 'video/webm';
1004
+
1005
+ const recorder = new MediaRecorder(stream, {
1006
+ mimeType: mimeType,
1007
+ videoBitsPerSecond: 5000000 // 5Mbps
1008
+ });
1009
+
1010
+ const chunks = [];
1011
+ recorder.ondataavailable = (e) => {
1012
+ if (e.data.size > 0) chunks.push(e.data);
1013
+ };
1014
+
1015
+ recorder.onstop = () => {
1016
+ const blob = new Blob(chunks, { type: 'video/webm' });
1017
+ const url = URL.createObjectURL(blob);
1018
+ const a = document.createElement('a');
1019
+ a.href = url;
1020
+ a.download = `wan-quantum-director-cut-${Date.now()}.webm`;
1021
+ a.click();
1022
+ URL.revokeObjectURL(url);
1023
+ this.log('Movie Downloaded Successfully.', 'success');
1024
+ if (btn) {
1025
+ btn.innerHTML = originalText;
1026
+ btn.disabled = false;
1027
+ }
1028
+ };
1029
+
1030
+ recorder.start();
1031
+
1032
+ // Play frames into recorder
1033
+ const frameDuration = 1000 / 30; // 30fps
1034
+
1035
+ for (const bitmap of this.movieFrames) {
1036
+ ctx.drawImage(bitmap, 0, 0);
1037
+ // Request dummy frame to keep stream active if needed,
1038
+ // but loop should be enough if async enough.
1039
+ // Actually, for captureStream to pick it up, we should wait a tick.
1040
+ // But for simplicity, we'll use a short timeout
1041
+ await new Promise(r => setTimeout(r, frameDuration));
1042
+ }
1043
+
1044
+ recorder.stop();
1045
+
1046
+ } catch (e) {
1047
+ this.log(`Export failed: ${e.message}`, 'error');
1048
+ if (btn) {
1049
+ btn.innerHTML = originalText;
1050
+ btn.disabled = false;
1051
+ }
1052
+ }
1053
+ }
1054
+ }
1055
+
1056
+ // Make the class available globally
1057
+ window.MainSystemSimulator = MainSystemSimulator;
app.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# main.py or app.py for Hugging Face Spaces
#
# FastAPI backend for the Quantum-Enhanced WAN 2.1 frontend. Serves CLIP image
# embeddings, LLM chat completions, per-frame transformation guidance, and a
# simple file-upload echo endpoint.
from fastapi import FastAPI, Request, File, UploadFile, Form, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, HTMLResponse
from pydantic import BaseModel, Field
import os
import base64
from PIL import Image
import io
import torch
from transformers import CLIPProcessor, CLIPModel
from mlc_llm import ChatModule
import threading
import logging

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- FastAPI App Initialization ---
app = FastAPI(
    title="Q-WAN-2 Backend API",
    description="API for the Quantum-Enhanced WAN 2.1 system.",
    version="2.1"
)

# --- CORS Middleware ---
# Allow requests from your frontend's origin
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"], # For development. In production, specify your frontend's URL.
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# --- Configuration ---
# Model locations are overridable via environment variables for deployment.
MLC_MODEL_ARTIFACTS_DIR = os.getenv("MLC_MODEL_ARTIFACTS_DIR", "./model_artifacts")
MLC_MODEL_NAME = os.getenv("MLC_MODEL_NAME", "Llama-2-7b-chat-hf-q4f16_1")
MLC_MODEL_PATH = os.path.join(MLC_MODEL_ARTIFACTS_DIR, MLC_MODEL_NAME)
CLIP_MODEL_NAME = "openai/clip-vit-base-patch32"

# --- Global Model Instances ---
# Populated lazily by load_mlc_llm_model()/load_clip_model(); None = not loaded.
clip_processor = None
clip_model = None
mlc_chat_module = None
# Serializes access to the MLC chat module across concurrent requests.
mlc_lock = threading.Lock()
47
+
48
+ # --- Pydantic Models for Request Bodies ---
49
class ImageEmbedRequest(BaseModel):
    """Request body for POST /api/embed_image."""
    image: str = Field(..., description="Base64 encoded image data URL (e.g., 'data:image/png;base64,...')")
51
+
52
class CompletionRequest(BaseModel):
    """Request body for POST /api/completions."""
    prompt: str
    # Prepended to the user prompt when building the full LLM input.
    system_message: str = "You are a creative AI assistant for video generation."
55
+
56
class FrameGuidanceRequest(BaseModel):
    """Request body for POST /api/generate_frame_guidance."""
    image: str = Field(..., description="Base64 encoded image data URL of the current frame.")
    prompt: str = "Quantum interpolation"
    influence: int = Field(5, ge=0, le=100, description="Quantum influence percentage (0-100).")
    depth: int = Field(16, ge=1, description="Entanglement depth (number of layers).")
    # Zero-based index of the frame being transformed; echoed into the LLM prompt.
    frame_number: int = 0
62
+
63
+ # --- Model Loading Functions ---
64
def load_mlc_llm_model():
    """Lazily initialize the global MLC LLM chat module.

    Returns the loaded ``ChatModule`` instance, or ``None`` when the model
    directory is missing or construction fails (the error is logged and the
    global stays ``None`` so later calls can retry).
    """
    global mlc_chat_module

    # Already loaded — reuse the cached instance.
    if mlc_chat_module is not None:
        return mlc_chat_module

    logging.info(f"Attempting to load LLM model: {MLC_MODEL_NAME} from {MLC_MODEL_PATH}...")
    try:
        if not os.path.exists(MLC_MODEL_PATH):
            logging.error(f"Error: MLC LLM model path not found: {MLC_MODEL_PATH}")
            return None
        mlc_chat_module = ChatModule(model=MLC_MODEL_NAME, model_path=MLC_MODEL_PATH)
        logging.info("MLC LLM model loaded successfully.")
    except Exception as e:
        logging.error(f"Error loading MLC LLM model: {e}")
        mlc_chat_module = None
    return mlc_chat_module
78
+
79
def load_clip_model():
    """Lazily load the CLIP processor/model pair into the module globals.

    Returns the ``(processor, model)`` tuple; both entries are ``None`` when
    loading failed (the error is logged). Moves the model to CUDA if available.
    """
    global clip_processor, clip_model

    # Already loaded — reuse the cached pair.
    if clip_model is not None:
        return clip_processor, clip_model

    logging.info(f"Attempting to load CLIP model: {CLIP_MODEL_NAME}...")
    try:
        clip_processor = CLIPProcessor.from_pretrained(CLIP_MODEL_NAME)
        clip_model = CLIPModel.from_pretrained(CLIP_MODEL_NAME)
        if torch.cuda.is_available():
            clip_model.to("cuda")
            logging.info("CLIP model moved to CUDA.")
        logging.info("CLIP model loaded successfully.")
    except Exception as e:
        logging.error(f"Error loading CLIP model: {e}")
        clip_processor, clip_model = None, None
    return clip_processor, clip_model
94
+
95
# Load models on startup
# NOTE(review): @app.on_event is deprecated in newer FastAPI releases in favor
# of lifespan handlers; kept as-is to preserve current behavior.
@app.on_event("startup")
def startup_event():
    """Eagerly load both AI models when the server starts.

    Failures are logged by the loaders and leave the corresponding global as
    None; /api/health then reports the model as "not loaded".
    """
    load_mlc_llm_model()
    load_clip_model()
100
+
101
+ # --- API Endpoints ---
102
+
103
@app.get("/api/health", tags=["General"])
async def health_check():
    """Checks the health and status of the backend services.

    Reports liveness plus the load state of the LLM and CLIP models; the
    frontend's pre-flight check parses these fields.
    """
    llm_loaded = mlc_chat_module is not None
    clip_loaded = clip_model is not None
    return {
        "status": "Quantum-Enhanced WAN 2.1 Backend is running!",
        "llm_status": "loaded" if llm_loaded else "not loaded (check logs)",
        "clip_status": "loaded" if clip_loaded else "not loaded (check logs)",
    }
111
+
112
@app.post("/api/embed_image", tags=["CLIP"])
async def embed_image(request: ImageEmbedRequest):
    """Generates a CLIP embedding vector for a given image.

    Decodes the base64 data URL, runs the CLIP vision encoder, L2-normalizes
    the features, and returns them as a plain list of floats.
    """
    if clip_processor is None or clip_model is None:
        raise HTTPException(status_code=500, detail="CLIP model not loaded. Check server logs for details.")

    try:
        # Strip the "data:image/...;base64," header before decoding.
        _header, b64_payload = request.image.split(",", 1)
        pil_image = Image.open(io.BytesIO(base64.b64decode(b64_payload))).convert("RGB")

        model_inputs = clip_processor(images=pil_image, return_tensors="pt")
        if torch.cuda.is_available():
            model_inputs = {name: tensor.to("cuda") for name, tensor in model_inputs.items()}

        with torch.no_grad():
            features = clip_model.get_image_features(**model_inputs)

        # L2-normalize so downstream cosine comparisons are well-behaved.
        normalized = features / features.norm(p=2, dim=-1, keepdim=True)

        return {"embeddings": normalized.squeeze().tolist()}
    except Exception as e:
        logging.error(f"Error embedding image: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to embed image: {str(e)}")
138
+
139
@app.post("/api/completions", tags=["LLM"])
async def chat_completions(request: CompletionRequest):
    """Generates a text completion from the LLM.

    The system message and user prompt are concatenated into a single prompt;
    the chat module is reset per request, so completions are stateless.
    """
    if mlc_chat_module is None:
        raise HTTPException(status_code=500, detail="LLM model not loaded. Check server logs for details.")

    full_prompt = f"{request.system_message}\nUser: {request.prompt}"
    try:
        # The chat module is shared across requests; the lock serializes access.
        with mlc_lock:
            mlc_chat_module.reset_chat()
            completion_text = mlc_chat_module.generate(full_prompt)
        return {"completion": completion_text}
    except Exception as e:
        logging.error(f"Error getting chat completion: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to get chat completion: {str(e)}")
154
+
155
@app.post("/api/generate_frame_guidance", tags=["LLM", "CLIP"])
async def generate_frame_guidance(request: FrameGuidanceRequest):
    """Generates LLM guidance for transforming a video frame.

    Pipeline: (1) embed the incoming frame with CLIP, (2) splice a snippet of
    the embedding plus the user prompt and quantum settings into a director
    prompt, (3) ask the LLM for concrete per-frame transformation instructions
    (the frontend parses these as "shift blue by +10, ..."-style directives).
    If only the LLM step fails, a canned fallback instruction is returned
    instead of an error, so frame generation keeps going.
    """
    if mlc_chat_module is None or clip_model is None:
        raise HTTPException(status_code=500, detail="One or more AI models not loaded. Check server logs for details.")

    try:
        # 1. Get CLIP embeddings for the current frame
        header, encoded = request.image.split(",", 1)  # strip the data-URL header
        image_bytes = base64.b64decode(encoded)
        input_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")

        clip_inputs = clip_processor(images=input_image, return_tensors="pt")
        if torch.cuda.is_available():
            clip_inputs = {k: v.to("cuda") for k, v in clip_inputs.items()}

        with torch.no_grad():
            image_features = clip_model.get_image_features(**clip_inputs)
        image_embeddings_np = image_features.squeeze().cpu().numpy()
        # Only the first 10 components are shown to the LLM as visual context.
        embedding_snippet = ", ".join([f"{x:.4f}" for x in image_embeddings_np[:10]])

        # 2. Use LLM to generate guidance
        llm_prompt = (
            f"You are an AI video director for a quantum diffusion system. Your task is to guide the transformation "
            f"of a video frame based on quantum principles and user input. "
            f"Given the current frame's visual context (CLIP features: [{embedding_snippet}...]), "
            f"the user's creative prompt: '{request.prompt}', "
            f"and the quantum settings (Quantum Influence: {request.influence}%, Entanglement Depth: {request.depth} layers), "
            f"describe *precisely* how the quantum diffusion effect should transform the current frame into frame {request.frame_number + 1}. "
            f"Focus on quantifiable visual parameters like color shifts, blur, pixel displacement, zoom, and noise. "
            f"Be concise and output only transformation instructions. "
            f"Example: 'shift blue by +10, apply motion blur strength 8, zoom 1.05x, add subtle scanlines opacity 0.1'.\n"
            f"Transformation Instructions for frame {request.frame_number + 1}:"
        )

        llm_guidance = ""
        try:
            # Lock: the shared chat module is reset and queried atomically.
            with mlc_lock:
                mlc_chat_module.reset_chat()
                llm_guidance = mlc_chat_module.generate(llm_prompt)
        except Exception as llm_e:
            # Degrade gracefully: LLM failure yields a generic instruction, not a 500.
            logging.error(f"LLM guidance generation failed: {llm_e}. Using fallback guidance.")
            llm_guidance = f"apply subtle glitch effect, shift colors slightly based on quantum influence {request.influence}%."

        return {
            "guidance": llm_guidance,
            "log": (f"Backend provided guidance for frame {request.frame_number + 1} based on prompt: '{request.prompt[:50]}...', "
                    f"influence: {request.influence}, depth: {request.depth}. LLM guidance: '{llm_guidance[:50]}...'")
        }
    except Exception as e:
        logging.error(f"Error generating frame guidance: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to generate frame guidance: {str(e)}")
207
+
208
+
209
@app.post("/api/upload", tags=["File Handling"])
async def upload_file(file: UploadFile = File(...)):
    """Uploads an image file and returns it as a base64 data URL.

    No file is persisted server-side — the bytes are simply re-encoded so the
    frontend can use them directly as an <img> source.
    """
    try:
        raw_bytes = await file.read()
        encoded = base64.b64encode(raw_bytes).decode('utf-8')
        # Fall back to JPEG when the client did not declare a content type.
        mime_type = file.content_type or 'image/jpeg'
        return {
            "message": "File uploaded successfully",
            "image_url": f"data:{mime_type};base64,{encoded}",
        }
    except Exception as e:
        logging.error(f"Error uploading file: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to upload file: {str(e)}")
228
+
229
+
230
+ # --- UI Routes (for serving frontend files) ---
231
+ # NOTE: For production, it's better to serve static files with a proper web server.
232
+ # This is fine for Hugging Face Spaces.
233
+
234
@app.get("/", response_class=HTMLResponse, include_in_schema=False)
async def read_index():
    """Serve the frontend's index.html from the working directory.

    Returns 404 when the file is missing.
    """
    try:
        # FIX: explicit encoding — the platform default (e.g. cp1252 on
        # Windows) could mis-decode a UTF-8 index.html.
        with open('index.html', 'r', encoding='utf-8') as f:
            return HTMLResponse(content=f.read(), status_code=200)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="index.html not found")
241
+
242
+ # You can add other static file routes here if needed, e.g., for CSS/JS
243
+ # @app.get("/styles.css", include_in_schema=False)
244
+ # async def read_css():
245
+ # ...
246
+
247
# To run this locally:
# 1. pip install "fastapi[all]"
# 2. uvicorn app:app --reload   (this file is app.py, so the module name is "app", not "main")
architecture-diagram.js ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Architecture visualization
2
+
3
class ArchitectureDiagram {
    constructor() {
        this.canvas = document.getElementById('architecture-canvas');
        if (!this.canvas) return;

        this.ctx = this.canvas.getContext('2d');
        this.resize();
        window.addEventListener('resize', () => this.resize());

        // Horizontal bands of the stack, top to bottom (y is a height fraction).
        this.layers = [
            { name: "CLIENT", y: 0.15, color: "#00f0ff" },
            { name: "BACKEND (NODE)", y: 0.4, color: "#ffffff" },
            { name: "QUANTUM (GPU)", y: 0.65, color: "#7000ff" },
            { name: "STORAGE", y: 0.9, color: "#444444" }
        ];

        // Transient dots travelling from the CLIENT rail down to STORAGE.
        this.packets = [];
        this.animate();
    }

    resize() {
        const bounds = this.canvas.getBoundingClientRect();
        this.canvas.width = bounds.width;
        this.canvas.height = bounds.height;
    }

    draw() {
        const { width, height } = this.canvas;
        const ctx = this.ctx;
        ctx.clearRect(0, 0, width, height);

        // Render each layer: rail, label, and three node boxes.
        for (const layer of this.layers) {
            const railY = height * layer.y;

            // Rail line (faint)
            ctx.beginPath();
            ctx.strokeStyle = layer.color;
            ctx.globalAlpha = 0.3;
            ctx.lineWidth = 2;
            ctx.moveTo(50, railY);
            ctx.lineTo(width - 50, railY);
            ctx.stroke();

            // Layer name above the rail
            ctx.globalAlpha = 1;
            ctx.fillStyle = layer.color;
            ctx.font = "12px Space Mono";
            ctx.fillText(layer.name, 50, railY - 10);

            // Three evenly spaced node boxes sitting on the rail
            for (let n = 1; n <= 3; n++) {
                const nodeX = 50 + (width - 100) * (n / 4);
                ctx.beginPath();
                ctx.fillStyle = '#0f111a';
                ctx.strokeStyle = layer.color;
                ctx.lineWidth = 2;
                ctx.rect(nodeX - 15, railY - 15, 30, 30);
                ctx.fill();
                ctx.stroke();
            }
        }

        // Occasionally emit a new packet near the middle of the top rail.
        if (Math.random() > 0.95) {
            this.packets.push({
                x: 50 + (width - 100) * (Math.random() * 0.5 + 0.25),
                y: height * this.layers[0].y,
                targetY: height * this.layers[3].y,
                speed: 2 + Math.random() * 2
            });
        }

        // Advance and render packets; drop the ones past the bottom rail.
        ctx.fillStyle = '#fff';
        this.packets = this.packets.filter(packet => {
            packet.y += packet.speed;

            ctx.beginPath();
            ctx.arc(packet.x, packet.y, 3, 0, Math.PI * 2);
            ctx.fill();

            return packet.y <= packet.targetY;
        });
    }

    animate() {
        this.draw();
        requestAnimationFrame(() => this.animate());
    }
}

new ArchitectureDiagram();
gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
index (1).html ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Quantum-Enhanced WAN 2.1 | System Simulation</title>
7
+ <link rel="stylesheet" href="styles.css">
8
+ </head>
9
+ <body><script>
10
+ // Configure backend URL based on environment
11
+ window.BACKEND_URL = window.location.origin; // Use same origin on HF Space
12
+ console.log('Backend configured for:', window.BACKEND_URL);
13
+ </script>
14
+ <div class="app-container">
15
+ <nav class="side-nav">
16
+ <div class="nav-brand">
17
+ <div class="brand-icon">Q</div>
18
+ <span>WAN 2.1</span>
19
+ </div>
20
+ <div class="nav-links">
21
+ <a href="#simulator" class="nav-link active" data-section="simulator">
22
+ <span class="icon">⚡</span> Simulation
23
+ </a>
24
+ <a href="#overview" class="nav-link" data-section="overview">
25
+ <span class="icon">⦿</span> Executive Summary
26
+ </a>
27
+ <a href="#architecture" class="nav-link" data-section="architecture">
28
+ <span class="icon">☊</span> Architecture
29
+ </a>
30
+ <a href="#backend" class="nav-link" data-section="backend">
31
+ <span class="icon">◈</span> Quantum Backend
32
+ </a>
33
+ <a href="#specs" class="nav-link" data-section="specs">
34
+ <span class="icon">⚙</span> Specifications
35
+ </a>
36
+ </div>
37
+ <div class="nav-status">
38
+ <div class="status-row">
39
+ <span>WebGPU</span>
40
+ <span class="status-dot active"></span>
41
+ </div>
42
+ <div class="status-row">
43
+ <span>Qiskit.js</span>
44
+ <span class="status-dot active"></span>
45
+ </div>
46
+ <div class="status-row">
47
+ <span>Qubits</span>
48
+ <span class="mono">512</span>
49
+ </div>
50
+ </div>
51
+ </nav>
52
+
53
+ <main class="content-area">
54
+ <!-- Simulation Section (Default) -->
55
+ <section id="simulator" class="section active">
56
+ <header class="section-header">
57
+ <h1>Live System Simulation</h1>
58
+ <p class="subtitle">Hybrid Quantum-Classical Video Generation Interface</p>
59
+ </header>
60
+
61
+ <div class="simulation-grid">
62
+ <div class="control-panel glass-panel">
63
+ <h3>I2V Input Parameters</h3>
64
+
65
+ <div class="input-group file-drop-zone" id="drop-zone">
66
+ <input type="file" id="image-input" accept="image/*">
67
+ <div class="drop-content">
68
+ <span class="icon">📁</span>
69
+ <span class="drop-text">UPLOAD SOURCE IMAGE</span>
70
+ <span class="drop-sub">Drag & Drop or Click to Browse</span>
71
+ </div>
72
+ <img id="preview-img" class="hidden" alt="Preview">
73
+ </div>
74
+
75
+ <div class="input-group">
76
+ <label>Prompt Context (CLIP Guidance)</label>
77
+ <textarea id="prompt-input" placeholder="Describe the motion to generate... (e.g., 'Camera zoom with quantum distortion')"></textarea>
78
+ </div>
79
+
80
+ <div class="controls-row">
81
+ <div class="input-group">
82
+ <label>Quantum Influence <span class="value-badge" id="influence-val">5%</span></label>
83
+ <input type="range" id="quantum-influence" min="0" max="100" value="5">
84
+ <div class="slider-meta">Low (Deterministic) — High (Chaotic)</div>
85
+ </div>
86
+
87
+ <div class="input-group">
88
+ <label>Entanglement Depth <span class="value-badge" id="depth-val">16</span></label>
89
+ <input type="range" id="entanglement-depth" min="1" max="16" value="16">
90
+ <div class="slider-meta">Circuit Layers (4-16)</div>
91
+ </div>
92
+ </div>
93
+
94
+ <div class="controls-row">
95
+ <div class="input-group">
96
+ <label>Sampling Method</label>
97
+ <select id="sampling-method">
98
+ <option value="adaptive">Adaptive Quantum Injection</option>
99
+ <option value="direct">Direct Latent Modulation</option>
100
+ <option value="feedback">Continuous Feedback Loop</option>
101
+ </select>
102
+ </div>
103
+ </div>
104
+
105
+ <div class="director-controls glass-panel-inner">
106
+ <div class="director-header">
107
+ <label class="switch-container">
108
+ <span class="label-text">DIRECTOR MODE</span>
109
+ <input type="checkbox" id="director-mode-toggle" checked>
110
+ <span class="toggle-slider"></span>
111
+ </label>
112
+ <span class="frame-count" id="total-frames">0 FRAMES</span>
113
+ </div>
114
+ <div class="director-actions">
115
+ <button id="download-btn" class="btn-secondary" disabled>
116
+ <span class="icon">💾</span> SAVE MOVIE
117
+ </button>
118
+ <button id="reset-movie-btn" class="btn-danger" disabled>
119
+ <span class="icon">✖</span> CLEAR
120
+ </button>
121
+ </div>
122
+ </div>
123
+
124
+ <div class="controls-row">
125
+ <button id="start-btn" class="btn-primary">
126
+ <span class="btn-text">INITIALIZE GENERATION</span>
127
+ <span class="btn-glitch"></span>
128
+ </button>
129
+ </div>
130
+ </div>
131
+
132
+ <div class="visualization-panel glass-panel">
133
+ <div class="viz-tabs">
134
+ <button class="viz-tab active" data-view="output">Video Output</button>
135
+ <button class="viz-tab" data-view="circuit">Quantum Circuit</button>
136
+ <button class="viz-tab" data-view="state">State Vector</button>
137
+ </div>
138
+
139
+ <div class="viz-content">
140
+ <div id="view-output" class="viz-view active">
141
+ <canvas id="output-canvas"></canvas>
142
+ <div class="overlay-stats" id="generation-stats">WAITING FOR INPUT...</div>
143
+ <div class="scanline"></div>
144
+ </div>
145
+ <div id="view-circuit" class="viz-view">
146
+ <canvas id="quantum-circuit-canvas"></canvas>
147
+ </div>
148
+ <div id="view-state" class="viz-view">
149
+ <canvas id="state-vector-canvas"></canvas>
150
+ <div class="entropy-readout">Entanglement Entropy: <span id="entropy-value">0.00</span></div>
151
+ </div>
152
+ </div>
153
+ </div>
154
+
155
+ <div class="terminal-panel glass-panel">
156
+ <div class="terminal-header">
157
+ <span>SYSTEM_LOGS</span>
158
+ <span class="terminal-status">CONNECTED</span>
159
+ </div>
160
+ <div class="terminal-body" id="system-logs">
161
+ <div class="log-line"><span class="ts">[00:00:00]</span> System ready. Waiting for user input...</div>
162
+ </div>
163
+ </div>
164
+ </div>
165
+ </section>
166
+
167
+ <!-- Executive Summary -->
168
+ <section id="overview" class="section">
169
+ <div class="document-wrapper glass-panel">
170
+ <h1>Executive Summary</h1>
171
+ <p class="lead">This document defines a complete rebuild of Alibaba's WAN 2.1 video generation system with a revolutionary web-based quantum compute backend.</p>
172
+
173
+ <p>The system replaces traditional GPU inference with a <strong>hybrid quantum-classical architecture</strong> running entirely in browser using WebGPU and Qiskit-powered WebWorkers. The core innovation is a novel interface where real quantum circuit evaluations directly influence the diffusion model's latent space, creating a unique AI system where quantum superposition can directly affect generative outputs.</p>
174
+
175
+ <div class="metric-cards">
176
+ <div class="card">
177
+ <div class="metric-val">512</div>
178
+ <div class="metric-label">Qubits (State Matrix)</div>
179
+ </div>
180
+ <div class="card">
181
+ <div class="metric-val">WebGPU</div>
182
+ <div class="metric-label">Compute Engine</div>
183
+ </div>
184
+ <div class="card">
185
+ <div class="metric-val">Local</div>
186
+ <div class="metric-label">Privacy-First Inference</div>
187
+ </div>
188
+ </div>
189
+
190
+ <h3>Core Objectives</h3>
191
+ <ul class="feature-list">
192
+ <li>Reproduce WAN 2.1's video generation capabilities in a web browser.</li>
193
+ <li>Leverage quantum computing for unique generative variability.</li>
194
+ <li>Implement a privacy-first, high-performance video generator on consumer hardware.</li>
195
+ </ul>
196
+ </div>
197
+ </section>
198
+
199
+ <!-- Architecture -->
200
+ <section id="architecture" class="section">
201
+ <div class="document-wrapper glass-panel">
202
+ <h1>System Architecture Overview</h1>
203
+ <p>The quantum-enhanced WAN 2.1 system is composed of four main layers orchestrated to deliver the final video output.</p>
204
+
205
+ <div class="diagram-container">
206
+ <canvas id="architecture-canvas"></canvas>
207
+ </div>
208
+
209
+ <div class="architecture-grid">
210
+ <div class="arch-card">
211
+ <h3>1. Browser Client Layer</h3>
212
+ <p>User-facing front-end running in the browser.</p>
213
+ <ul>
214
+ <li><strong>UI Canvas:</strong> Control center for prompt input and parameter adjustment.</li>
215
+ <li><strong>Quantum Visualizer:</strong> Real-time display of quantum circuits and state vectors.</li>
216
+ <li><strong>Video Player:</strong> Embedded player for generated results.</li>
217
+ <li><strong>Main Thread Orchestrator:</strong> Manages state and coordinates visualization.</li>
218
+ </ul>
219
+ </div>
220
+ <div class="arch-card">
221
+ <h3>2. Quantum Compute Backend</h3>
222
+ <p>Core system leveraging quantum computing for generative influence.</p>
223
+ <ul>
224
+ <li><strong>WebWorker Pool:</strong> 4-8 parallel workers for circuit simulation.</li>
225
+ <li><strong>Qiskit.js:</strong> Circuit builder for custom gate sequences.</li>
226
+ <li><strong>WebGPU Engine:</strong> Accelerates 512D state vector evolution using WGSL shaders.</li>
227
+ <li><strong>State Analyzer:</strong> Computes entanglement entropy and fidelity.</li>
228
+ </ul>
229
+ </div>
230
+ <div class="arch-card">
231
+ <h3>3. Web Backend Server</h3>
232
+ <p>Orchestration and classical deep learning inference (Node.js/Python).</p>
233
+ <ul>
234
+ <li><strong>REST API Gateway:</strong> Handles requests and authentication.</li>
235
+ <li><strong>Quantum-Classical Bridge:</strong> Translates quantum features into diffusion parameters.</li>
236
+ <li><strong>WAN 2.1 Engine:</strong> Distributed inference layer for T5 encoder and VAE/Diffusion models.</li>
237
+ </ul>
238
+ </div>
239
+ <div class="arch-card">
240
+ <h3>4. Storage & Cache</h3>
241
+ <p>Persistence layer for models and results.</p>
242
+ <ul>
243
+ <li><strong>Redis Cache:</strong> In-memory storage for model weights.</li>
244
+ <li><strong>S3/Minio:</strong> Durable storage for generated videos.</li>
245
+ <li><strong>Circuit Library:</strong> Repository of pre-defined quantum circuits.</li>
246
+ </ul>
247
+ </div>
248
+ </div>
249
+ </div>
250
+ </section>
251
+
252
+ <!-- Backend Specs -->
253
+ <section id="backend" class="section">
254
+ <div class="document-wrapper glass-panel">
255
+ <h1>Quantum Compute Backend Specification</h1>
256
+
257
+ <h3>512-Dimensional Quantum State Architecture</h3>
258
+ <p>Operating on a 512-qubit system requires sophisticated memory management. The system uses a sparse representation strategy:</p>
259
+
260
+ <div class="specs-table">
261
+ <div class="spec-row header">
262
+ <span>Layer</span>
263
+ <span>Qubits</span>
264
+ <span>Storage Strategy</span>
265
+ <span>Approx. Size</span>
266
+ </div>
267
+ <div class="spec-row">
268
+ <span>Layer 1</span>
269
+ <span>0-12</span>
270
+ <span>Dense Vector</span>
271
+ <span>~16MB</span>
272
+ </div>
273
+ <div class="spec-row">
274
+ <span>Layer 2</span>
275
+ <span>13-24</span>
276
+ <span>Compressed Tensor</span>
277
+ <span>~256MB</span>
278
+ </div>
279
+ <div class="spec-row">
280
+ <span>Layer 3</span>
281
+ <span>25-512</span>
282
+ <span>Sparse + MPS (Adaptive)</span>
283
+ <span>Variable</span>
284
+ </div>
285
+ </div>
286
+
287
+ <h3>Qiskit.js Integration</h3>
288
+ <p>The <strong>QuantumComputeEngine</strong> class orchestrates the backend:</p>
289
+ <ul class="process-list">
290
+ <li><strong>Initialization:</strong> Sets up WebGPU device and 16-bit floating point support.</li>
291
+ <li><strong>Circuit Building:</strong> Constructs circuits via Qiskit.js API (Gates: H, CNOT, RY, etc.).</li>
292
+ <li><strong>Compilation:</strong> Generates optimized WGSL shaders for the specific gate sequence.</li>
293
+ <li><strong>Execution:</strong> Dispatches compute shaders to GPU for parallel state evolution.</li>
294
+ <li><strong>Analysis:</strong> Extracts metrics (Entropy, Fidelity) for the Bridge.</li>
295
+ </ul>
296
+ </div>
297
+ </section>
298
+
299
+ <!-- Detailed Specs / Development -->
300
+ <section id="specs" class="section">
301
+ <div class="document-wrapper glass-panel">
302
+ <h1>Development & Optimization</h1>
303
+
304
+ <div class="two-col">
305
+ <div>
306
+ <h3>Quantum-Classical Bridge</h3>
307
+ <p>The critical link between quantum randomness and creative output.</p>
308
+ <ul>
309
+ <li><strong>Feature Extraction:</strong> Pulls entropy and phase data from the quantum state.</li>
310
+ <li><strong>Adaptive Sampling:</strong> Dynamically adjusts quantum influence based on generation complexity.</li>
311
+ <li><strong>Injection Pipeline:</strong> Modifies Text Encoder Latents, modulates Diffusion Noise, or alters VAE Decoding.</li>
312
+ </ul>
313
+ </div>
314
+ <div>
315
+ <h3>Optimization Strategies</h3>
316
+ <ul>
317
+ <li><strong>WebGPU Acceleration:</strong> Parallel matrix operations on consumer GPUs.</li>
318
+                        <li><strong>Circuit Decomposition:</strong> Simplifying gates before compilation.</li>
319
+ <li><strong>Hybrid Inference:</strong> Interleaving CPU classical tasks with GPU quantum tasks.</li>
320
+ <li><strong>ONNX Runtime:</strong> Optimized execution for the classical diffusion model.</li>
321
+ </ul>
322
+ </div>
323
+ </div>
324
+
325
+ <h3>Phases of Development</h3>
326
+ <div class="timeline">
327
+ <div class="timeline-item">
328
+ <div class="phase">Phase 1</div>
329
+ <div class="desc"><strong>Environment & PoC:</strong> Setup Qiskit.js, WebGPU, and basic pipeline.</div>
330
+ </div>
331
+ <div class="timeline-item">
332
+ <div class="phase">Phase 2</div>
333
+ <div class="desc"><strong>Circuit Integration:</strong> Implement circuit templates and WGSL shader generation.</div>
334
+ </div>
335
+ <div class="timeline-item">
336
+ <div class="phase">Phase 3</div>
337
+ <div class="desc"><strong>Classical Backend:</strong> Integrate WAN 2.1 model via ONNX Runtime/Node.js.</div>
338
+ </div>
339
+ <div class="timeline-item">
340
+ <div class="phase">Phase 4</div>
341
+ <div class="desc"><strong>The Bridge:</strong> Implement parameter injection logic and adaptive sampling.</div>
342
+ </div>
343
+ <div class="timeline-item">
344
+ <div class="phase">Phase 5</div>
345
+ <div class="desc"><strong>UI/UX:</strong> Build visualization dashboard and interactivity.</div>
346
+ </div>
347
+ </div>
348
+ </div>
349
+ </section>
350
+ </main>
351
+ </div>
352
+
353
+ <!-- Load scripts with proper error handling -->
354
+ <script>
355
+ // Basic error handling for script loading
356
+ window.addEventListener('error', function(e) {
357
+ console.error('Script error:', e.error);
358
+ });
359
+
360
+ // Function to load scripts with proper error handling
361
+ function loadScript(src, id) {
362
+ return new Promise((resolve, reject) => {
363
+ const script = document.createElement('script');
364
+ script.src = src;
365
+ script.onload = () => {
366
+ console.log(`${id} script loaded`);
367
+ resolve();
368
+ };
369
+ script.onerror = (error) => {
370
+ console.error(`Failed to load ${id}:`, error);
371
+ reject(error);
372
+ };
373
+ document.head.appendChild(script);
374
+ });
375
+ }
376
+
377
+ // Initialize the application when DOM is loaded
378
+ document.addEventListener('DOMContentLoaded', () => {
379
+ console.log('DOM loaded, initializing application...');
380
+
381
+ // Initialize architecture diagram function if it doesn't exist
382
+ if (!window.drawArchitectureDiagram) {
383
+ window.drawArchitectureDiagram = function() {
384
+ console.log('Drawing architecture diagram');
385
+ const canvas = document.getElementById('architecture-canvas');
386
+ if (canvas) {
387
+ const ctx = canvas.getContext('2d');
388
+ canvas.width = canvas.parentElement.clientWidth;
389
+ canvas.height = 400;
390
+
391
+ // Draw a simple placeholder architecture diagram
392
+ ctx.fillStyle = '#00f0ff';
393
+ ctx.font = '16px Arial';
394
+ ctx.textAlign = 'center';
395
+
396
+ const layers = [
397
+ 'Browser Client Layer',
398
+ 'Quantum Compute Backend',
399
+ 'Web Backend Server',
400
+ 'Storage & Cache'
401
+ ];
402
+
403
+ const boxWidth = canvas.width * 0.7;
404
+ const boxHeight = 60;
405
+ const startX = (canvas.width - boxWidth) / 2;
406
+
407
+ layers.forEach((layer, i) => {
408
+ const y = 50 + i * 90;
409
+
410
+ // Draw box
411
+ ctx.strokeStyle = '#00f0ff';
412
+ ctx.lineWidth = 2;
413
+ ctx.strokeRect(startX, y, boxWidth, boxHeight);
414
+
415
+ // Draw text
416
+ ctx.fillStyle = '#00f0ff';
417
+ ctx.fillText(layer, canvas.width / 2, y + boxHeight / 2 + 5);
418
+
419
+ // Draw arrow
420
+ if (i < layers.length - 1) {
421
+ ctx.beginPath();
422
+ ctx.moveTo(canvas.width / 2, y + boxHeight);
423
+ ctx.lineTo(canvas.width / 2, y + boxHeight + 30);
424
+ ctx.stroke();
425
+
426
+ // Arrowhead
427
+ ctx.beginPath();
428
+ ctx.moveTo(canvas.width / 2 - 10, y + boxHeight + 20);
429
+ ctx.lineTo(canvas.width / 2, y + boxHeight + 30);
430
+ ctx.lineTo(canvas.width / 2 + 10, y + boxHeight + 20);
431
+ ctx.stroke();
432
+ }
433
+ });
434
+ }
435
+ };
436
+ }
437
+
438
+ // Load all required scripts
439
+ Promise.all([
440
+ loadScript('quantum-viz.js', 'quantum-viz'),
441
+ loadScript('app.js', 'app')
442
+ ]).then(() => {
443
+ console.log('All scripts loaded successfully');
444
+
445
+ // Initialize the MainSystemSimulator
446
+ if (window.MainSystemSimulator) {
447
+ window.simulator = new window.MainSystemSimulator();
448
+ console.log('MainSystemSimulator initialized successfully');
449
+ } else {
450
+ console.error('MainSystemSimulator class not found');
451
+ }
452
+ })
453
+ .catch(error => {
454
+ console.error('Failed to load scripts:', error);
455
+ });
456
+ });
457
+ </script>
458
+ </body>
459
+ </html>
quantum-viz.js ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Enhanced Quantum Visualizations with Vivid Effects
2
+
3
class QuantumCircuitViz {
    /**
     * Animated quantum-circuit view: 8 qubit wires with randomly generated
     * gates, and glowing "photons" travelling along the wires. Visual density
     * scales with the simulator's influence/depth parameters.
     */
    constructor() {
        this.container = document.getElementById('view-circuit');
        this.canvas = document.getElementById('quantum-circuit-canvas');
        this.ctx = this.canvas?.getContext('2d');

        this.qubits = 8; // Fixed for visualization
        this.gates = [];          // array of columns; each column is a list of gate descriptors
        this.photons = [];        // moving dots; x is a 0..1 fraction along the wire
        this.currentInfluence = 0;
        this.currentDepth = 0;

        this.generateRandomGates(0); // Initial generation

        // Observe visibility changes for proper initialization
        // (the canvas may be hidden inside an inactive tab at construction).
        if (this.container) {
            const observer = new ResizeObserver(() => this.resize());
            observer.observe(this.container);
        }

        this.resize();
        this.animate(0);
    }

    resize() {
        if (!this.container || !this.canvas) return;
        const rect = this.container.getBoundingClientRect();
        // Skip zero-size rects (hidden tab) so we don't blank the canvas.
        if (rect.width > 0 && rect.height > 0) {
            this.canvas.width = rect.width;
            this.canvas.height = rect.height;
        }
    }

    /**
     * Rebuild the random gate grid. Higher depthModifier adds more columns
     * (capped at 20); higher currentInfluence raises per-qubit gate density.
     */
    generateRandomGates(depthModifier = 0) {
        this.gates = [];
        const types = ['H', 'X', 'Y', 'Z', 'CNOT', 'P', 'T', 'S'];
        const baseSteps = 10;
        // Increase complexity based on depth
        const steps = Math.min(20, baseSteps + Math.floor(depthModifier / 2));

        for (let i = 0; i < steps; i++) {
            let col = [];
            let usedQubits = new Set();

            for (let q = 0; q < this.qubits; q++) {
                if (usedQubits.has(q)) continue;

                // Influence can increase the chance of gates appearing
                if (Math.random() > (0.6 - (this.currentInfluence / 200))) { // More gates with higher influence
                    const type = types[Math.floor(Math.random() * types.length)];

                    if (type === 'CNOT' && q < this.qubits - 1) {
                        // Two-qubit gate: control dot on q, target symbol on q+1.
                        col.push({ type: '•', qubit: q, target: q + 1 });
                        col.push({ type: '⊕', qubit: q + 1 });
                        usedQubits.add(q);
                        usedQubits.add(q + 1);
                    } else {
                        // NOTE: a 'CNOT' drawn on the last wire falls through here
                        // and renders as a labelled box — cosmetic only.
                        col.push({ type: type, qubit: q });
                        usedQubits.add(q);
                    }
                }
            }
            this.gates.push(col);
        }
    }

    /** Update influence/depth from the simulator and refresh the visuals. */
    updateVizParameters(influence, depth) {
        this.currentInfluence = influence;
        this.currentDepth = depth;
        // Regenerate gates with new depth influence
        this.generateRandomGates(depth);
        // Trigger a burst of activity proportional to influence
        const photonBurst = Math.floor(influence / 10);
        for (let i = 0; i < photonBurst; i++) {
            this.spawnPhoton();
        }
    }

    // Kept for backward compatibility with older simulator code paths;
    // prefer updateVizParameters().
    randomize() {
        this.generateRandomGates(this.currentDepth);
        for (let i = 0; i < 8; i++) {
            this.spawnPhoton();
        }
    }

    spawnPhoton() {
        this.photons.push({
            x: 0,
            y: Math.floor(Math.random() * this.qubits),
            speed: (0.02 + Math.random() * 0.04) * (1 + this.currentInfluence / 100), // Faster with more influence
            color: Math.random() > 0.5 ? '#00f0ff' : '#7000ff',
            size: 2 + Math.random() * 3 * (1 + this.currentInfluence / 200) // Larger with more influence
        });
    }

    draw(time) {
        if (!this.ctx) return;
        const w = this.canvas.width;
        const h = this.canvas.height;

        this.ctx.fillStyle = '#050510';
        this.ctx.fillRect(0, 0, w, h);

        const rowH = h / (this.qubits + 1);
        const colW = w / (this.gates.length + 1);

        // Draw Qubit Wires
        this.ctx.shadowBlur = 0;
        for (let i = 0; i < this.qubits; i++) {
            const y = rowH * (i + 1);

            this.ctx.beginPath();
            this.ctx.strokeStyle = 'rgba(255, 255, 255, 0.1)';
            this.ctx.lineWidth = 1;
            this.ctx.moveTo(40, y);
            this.ctx.lineTo(w - 20, y);
            this.ctx.stroke();

            // Wire label
            this.ctx.fillStyle = 'rgba(0, 240, 255, 0.8)';
            this.ctx.font = '11px "Space Mono"';
            this.ctx.fillText(`|0⟩`, 10, y + 4);
        }

        // Update and Draw Photons (Data flow)
        // Spawn rate influenced by 'influence'
        if (Math.random() > (0.92 - this.currentInfluence / 500)) this.spawnPhoton();

        // Iterate backwards so splice() doesn't skip the next element
        // (forEach + splice silently skips one photon per removal).
        for (let i = this.photons.length - 1; i >= 0; i--) {
            const p = this.photons[i];
            p.x += p.speed;
            const px = 40 + p.x * (w - 60);
            const py = rowH * (p.y + 1);

            this.ctx.shadowBlur = 10 * (1 + this.currentInfluence / 100); // Larger glow with more influence
            this.ctx.shadowColor = p.color;
            this.ctx.fillStyle = p.color;

            this.ctx.beginPath();
            this.ctx.arc(px, py, p.size, 0, Math.PI * 2);
            this.ctx.fill();

            if (p.x > 1) this.photons.splice(i, 1);
        }

        // Draw Gates
        this.gates.forEach((col, xIndex) => {
            const x = 60 + xIndex * colW;

            col.forEach(gate => {
                const y = rowH * (gate.qubit + 1);

                this.ctx.shadowBlur = 12 * (1 + this.currentInfluence / 100); // Larger glow with more influence
                this.ctx.shadowColor = 'rgba(112, 0, 255, 0.4)';

                if (gate.type === '•') {
                    // Control Dot
                    this.ctx.fillStyle = '#fff';
                    this.ctx.beginPath();
                    this.ctx.arc(x, y, 4, 0, Math.PI * 2);
                    this.ctx.fill();

                    // Vertical line down to the target qubit
                    this.ctx.strokeStyle = '#fff';
                    this.ctx.lineWidth = 2;
                    this.ctx.beginPath();
                    this.ctx.moveTo(x, y);
                    this.ctx.lineTo(x, rowH * (gate.target + 1));
                    this.ctx.stroke();
                }
                else if (gate.type === '⊕') {
                    // Target CNOT: circle with a plus
                    this.ctx.fillStyle = '#000';
                    this.ctx.strokeStyle = '#fff';
                    this.ctx.lineWidth = 2;
                    this.ctx.beginPath();
                    this.ctx.arc(x, y, 8, 0, Math.PI * 2);
                    this.ctx.fill();
                    this.ctx.stroke();
                    // Plus
                    this.ctx.beginPath();
                    this.ctx.moveTo(x - 5, y);
                    this.ctx.lineTo(x + 5, y);
                    this.ctx.moveTo(x, y - 5);
                    this.ctx.lineTo(x, y + 5);
                    this.ctx.stroke();
                }
                else {
                    // Standard single-qubit gate box; Hadamard gets the purple fill
                    const isH = gate.type === 'H';
                    this.ctx.fillStyle = isH ? '#7000ff' : '#050510';
                    this.ctx.strokeStyle = '#00f0ff';
                    this.ctx.shadowColor = isH ? '#7000ff' : '#00f0ff';

                    this.ctx.fillRect(x - 12, y - 12, 24, 24);
                    this.ctx.strokeRect(x - 12, y - 12, 24, 24);

                    this.ctx.fillStyle = '#fff';
                    this.ctx.textAlign = 'center';
                    this.ctx.font = 'bold 12px "Space Mono"';
                    this.ctx.shadowBlur = 0;
                    this.ctx.fillText(gate.type, x, y + 4);
                }
            });
        });
    }

    animate(time) {
        this.draw(time);
        // Auto-correction for size if container changed while hidden
        if (this.container && this.canvas && this.container.clientWidth !== this.canvas.width && this.container.clientWidth > 0) {
            this.resize();
        }
        requestAnimationFrame((t) => this.animate(t));
    }
}
221
+
222
class StateVectorViz {
    /**
     * Bar-graph view of a simulated 64-bin state vector: magnitudes decay and
     * fluctuate ("quantum foam"), phases drift, and the simulator can inject
     * energy spikes via updateVizParameters().
     */
    constructor() {
        this.container = document.getElementById('view-state');
        this.canvas = document.getElementById('state-vector-canvas');
        this.ctx = this.canvas?.getContext('2d');

        // Complex amplitudes simulation (Magnitude + Phase)
        this.bins = 64;
        this.magnitudes = new Array(this.bins).fill(0).map(() => Math.random() * 0.4);
        this.phases = new Array(this.bins).fill(0).map(() => Math.random() * Math.PI * 2);
        this.currentInfluence = 0;
        this.currentDepth = 0;

        if (this.container) {
            new ResizeObserver(() => this.resize()).observe(this.container);
        }

        this.resize();
        this.animate(0);
    }

    resize() {
        if (!this.container || !this.canvas) return;
        const rect = this.container.getBoundingClientRect();
        // Skip zero-size rects (hidden tab) so we don't blank the canvas.
        if (rect.width > 0 && rect.height > 0) {
            this.canvas.width = rect.width;
            this.canvas.height = rect.height;
        }
    }

    /** Update influence/depth from the simulator and inject energy bursts. */
    updateVizParameters(influence, depth) {
        this.currentInfluence = influence;
        this.currentDepth = depth;
        // Trigger a burst of energy proportional to influence
        const spikeBurst = Math.floor(influence / 20);
        for (let i = 0; i < spikeBurst; i++) {
            this.spike();
        }
    }

    spike() {
        // Add energy to the system; more intense with higher influence
        const spikeAmount = 0.5 * (1 + this.currentInfluence / 50);
        for (let i = 0; i < 10; i++) {
            const idx = Math.floor(Math.random() * this.bins);
            this.magnitudes[idx] = Math.min(1.0, this.magnitudes[idx] + spikeAmount);
        }
    }

    draw(time) {
        if (!this.ctx) return;
        const w = this.canvas.width;
        const h = this.canvas.height;

        this.ctx.clearRect(0, 0, w, h);

        // Background grid (vertical lines every 40px)
        this.ctx.strokeStyle = 'rgba(255,255,255,0.05)';
        this.ctx.lineWidth = 1;
        this.ctx.beginPath();
        for (let x = 0; x < w; x += 40) { this.ctx.moveTo(x, 0); this.ctx.lineTo(x, h); }
        this.ctx.stroke();

        const barW = w / this.bins;

        for (let i = 0; i < this.bins; i++) {
            // Physics update:
            // decay faster with higher influence
            const decayRate = 0.98 - (this.currentInfluence / 2000);
            this.magnitudes[i] = Math.max(0.05, this.magnitudes[i] * decayRate);
            // Random fluctuation (quantum foam), more pronounced with higher influence
            if (Math.random() > (0.9 - this.currentInfluence / 1000)) this.magnitudes[i] += 0.05 * (1 + this.currentInfluence / 100);

            // Phase fluctuation, more chaotic with higher depth and influence
            this.phases[i] += (Math.random() - 0.5) * (0.2 + this.currentInfluence / 500 + this.currentDepth / 100);

            const mag = this.magnitudes[i];
            const barH = mag * h * 0.9;
            const x = i * barW;
            const y = h - barH;

            // Color based on index (frequency) + magnitude intensity
            // Map index to Hue: 200 (Blue) -> 300 (Purple)
            // (was "* 10", which only spanned 200-210 and contradicted
            // the intended cyan-to-purple gradient of the UI palette)
            const hue = 200 + (i / this.bins) * 100;
            const color = `hsl(${hue}, 100%, 50%)`;

            this.ctx.fillStyle = color;
            this.ctx.fillRect(x, y, barW - 1, barH);
        }

        // Occasional random re-seed of a bin, more frequent with influence/depth
        if (Math.random() > (0.95 - this.currentInfluence / 1000 - this.currentDepth / 500)) {
            const idx = Math.floor(Math.random() * this.bins);
            this.magnitudes[idx] = Math.random();
        }
    }

    animate(time) {
        this.draw(time);
        // Forward the rAF timestamp like QuantumCircuitViz does, so draw()
        // receives a defined time value on every frame.
        requestAnimationFrame((t) => this.animate(t));
    }
}

// Expose globally for simulator to trigger
window.circuitViz = new QuantumCircuitViz();
window.stateViz = new StateVectorViz();
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn[standard]
3
+ python-multipart
4
+ transformers
5
+ torch
6
+ Pillow
7
+ Flask
8
+ flask-cors
9
+ mlc-llm
10
+ sentencepiece
11
+ outlines
simulator.js ADDED
@@ -0,0 +1,726 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Advanced Simulation Controller

// Dynamically set backend URL for HuggingFace Spaces compatibility:
// same-origin requests keep the frontend working wherever the Space is hosted.
var BACKEND_URL = window.location.origin;
4
+
5
+ class SystemSimulator {
6
    constructor() {
        // Cached DOM handles for the log console and the output render target.
        this.logs = document.getElementById('system-logs');
        this.outputCanvas = document.getElementById('output-canvas');
        this.outputCtx = this.outputCanvas ? this.outputCanvas.getContext('2d') : null;

        this.isGenerating = false; // re-entrancy guard for startGeneration()
        this.sourceImage = null;   // HTMLImageElement used as the I2V seed frame
        this.config = {
            prompt: '',
            influence: 5, // Default 5%
            depth: 16, // Default 16 layers
            method: 'adaptive'
        };

        // Director Mode State
        this.directorMode = true;
        this.movieFrames = []; // Stores ImageBitmaps or DataURLs
        this.accumulatedFrames = 0;

        this.init();
    }
27
+
28
+ async callBackendApi(endpoint, data) {
29
+ try {
30
+ // Make sure we're using the correct endpoint format
31
+ const apiEndpoint = endpoint.startsWith('/') ? endpoint : `/${endpoint}`;
32
+ console.log(`Calling API: ${BACKEND_URL}${apiEndpoint}`);
33
+
34
+ const response = await fetch(`${BACKEND_URL}${apiEndpoint}`, {
35
+ method: 'POST',
36
+ headers: {
37
+ 'Content-Type': 'application/json',
38
+ },
39
+ body: JSON.stringify(data),
40
+ });
41
+
42
+ // Check if the response is actually JSON
43
+ const contentType = response.headers.get('content-type');
44
+ if (!contentType || !contentType.includes('application/json')) {
45
+ // If it's not JSON, get the text to see what we received
46
+ const textResponse = await response.text();
47
+ console.error(`Expected JSON response, got ${contentType || 'unknown'}:`, textResponse);
48
+ throw new Error(`Expected JSON response, got ${contentType || 'unknown'}`);
49
+ }
50
+
51
+ const jsonResponse = await response.json();
52
+ if (!response.ok) {
53
+ throw new Error(jsonResponse.error || `Backend error: ${response.statusText}`);
54
+ }
55
+ return jsonResponse;
56
+ } catch (error) {
57
+ this.log(`Backend API Error (${endpoint}): ${error.message}`, 'error');
58
+ console.error(`Backend API Error (${endpoint}):`, error);
59
+ throw error; // Re-throw to be caught by the calling function
60
+ }
61
+ }
62
+
63
    // Wire up UI events, size the canvas to its container, and paint the
    // idle noise frame so the output area is never blank.
    init() {
        this.setupListeners();
        this.resizeCanvas();
        window.addEventListener('resize', () => this.resizeCanvas());

        // Initial visual state
        this.drawStaticNoise();
    }
71
+
72
    /**
     * Attach all UI event handlers: image drag/drop upload, Director Mode
     * controls, the influence/depth sliders, visualization tabs, and the
     * start button. Bails out early if the core upload elements are missing.
     */
    setupListeners() {
        // Image Upload Handling
        const dropZone = document.getElementById('drop-zone');
        const fileInput = document.getElementById('image-input');

        if (!dropZone || !fileInput) {
            console.error('Required DOM elements not found');
            return;
        }

        dropZone.addEventListener('click', () => fileInput.click());

        dropZone.addEventListener('dragover', (e) => {
            e.preventDefault();
            dropZone.classList.add('drag-over');
        });

        dropZone.addEventListener('dragleave', () => {
            dropZone.classList.remove('drag-over');
        });

        dropZone.addEventListener('drop', (e) => {
            e.preventDefault();
            dropZone.classList.remove('drag-over');
            if(e.dataTransfer.files.length) {
                this.handleImage(e.dataTransfer.files[0]);
            }
        });

        fileInput.addEventListener('change', (e) => {
            if(e.target.files.length) {
                this.handleImage(e.target.files[0]);
            }
        });

        // Director Mode Listeners
        const directorToggle = document.getElementById('director-mode-toggle');
        if (directorToggle) {
            directorToggle.addEventListener('change', (e) => {
                this.directorMode = e.target.checked;
                this.log(`Director Mode: ${this.directorMode ? 'ENABLED' : 'DISABLED'}`, 'info');
            });
        }

        const downloadBtn = document.getElementById('download-btn');
        const resetBtn = document.getElementById('reset-movie-btn');

        if (downloadBtn) downloadBtn.addEventListener('click', () => this.downloadMovie());
        if (resetBtn) resetBtn.addEventListener('click', () => this.resetMovie());

        // Inputs: sliders mirror their value into a label and into this.config
        const quantumInfluence = document.getElementById('quantum-influence');
        const entanglementDepth = document.getElementById('entanglement-depth');
        const influenceVal = document.getElementById('influence-val');
        const depthVal = document.getElementById('depth-val');

        if (quantumInfluence && influenceVal) {
            quantumInfluence.addEventListener('input', (e) => {
                influenceVal.textContent = `${e.target.value}%`;
                this.config.influence = parseInt(e.target.value);
            });
        }

        if (entanglementDepth && depthVal) {
            entanglementDepth.addEventListener('input', (e) => {
                depthVal.textContent = e.target.value;
                this.config.depth = parseInt(e.target.value);
            });
        }

        // Tabs: exclusive selection — deactivate all, then activate the clicked pair
        document.querySelectorAll('.viz-tab').forEach(tab => {
            tab.addEventListener('click', () => {
                document.querySelectorAll('.viz-tab').forEach(t => t.classList.remove('active'));
                document.querySelectorAll('.viz-view').forEach(v => v.classList.remove('active'));

                tab.classList.add('active');
                const viewId = `view-${tab.dataset.view}`;
                const viewElement = document.getElementById(viewId);
                if (viewElement) viewElement.classList.add('active');
            });
        });

        // Start Button
        const startBtn = document.getElementById('start-btn');
        if (startBtn) {
            startBtn.addEventListener('click', () => this.startGeneration());
        }
    }
161
+
162
    /**
     * Load a user-supplied image file, show it in the preview pane, and send
     * it to the backend for CLIP feature extraction. Non-image files are
     * ignored silently; a failed CLIP analysis only logs (non-fatal).
     */
    handleImage(file) {
        if (!file || !file.type.startsWith('image/')) return;

        const reader = new FileReader();
        reader.onload = async (e) => {
            this.sourceImage = new Image();
            this.sourceImage.onload = async () => {
                // Show preview
                const preview = document.getElementById('preview-img');
                if (preview) {
                    preview.src = this.sourceImage.src;
                    preview.classList.remove('hidden');
                }

                // Fade out the drop-zone hint once an image is present
                const dropContent = document.querySelector('.drop-content');
                if (dropContent) dropContent.style.opacity = '0';

                this.log(`Image loaded: ${file.name} (${this.sourceImage.width}x${this.sourceImage.height})`, 'success');

                // Call backend for CLIP analysis
                try {
                    await this.analyzeImageContext(e.target.result);
                } catch (error) {
                    this.log(`Failed CLIP analysis for ${file.name}: ${error.message}`, 'error');
                }
            };
            this.sourceImage.src = e.target.result;
        };
        reader.readAsDataURL(file);
    }
192
+
193
+ async analyzeImageContext(imageDataURL) {
194
+ this.log('CLIP-Encoder: Sending image for feature extraction...', 'info');
195
+ try {
196
+ const response = await this.callBackendApi('api/embed_image', { image: imageDataURL });
197
+ const embeddings = response.embeddings;
198
+ this.log(`CLIP-Encoder: Extracted feature vector [${embeddings[0].toFixed(4)}, ${embeddings[1].toFixed(4)}, ${embeddings[2].toFixed(4)}, ...]`, 'success');
199
+ } catch (error) {
200
+ this.log(`CLIP-Encoder: Failed to get embeddings. Is backend running? ${error.message}`, 'error');
201
+ throw error;
202
+ }
203
+ }
204
+
205
+ updateDirectorUI() {
206
+ const totalFramesElement = document.getElementById('total-frames');
207
+ const downloadBtn = document.getElementById('download-btn');
208
+ const resetBtn = document.getElementById('reset-movie-btn');
209
+
210
+ if (totalFramesElement) totalFramesElement.textContent = `${this.movieFrames.length} FRAMES`;
211
+ if (downloadBtn) downloadBtn.disabled = this.movieFrames.length === 0;
212
+ if (resetBtn) resetBtn.disabled = this.movieFrames.length === 0;
213
+ }
214
+
215
+ resetMovie() {
216
+ this.movieFrames = [];
217
+ this.updateDirectorUI();
218
+ this.log('Director Mode: Timeline cleared.', 'warn');
219
+ }
220
+
221
+ resizeCanvas() {
222
+ if (!this.outputCanvas) return;
223
+ const rect = this.outputCanvas.parentElement.getBoundingClientRect();
224
+ this.outputCanvas.width = rect.width;
225
+ this.outputCanvas.height = rect.height;
226
+ if (!this.isGenerating) this.drawStaticNoise();
227
+ }
228
+
229
    // Append a timestamped line to the on-page log console and keep it
    // scrolled to the bottom. `type` maps to a CSS class on the line
    // (e.g. 'info', 'success', 'warn', 'error', 'secondary').
    // NOTE(review): message is inserted via innerHTML, so callers must not
    // pass untrusted markup — all current call sites build strings internally.
    log(message, type = 'info') {
        if (!this.logs) return;

        const div = document.createElement('div');
        div.className = `log-line ${type}`;
        const time = new Date().toLocaleTimeString('en-US', { hour12: false });
        div.innerHTML = `<span class="ts">[${time}]</span> ${message}`;
        this.logs.appendChild(div);
        this.logs.scrollTop = this.logs.scrollHeight;
    }
239
+
240
    /**
     * Entry point for a full generation run. Validates that a source image is
     * present, checks backend health, then runs the four pipeline phases in
     * order. UI controls are disabled for the duration and always restored in
     * the finally block (covering success, error, and early returns).
     */
    async startGeneration() {
        if (this.isGenerating) return;

        // Disable UI elements during generation
        this.isGenerating = true;
        const startBtn = document.getElementById('start-btn');
        if (startBtn) startBtn.disabled = true;

        const promptInput = document.getElementById('prompt-input');
        const imageInput = document.getElementById('image-input');
        const quantumInfluence = document.getElementById('quantum-influence');
        const entanglementDepth = document.getElementById('entanglement-depth');
        const samplingMethod = document.getElementById('sampling-method');

        if (promptInput) promptInput.disabled = true;
        if (imageInput) imageInput.disabled = true;
        if (quantumInfluence) quantumInfluence.disabled = true;
        if (entanglementDepth) entanglementDepth.disabled = true;
        if (samplingMethod) samplingMethod.disabled = true;

        const generationStatsElement = document.getElementById('generation-stats');
        if (generationStatsElement) generationStatsElement.style.display = 'block';

        try {
            if (!this.sourceImage) {
                this.log('Error: Source image required for I2V generation.', 'error');
                alert("Please upload a source image first.");
                return;
            }

            // Fall back to a default prompt when the field is missing or empty
            const prompt = promptInput ? promptInput.value.trim() || "Quantum interpolation" : "Quantum interpolation";

            // --- Backend Health Check ---
            this.log('Checking backend availability...', 'info');
            try {
                const health = await this.callBackendApi('api/health');
                this.log(`Backend Status: ${health.status} (LLM: ${health.llm_status}, CLIP: ${health.clip_status})`, 'success');
                if (health.llm_status.includes("not loaded") || health.clip_status.includes("not loaded")) {
                    throw new Error("One or more AI models not loaded on backend. Check backend console.");
                }
            } catch (error) {
                this.log(`Backend not available or unhealthy: ${error.message}. Please ensure your Python Flask backend is running.`, 'error');
                alert(`Backend Error: ${error.message}. Please start the backend.`);
                return;
            }
            // --- End Backend Health Check ---

            this.log(`Initializing I2V pipeline for: "${prompt.substring(0, 30)}..."`, 'info');

            // Phase 1: Initialization
            await this.phaseInitialization();

            // Phase 2: Quantum Circuit
            await this.phaseQuantumCircuit();

            // Phase 3: WebGPU Compute
            await this.phaseWebGPU();

            // Phase 4: Bridge & Diffusion (Real Emulation)
            await this.phaseRealDiffusion(prompt);

            this.log('Generation Sequence Complete.', 'success');
            if (generationStatsElement) generationStatsElement.innerHTML = 'GENERATION COMPLETE';

            // DIRECTOR MODE: PREP NEXT FRAME
            if (this.directorMode && this.movieFrames.length > 0) {
                this.prepareNextContext();
            }

        } catch (error) {
            this.log(`System Error during generation: ${error.message}`, 'error');
            if (generationStatsElement) generationStatsElement.innerHTML = `ERROR: ${error.message}`;
            console.error(error);
        } finally {
            // Re-enable UI elements
            this.isGenerating = false;
            if (startBtn) startBtn.disabled = false;
            if (promptInput) promptInput.disabled = false;
            if (imageInput) imageInput.disabled = false;
            if (quantumInfluence) quantumInfluence.disabled = false;
            if (entanglementDepth) entanglementDepth.disabled = false;
            if (samplingMethod) samplingMethod.disabled = false;
        }
    }
324
+
325
    /**
     * Director Mode continuity: take the last recorded frame and install it
     * as the new source image, so the next generation run continues from
     * where the previous sequence ended.
     */
    prepareNextContext() {
        if (!this.movieFrames.length) return;

        // Get the last frame from the movie array
        const lastFrameBitmap = this.movieFrames[this.movieFrames.length - 1];

        // Create a temp canvas to extract the image
        const canvas = document.createElement('canvas');
        canvas.width = this.outputCanvas.width;
        canvas.height = this.outputCanvas.height;
        const ctx = canvas.getContext('2d');
        ctx.drawImage(lastFrameBitmap, 0, 0);

        // Convert to Image object for sourceImage
        const newUrl = canvas.toDataURL();
        const nextImg = new Image();
        nextImg.onload = () => {
            this.sourceImage = nextImg;
            // Update Preview UI
            const preview = document.getElementById('preview-img');
            if (preview) preview.src = newUrl;
            this.log('Director Mode: Context refreshed. Last frame set as input for next sequence.', 'secondary');
        };
        nextImg.src = newUrl;
    }
350
+
351
+ async sleep(ms) {
352
+ return new Promise(r => setTimeout(r, ms));
353
+ }
354
+
355
+ async phaseInitialization() {
356
+ this.log('Allocating WebGPU buffers for I2V tensor...', 'info');
357
+ await this.sleep(600);
358
+ this.log('Quantizing source image to 512-dim latent space...', 'info');
359
+ await this.sleep(800);
360
+ }
361
+
362
+ async phaseQuantumCircuit() {
363
+ this.log(`Constructing ${this.config.depth}-layer quantum circuit...`, 'info');
364
+
365
+ // Trigger Viz animation if available globally
366
+ if (window.circuitViz) window.circuitViz.updateVizParameters(this.config.influence, this.config.depth);
367
+
368
+ await this.sleep(1000);
369
+ this.log('Applying Hadamard gates to initialization layer...', 'info');
370
+ await this.sleep(400);
371
+ this.log(`Entangling qubits 0-511 with depth ${this.config.depth}...`, 'info');
372
+ await this.sleep(800);
373
+ }
374
+
375
+ async phaseWebGPU() {
376
+ this.log('Compiling circuit to WGSL shaders...', 'info');
377
+ await this.sleep(600);
378
+ this.log('Injecting quantum noise into CLIP embeddings...', 'info');
379
+
380
+ // Simulate intense calculation, trigger stateViz with parameters
381
+ if (window.stateViz) window.stateViz.updateVizParameters(this.config.influence, this.config.depth);
382
+
383
+ // Keep sleep for visual pacing
384
+ for (let i = 0; i < 5; i++) {
385
+ await this.sleep(200);
386
+ }
387
+
388
+ const entropy = (Math.random() * 3 + 0.5).toFixed(4);
389
+ const entropyElement = document.getElementById('entropy-value');
390
+ if (entropyElement) entropyElement.textContent = entropy;
391
+ this.log(`Latent perturbation complete. Entropy: ${entropy}`, 'success');
392
+ }
393
+
394
    /**
     * Phase 4: drive the frame-by-frame generation loop.
     * For each of 48 frames: request LLM transformation guidance from the
     * backend, render the transition locally on canvas, then feed the result
     * back in as the next frame's input. Records frames into the Director
     * Mode timeline when enabled. Errors propagate to startGeneration().
     */
    async phaseRealDiffusion(prompt) {
        this.log('Starting Frame-by-Frame Quantum Diffusion...', 'warn');

        // Switch tab to output to show the magic
        const outputTab = document.querySelector('[data-view="output"]');
        if (outputTab) outputTab.click();

        // Get initial image data from the source image
        let currentImage = this.sourceImage;
        const totalFrames = 48; // Total frames for the movie
        let currentFrameDataURL = currentImage.src; // Data URL of the current frame

        for (let frame = 0; frame < totalFrames; frame++) {
            this.log(`Requesting guidance for Frame ${frame + 1}/${totalFrames}...`, 'info');

            const generationStatsElement = document.getElementById('generation-stats');
            if (generationStatsElement) generationStatsElement.innerHTML = `GETTING GUIDANCE FOR FRAME ${frame + 1}/${totalFrames}<br>Quantum-Diffusing...`;

            // Call backend for LLM guidance on how to transform the current frame
            const guidanceResponse = await this.callBackendApi('api/generate_frame_guidance', {
                image: currentFrameDataURL,
                prompt: prompt,
                influence: this.config.influence,
                depth: this.config.depth,
                frame_number: frame
            });

            const llmGuidance = guidanceResponse.guidance;
            this.log(`LLM Guidance (Frame ${frame + 1}): ${llmGuidance.substring(0, 80)}...`, 'secondary');

            if (generationStatsElement) generationStatsElement.innerHTML = `RENDERING FRAME ${frame + 1}/${totalFrames}<br>Applying Quantum Effects...`;

            // Render the next frame based on LLM guidance and current image
            const newFrameDataURL = await this.renderFrameTransition(currentImage, this.config.influence, llmGuidance, frame);

            // Update currentImage for the next iteration
            currentImage = await this.loadImageFromDataURL(newFrameDataURL);
            currentFrameDataURL = newFrameDataURL; // Update dataURL as well

            // Director Mode: Record Frame
            if (this.directorMode && this.outputCanvas) {
                const bitmap = await createImageBitmap(this.outputCanvas);
                this.movieFrames.push(bitmap);
                this.updateDirectorUI();
            }

            await this.sleep(50); // Render speed
        }
    }
443
+
444
+ async loadImageFromDataURL(dataURL) {
445
+ return new Promise((resolve, reject) => {
446
+ const img = new Image();
447
+ img.onload = () => resolve(img);
448
+ img.onerror = reject;
449
+ img.src = dataURL;
450
+ });
451
+ }
452
+
453
+ async renderFrameTransition(currentImage, influence, llmGuidance, frameNumber) {
454
+ if (!this.outputCanvas || !this.outputCtx) {
455
+ console.error('Canvas not available for rendering');
456
+ return currentImage.src;
457
+ }
458
+
459
+ const w = this.outputCanvas.width;
460
+ const h = this.outputCanvas.height;
461
+ this.outputCtx.clearRect(0, 0, w, h); // Clear canvas for new frame
462
+
463
+ // Create a temporary canvas to draw the currentImage and apply effects
464
+ const tempCanvas = document.createElement('canvas');
465
+ tempCanvas.width = w;
466
+ tempCanvas.height = h;
467
+ const tempCtx = tempCanvas.getContext('2d');
468
+
469
+ // Draw the current image, scaled to fit
470
+ const aspectRatio = currentImage.width / currentImage.height;
471
+ let drawWidth = w;
472
+ let drawHeight = h;
473
+ if (w / h > aspectRatio) { // Canvas is wider than image
474
+ drawWidth = h * aspectRatio;
475
+ } else { // Canvas is taller than image
476
+ drawHeight = w / aspectRatio;
477
+ }
478
+ const offsetX = (w - drawWidth) / 2;
479
+ const offsetY = (h - drawHeight) / 2;
480
+ tempCtx.drawImage(currentImage, offsetX, offsetY, drawWidth, drawHeight);
481
+
482
+ // Get ImageData for pixel manipulation
483
+ let imageData = tempCtx.getImageData(0, 0, w, h);
484
+ let data = imageData.data;
485
+
486
+ // --- Parse LLM Guidance and apply effects ---
487
+ const instructions = llmGuidance.toLowerCase().split(',').map(s => s.trim());
488
+ let pixelShiftX = 0;
489
+ let pixelShiftY = 0;
490
+ let colorShiftR = 0;
491
+ let colorShiftG = 0;
492
+ let colorShiftB = 0;
493
+ let blurRadius = 0;
494
+ let zoomFactor = 1;
495
+ let staticOverlayOpacity = 0;
496
+
497
+ for (const instruction of instructions) {
498
+ if (instruction.includes("shift red by")) {
499
+ colorShiftR += parseInt(instruction.match(/by (-?\d+)/)?.[1] || "0");
500
+ } else if (instruction.includes("shift green by")) {
501
+ colorShiftG += parseInt(instruction.match(/by (-?\d+)/)?.[1] || "0");
502
+ } else if (instruction.includes("shift blue by")) {
503
+ colorShiftB += parseInt(instruction.match(/by (-?\d+)/)?.[1] || "0");
504
+ } else if (instruction.includes("pixel displacement x-axis")) {
505
+ pixelShiftX += parseInt(instruction.match(/random (-?\d+)px/)?.[1] || "0");
506
+ } else if (instruction.includes("pixel displacement y-axis")) {
507
+ pixelShiftY += parseInt(instruction.match(/random (-?\d+)px/)?.[1] || "0");
508
+ } else if (instruction.includes("apply gaussian blur radius")) {
509
+ blurRadius = Math.max(blurRadius, parseInt(instruction.match(/radius (\d+)/)?.[1] || "0"));
510
+ } else if (instruction.includes("zoom in")) {
511
+ zoomFactor *= (1 + parseFloat(instruction.match(/zoom in (\d+(\.\d+)?)/)?.[1] || "0"));
512
+ } else if (instruction.includes("zoom out")) {
513
+ zoomFactor /= (1 + parseFloat(instruction.match(/zoom out (\d+(\.\d+)?)/)?.[1] || "0"));
514
+ } else if (instruction.includes("static pattern opacity")) {
515
+ staticOverlayOpacity = Math.max(staticOverlayOpacity, parseFloat(instruction.match(/opacity (\d+(\.\d+)?)/)?.[1] || "0"));
516
+ }
517
+ // Add more parsing for other instructions...
518
+ }
519
+
520
+ // Apply pixel shifts and color changes
521
+ const tempImageData = tempCtx.createImageData(w, h);
522
+ const tempData = tempImageData.data;
523
+
524
+ for (let y = 0; y < h; y++) {
525
+ for (let x = 0; x < w; x++) {
526
+ const originalIndex = (y * w + x) * 4;
527
+
528
+ const shiftedX = (x - pixelShiftX + w) % w;
529
+ const shiftedY = (y - pixelShiftY + h) % h;
530
+ const shiftedIndex = (shiftedY * w + shiftedX) * 4;
531
+
532
+ if (shiftedIndex >= 0 && shiftedIndex < data.length) {
533
+ tempData[originalIndex] = Math.min(255, Math.max(0, data[shiftedIndex] + colorShiftR)); // Red
534
+ tempData[originalIndex + 1] = Math.min(255, Math.max(0, data[shiftedIndex + 1] + colorShiftG)); // Green
535
+ tempData[originalIndex + 2] = Math.min(255, Math.max(0, data[shiftedIndex + 2] + colorShiftB)); // Blue
536
+ tempData[originalIndex + 3] = data[shiftedIndex + 3]; // Alpha
537
+ } else {
538
+ // Handle out-of-bounds pixels (e.g., fill with black or transparent)
539
+ tempData[originalIndex] = 0;
540
+ tempData[originalIndex + 1] = 0;
541
+ tempData[originalIndex + 2] = 0;
542
+ tempData[originalIndex + 3] = 255;
543
+ }
544
+ }
545
+ }
546
+ imageData = tempImageData; // Update imageData with shifted pixels
547
+
548
+ // Apply blur (very basic box blur for performance, Gaussian is complex with pixel data)
549
+ if (blurRadius > 0) {
550
+ const blurredImageData = tempCtx.createImageData(w, h);
551
+ const blurredData = blurredImageData.data;
552
+ for (let y = 0; y < h; y++) {
553
+ for (let x = 0; x < w; x++) {
554
+ let rSum = 0, gSum = 0, bSum = 0, aSum = 0;
555
+ let count = 0;
556
+ for (let ky = -blurRadius; ky <= blurRadius; ky++) {
557
+ for (let kx = -blurRadius; kx <= blurRadius; kx++) {
558
+ const nx = x + kx;
559
+ const ny = y + ky;
560
+ if (nx >= 0 && nx < w && ny >= 0 && ny < h) {
561
+ const idx = (ny * w + nx) * 4;
562
+ rSum += data[idx];
563
+ gSum += data[idx + 1];
564
+ bSum += data[idx + 2];
565
+ aSum += data[idx + 3];
566
+ count++;
567
+ }
568
+ }
569
+ }
570
+ const outputIndex = (y * w + x) * 4;
571
+ blurredData[outputIndex] = rSum / count;
572
+ blurredData[outputIndex + 1] = gSum / count;
573
+ blurredData[outputIndex + 2] = bSum / count;
574
+ blurredData[outputIndex + 3] = aSum / count;
575
+ }
576
+ }
577
+ imageData = blurredImageData;
578
+ }
579
+
580
+ // Apply static overlay
581
+ if (staticOverlayOpacity > 0) {
582
+ for (let i = 0; i < imageData.data.length; i += 4) {
583
+ const staticValue = Math.random() * 255;
584
+ imageData.data[i] = (imageData.data[i] * (1 - staticOverlayOpacity)) + (staticValue * staticOverlayOpacity);
585
+ imageData.data[i+1] = (imageData.data[i+1] * (1 - staticOverlayOpacity)) + (staticValue * staticOverlayOpacity);
586
+ imageData.data[i+2] = (imageData.data[i+2] * (1 - staticOverlayOpacity)) + (staticValue * staticOverlayOpacity);
587
+ }
588
+ }
589
+
590
+ // Draw the processed ImageData back to the temporary canvas
591
+ tempCtx.putImageData(imageData, 0, 0);
592
+
593
+ // Apply zoom (done by redrawing tempCanvas onto outputCanvas)
594
+ const zoomedWidth = w * zoomFactor;
595
+ const zoomedHeight = h * zoomFactor;
596
+ const zoomOffsetX = (w - zoomedWidth) / 2;
597
+ const zoomOffsetY = (h - zoomedHeight) / 2;
598
+
599
+ this.outputCtx.drawImage(tempCanvas, zoomOffsetX, zoomOffsetY, zoomedWidth, zoomedHeight);
600
+
601
+ // Periodically draw circuit overlay if influence is high
602
+ if (influence > 50 && frameNumber % 5 === 0) {
603
+ this.drawCircuitOverlay();
604
+ }
605
+
606
+ // Convert the final rendered canvas state to a DataURL for the next iteration
607
+ return this.outputCanvas.toDataURL();
608
+ }
609
+
610
+ drawCircuitOverlay() {
611
+ if (!this.outputCtx) return;
612
+
613
+ const ctx = this.outputCtx;
614
+ const w = this.outputCanvas.width;
615
+ const h = this.outputCanvas.height;
616
+
617
+ ctx.strokeStyle = 'rgba(0, 240, 255, 0.3)';
618
+ ctx.lineWidth = 1;
619
+ ctx.beginPath();
620
+ const y = Math.random() * h;
621
+ ctx.moveTo(0, y);
622
+ ctx.lineTo(w, y);
623
+ ctx.stroke();
624
+
625
+ ctx.fillStyle = 'rgba(0, 240, 255, 0.5)';
626
+ // Attempt to get a more dynamic font size
627
+ const fontSize = Math.max(10, Math.min(w, h) / 30);
628
+ ctx.font = `${fontSize}px Arial`;
629
+ ctx.fillText(`Q-GATE-${Math.floor(Math.random()*100)}`, 10, y - 5);
630
+ }
631
+
632
+ drawStaticNoise() {
633
+ if (!this.outputCanvas || !this.outputCtx) return;
634
+
635
+ const w = this.outputCanvas.width;
636
+ const h = this.outputCanvas.height;
637
+ const id = this.outputCtx.createImageData(w, h);
638
+ const d = id.data;
639
+
640
+ for (let i = 0; i < d.length; i += 4) {
641
+ const v = Math.random() * 20; // Dark noise
642
+ d[i] = v; d[i+1] = v; d[i+2] = v + 10; d[i+3] = 255;
643
+ }
644
+ this.outputCtx.putImageData(id, 0, 0);
645
+ }
646
+
647
+ async downloadMovie() {
648
+ if (!this.movieFrames.length) return;
649
+
650
+ const btn = document.getElementById('download-btn');
651
+ const originalText = btn.innerHTML;
652
+ if (btn) {
653
+ btn.disabled = true;
654
+ btn.innerHTML = 'RENDER...';
655
+ }
656
+
657
+ this.log('Starting Movie Rendering...', 'info');
658
+
659
+ try {
660
+ // Create a hidden canvas for playback
661
+ const canvas = document.createElement('canvas');
662
+ canvas.width = this.outputCanvas.width;
663
+ canvas.height = this.outputCanvas.height;
664
+ const ctx = canvas.getContext('2d');
665
+
666
+ // Setup MediaRecorder
667
+ const stream = canvas.captureStream(30); // 30 FPS
668
+ const mimeType = MediaRecorder.isTypeSupported('video/webm;codecs=vp9')
669
+ ? 'video/webm;codecs=vp9'
670
+ : 'video/webm';
671
+
672
+ const recorder = new MediaRecorder(stream, {
673
+ mimeType: mimeType,
674
+ videoBitsPerSecond: 5000000 // 5Mbps
675
+ });
676
+
677
+ const chunks = [];
678
+ recorder.ondataavailable = (e) => {
679
+ if (e.data.size > 0) chunks.push(e.data);
680
+ };
681
+
682
+ recorder.onstop = () => {
683
+ const blob = new Blob(chunks, { type: 'video/webm' });
684
+ const url = URL.createObjectURL(blob);
685
+ const a = document.createElement('a');
686
+ a.href = url;
687
+ a.download = `wan-quantum-director-cut-${Date.now()}.webm`;
688
+ a.click();
689
+ URL.revokeObjectURL(url);
690
+ this.log('Movie Downloaded Successfully.', 'success');
691
+ if (btn) {
692
+ btn.innerHTML = originalText;
693
+ btn.disabled = false;
694
+ }
695
+ };
696
+
697
+ recorder.start();
698
+
699
+ // Play frames into recorder
700
+ const frameDuration = 1000 / 30; // 30fps
701
+
702
+ for (const bitmap of this.movieFrames) {
703
+ ctx.drawImage(bitmap, 0, 0);
704
+ // Request dummy frame to keep stream active if needed,
705
+ // but loop should be enough if async enough.
706
+ // Actually, for captureStream to pick it up, we should wait a tick.
707
+ // But for simplicity, we'll use a short timeout
708
+ await new Promise(r => setTimeout(r, frameDuration));
709
+ }
710
+
711
+ recorder.stop();
712
+
713
+ } catch (e) {
714
+ this.log(`Export failed: ${e.message}`, 'error');
715
+ if (btn) {
716
+ btn.innerHTML = originalText;
717
+ btn.disabled = false;
718
+ }
719
+ }
720
+ }
721
+ }
722
+
723
// Initialize the simulator when DOM is loaded; the instance is exposed on
// window for debugging and for other scripts to interact with.
document.addEventListener('DOMContentLoaded', () => {
    window.simulator = new SystemSimulator();
});
style.css ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* Minimal styling for the static card layout. */
body {
    padding: 2rem;
    font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
}

h1 {
    font-size: 16px;
    margin-top: 0;
}

/* Muted gray body text */
p {
    color: rgb(107, 114, 128);
    font-size: 15px;
    margin-bottom: 10px;
    margin-top: 5px;
}

/* Centered content card with a light border */
.card {
    max-width: 620px;
    margin: 0 auto;
    padding: 16px;
    border: 1px solid lightgray;
    border-radius: 16px;
}

/* Avoid a trailing gap below the last paragraph inside a card */
.card p:last-child {
    margin-bottom: 0;
}
styles.css ADDED
@@ -0,0 +1,803 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @import url('https://fonts.googleapis.com/css2?family=Space+Mono:wght@400;700&family=Inter:wght@300;400;600;800&display=swap');
2
+
3
/* Design tokens shared by the whole UI (referenced via var(--...) in the rules below). */
:root {
    --bg-dark: #050510;                    /* page background */
    --bg-panel: rgba(20, 25, 40, 0.7);     /* translucent panel fill */
    --accent: #00f0ff;                     /* primary cyan accent */
    --accent-glow: rgba(0, 240, 255, 0.3); /* soft glow matching --accent */
    --secondary: #7000ff;                  /* violet secondary accent */
    --text-main: #e0e6ed;                  /* primary text color */
    --text-dim: #94a3b8;                   /* de-emphasized text color */
    --border: rgba(255, 255, 255, 0.1);    /* hairline borders */
    --glass: blur(12px);                   /* backdrop-filter value for glass panels */
    --font-mono: 'Space Mono', monospace;
    --font-sans: 'Inter', sans-serif;
}
16
+
17
+ * {
18
+ box-sizing: border-box;
19
+ margin: 0;
20
+ padding: 0;
21
+ }
22
+
23
+ body {
24
+ background-color: var(--bg-dark);
25
+ color: var(--text-main);
26
+ font-family: var(--font-sans);
27
+ overflow: hidden; /* App-like feel */
28
+ height: 100vh;
29
+ background-image:
30
+ radial-gradient(circle at 10% 20%, rgba(112, 0, 255, 0.1) 0%, transparent 20%),
31
+ radial-gradient(circle at 90% 80%, rgba(0, 240, 255, 0.1) 0%, transparent 20%);
32
+ }
33
+
34
+ /* Layout */
35
+ .app-container {
36
+ display: grid;
37
+ grid-template-columns: 260px 1fr;
38
+ height: 100vh;
39
+ }
40
+
41
+ /* Side Navigation */
42
+ .side-nav {
43
+ background: rgba(10, 12, 20, 0.9);
44
+ border-right: 1px solid var(--border);
45
+ padding: 24px;
46
+ display: flex;
47
+ flex-direction: column;
48
+ gap: 32px;
49
+ }
50
+
51
+ .nav-brand {
52
+ display: flex;
53
+ align-items: center;
54
+ gap: 12px;
55
+ font-weight: 800;
56
+ font-size: 1.2rem;
57
+ letter-spacing: 1px;
58
+ color: var(--accent);
59
+ }
60
+
61
+ .brand-icon {
62
+ width: 32px;
63
+ height: 32px;
64
+ background: var(--accent);
65
+ color: #000;
66
+ display: flex;
67
+ align-items: center;
68
+ justify-content: center;
69
+ border-radius: 4px;
70
+ box-shadow: 0 0 15px var(--accent-glow);
71
+ }
72
+
73
+ .nav-links {
74
+ display: flex;
75
+ flex-direction: column;
76
+ gap: 8px;
77
+ }
78
+
79
+ .nav-link {
80
+ display: flex;
81
+ align-items: center;
82
+ gap: 12px;
83
+ padding: 12px;
84
+ text-decoration: none;
85
+ color: var(--text-dim);
86
+ border-radius: 8px;
87
+ transition: all 0.2s;
88
+ font-size: 0.9rem;
89
+ }
90
+
91
+ .nav-link:hover, .nav-link.active {
92
+ background: rgba(255, 255, 255, 0.05);
93
+ color: var(--text-main);
94
+ }
95
+
96
+ .nav-link.active {
97
+ border-left: 3px solid var(--accent);
98
+ background: linear-gradient(90deg, rgba(0, 240, 255, 0.1), transparent);
99
+ }
100
+
101
+ .nav-status {
102
+ margin-top: auto;
103
+ border-top: 1px solid var(--border);
104
+ padding-top: 24px;
105
+ }
106
+
107
+ .status-row {
108
+ display: flex;
109
+ justify-content: space-between;
110
+ align-items: center;
111
+ font-size: 0.8rem;
112
+ color: var(--text-dim);
113
+ margin-bottom: 12px;
114
+ }
115
+
116
+ .status-dot {
117
+ width: 8px;
118
+ height: 8px;
119
+ border-radius: 50%;
120
+ background: #333;
121
+ }
122
+
123
+ .status-dot.active {
124
+ background: #00ff88;
125
+ box-shadow: 0 0 8px #00ff88;
126
+ }
127
+
128
+ .mono {
129
+ font-family: var(--font-mono);
130
+ color: var(--accent);
131
+ }
132
+
133
+ /* Main Content */
134
+ .content-area {
135
+ overflow-y: auto;
136
+ padding: 32px;
137
+ position: relative;
138
+ }
139
+
140
+ .section {
141
+ display: none;
142
+ max-width: 1200px;
143
+ margin: 0 auto;
144
+ animation: fadeIn 0.4s ease-out;
145
+ }
146
+
147
+ .section.active {
148
+ display: block;
149
+ }
150
+
151
+ @keyframes fadeIn {
152
+ from { opacity: 0; transform: translateY(10px); }
153
+ to { opacity: 1; transform: translateY(0); }
154
+ }
155
+
156
+ /* Simulation Grid */
157
+ .simulation-grid {
158
+ display: grid;
159
+ grid-template-columns: 350px 1fr;
160
+ grid-template-rows: auto 250px;
161
+ gap: 24px;
162
+ height: calc(100vh - 120px);
163
+ }
164
+
165
+ .glass-panel {
166
+ background: var(--bg-panel);
167
+ backdrop-filter: var(--glass);
168
+ -webkit-backdrop-filter: var(--glass);
169
+ border: 1px solid var(--border);
170
+ border-radius: 12px;
171
+ padding: 24px;
172
+ box-shadow: 0 8px 32px rgba(0, 0, 0, 0.3);
173
+ display: flex;
174
+ flex-direction: column;
175
+ }
176
+
177
+ /* Inputs */
178
+ .control-panel {
179
+ grid-row: 1 / -1;
180
+ }
181
+
182
+ .input-group {
183
+ margin-bottom: 24px;
184
+ }
185
+
186
+ /* File Upload Styles */
187
+ .file-drop-zone {
188
+ border: 2px dashed var(--border);
189
+ border-radius: 8px;
190
+ padding: 20px;
191
+ text-align: center;
192
+ background: rgba(0,0,0,0.2);
193
+ cursor: pointer;
194
+ transition: all 0.3s ease;
195
+ position: relative;
196
+ overflow: hidden;
197
+ min-height: 120px;
198
+ display: flex;
199
+ align-items: center;
200
+ justify-content: center;
201
+ }
202
+
203
+ .file-drop-zone:hover, .file-drop-zone.drag-over {
204
+ border-color: var(--accent);
205
+ background: rgba(0, 240, 255, 0.05);
206
+ }
207
+
208
+ .drop-content {
209
+ display: flex;
210
+ flex-direction: column;
211
+ align-items: center;
212
+ gap: 8px;
213
+ pointer-events: none;
214
+ z-index: 2;
215
+ }
216
+
217
+ .drop-content .icon {
218
+ font-size: 2rem;
219
+ margin-bottom: 4px;
220
+ }
221
+
222
+ .drop-text {
223
+ font-weight: 700;
224
+ color: var(--text-main);
225
+ font-size: 0.9rem;
226
+ }
227
+
228
+ .drop-sub {
229
+ color: var(--text-dim);
230
+ font-size: 0.7rem;
231
+ }
232
+
233
+ #preview-img {
234
+ position: absolute;
235
+ top: 0;
236
+ left: 0;
237
+ width: 100%;
238
+ height: 100%;
239
+ object-fit: cover;
240
+ opacity: 0.6;
241
+ z-index: 1;
242
+ }
243
+
244
+ #preview-img.hidden {
245
+ display: none;
246
+ }
247
+
248
+ label {
249
+ display: flex;
250
+ justify-content: space-between;
251
+ margin-bottom: 8px;
252
+ font-size: 0.8rem;
253
+ text-transform: uppercase;
254
+ letter-spacing: 1px;
255
+ color: var(--text-dim);
256
+ font-weight: 600;
257
+ }
258
+
259
+ textarea {
260
+ width: 100%;
261
+ height: 100px;
262
+ background: rgba(0, 0, 0, 0.3);
263
+ border: 1px solid var(--border);
264
+ border-radius: 8px;
265
+ padding: 12px;
266
+ color: var(--text-main);
267
+ font-family: var(--font-sans);
268
+ resize: none;
269
+ transition: border-color 0.2s;
270
+ }
271
+
272
+ textarea:focus {
273
+ outline: none;
274
+ border-color: var(--accent);
275
+ }
276
+
277
+ input[type="range"] {
278
+ width: 100%;
279
+ background: transparent;
280
+ -webkit-appearance: none;
281
+ }
282
+
283
+ input[type="range"]::-webkit-slider-runnable-track {
284
+ height: 4px;
285
+ background: rgba(255, 255, 255, 0.1);
286
+ border-radius: 2px;
287
+ }
288
+
289
+ input[type="range"]::-webkit-slider-thumb {
290
+ -webkit-appearance: none;
291
+ height: 16px;
292
+ width: 16px;
293
+ border-radius: 50%;
294
+ background: var(--accent);
295
+ margin-top: -6px;
296
+ cursor: pointer;
297
+ box-shadow: 0 0 10px var(--accent-glow);
298
+ }
299
+
300
+ .slider-meta {
301
+ font-size: 0.7rem;
302
+ color: var(--text-dim);
303
+ margin-top: 4px;
304
+ }
305
+
306
+ .value-badge {
307
+ color: var(--accent);
308
+ font-family: var(--font-mono);
309
+ }
310
+
311
+ select {
312
+ width: 100%;
313
+ background: rgba(0, 0, 0, 0.3);
314
+ border: 1px solid var(--border);
315
+ color: var(--text-main);
316
+ padding: 10px;
317
+ border-radius: 6px;
318
+ }
319
+
320
+ .btn-primary {
321
+ width: 100%;
322
+ padding: 16px;
323
+ background: var(--accent);
324
+ color: #000;
325
+ border: none;
326
+ border-radius: 6px;
327
+ font-weight: 800;
328
+ cursor: pointer;
329
+ position: relative;
330
+ overflow: hidden;
331
+ transition: all 0.2s;
332
+ }
333
+
334
+ .btn-primary:hover {
335
+ background: #fff;
336
+ box-shadow: 0 0 20px var(--accent-glow);
337
+ }
338
+
339
+ .btn-primary:disabled {
340
+ background: #333;
341
+ color: #666;
342
+ cursor: not-allowed;
343
+ }
344
+
345
+ /* Director Mode Styles */
346
+ .director-controls {
347
+ background: rgba(0, 0, 0, 0.3);
348
+ border: 1px solid var(--secondary);
349
+ border-radius: 8px;
350
+ padding: 16px;
351
+ margin-bottom: 24px;
352
+ }
353
+
354
+ .director-header {
355
+ display: flex;
356
+ justify-content: space-between;
357
+ align-items: center;
358
+ margin-bottom: 12px;
359
+ }
360
+
361
+ .switch-container {
362
+ display: flex;
363
+ align-items: center;
364
+ gap: 12px;
365
+ cursor: pointer;
366
+ }
367
+
368
+ .switch-container input {
369
+ display: none;
370
+ }
371
+
372
+ .toggle-slider {
373
+ width: 40px;
374
+ height: 20px;
375
+ background: #333;
376
+ border-radius: 20px;
377
+ position: relative;
378
+ transition: all 0.3s;
379
+ }
380
+
381
+ .toggle-slider::before {
382
+ content: '';
383
+ position: absolute;
384
+ width: 16px;
385
+ height: 16px;
386
+ background: #fff;
387
+ border-radius: 50%;
388
+ top: 2px;
389
+ left: 2px;
390
+ transition: all 0.3s;
391
+ }
392
+
393
+ input:checked + .toggle-slider {
394
+ background: var(--secondary);
395
+ }
396
+
397
+ input:checked + .toggle-slider::before {
398
+ transform: translateX(20px);
399
+ }
400
+
401
+ .frame-count {
402
+ font-family: var(--font-mono);
403
+ color: var(--secondary);
404
+ font-size: 0.8rem;
405
+ }
406
+
407
+ .director-actions {
408
+ display: flex;
409
+ gap: 12px;
410
+ }
411
+
412
+ .btn-secondary, .btn-danger {
413
+ flex: 1;
414
+ padding: 10px;
415
+ border: none;
416
+ border-radius: 4px;
417
+ font-size: 0.8rem;
418
+ font-weight: 700;
419
+ cursor: pointer;
420
+ display: flex;
421
+ align-items: center;
422
+ justify-content: center;
423
+ gap: 8px;
424
+ transition: all 0.2s;
425
+ }
426
+
427
+ .btn-secondary {
428
+ background: rgba(255, 255, 255, 0.1);
429
+ color: var(--text-main);
430
+ border: 1px solid var(--border);
431
+ }
432
+
433
+ .btn-secondary:hover:not(:disabled) {
434
+ background: var(--text-main);
435
+ color: #000;
436
+ }
437
+
438
+ .btn-danger {
439
+ background: rgba(255, 0, 85, 0.1);
440
+ color: #ff0055;
441
+ border: 1px solid rgba(255, 0, 85, 0.3);
442
+ }
443
+
444
+ .btn-danger:hover:not(:disabled) {
445
+ background: #ff0055;
446
+ color: #fff;
447
+ }
448
+
449
+ .btn-secondary:disabled, .btn-danger:disabled {
450
+ opacity: 0.5;
451
+ cursor: not-allowed;
452
+ }
453
+
454
+
455
+ /* Visualization Panel */
456
+ .visualization-panel {
457
+ grid-column: 2;
458
+ grid-row: 1;
459
+ padding: 0; /* Custom padding for tabs */
460
+ overflow: hidden;
461
+ }
462
+
463
+ .viz-tabs {
464
+ display: flex;
465
+ background: rgba(0, 0, 0, 0.2);
466
+ border-bottom: 1px solid var(--border);
467
+ }
468
+
469
+ .viz-tab {
470
+ padding: 12px 24px;
471
+ background: transparent;
472
+ border: none;
473
+ color: var(--text-dim);
474
+ cursor: pointer;
475
+ font-family: var(--font-mono);
476
+ font-size: 0.8rem;
477
+ border-right: 1px solid var(--border);
478
+ }
479
+
480
+ .viz-tab.active {
481
+ color: var(--accent);
482
+ background: rgba(0, 240, 255, 0.05);
483
+ }
484
+
485
+ .viz-content {
486
+ flex: 1;
487
+ position: relative;
488
+ background: #000;
489
+ }
490
+
491
+ .viz-view {
492
+ display: none;
493
+ width: 100%;
494
+ height: 100%;
495
+ }
496
+
497
+ .viz-view.active {
498
+ display: block;
499
+ }
500
+
501
+ canvas {
502
+ width: 100%;
503
+ height: 100%;
504
+ display: block;
505
+ }
506
+
507
+ .overlay-stats {
508
+ position: absolute;
509
+ top: 50%;
510
+ left: 50%;
511
+ transform: translate(-50%, -50%);
512
+ font-family: var(--font-mono);
513
+ color: var(--accent);
514
+ background: rgba(0, 0, 0, 0.8);
515
+ padding: 8px 16px;
516
+ border: 1px solid var(--accent);
517
+ pointer-events: none;
518
+ }
519
+
520
+ .scanline {
521
+ position: absolute;
522
+ top: 0;
523
+ left: 0;
524
+ width: 100%;
525
+ height: 4px;
526
+ background: rgba(0, 240, 255, 0.3);
527
+ opacity: 0.3;
528
+ animation: scan 3s linear infinite;
529
+ pointer-events: none;
530
+ }
531
+
532
+ @keyframes scan {
533
+ 0% { top: 0%; }
534
+ 100% { top: 100%; }
535
+ }
536
+
537
+ .entropy-readout {
538
+ position: absolute;
539
+ bottom: 16px;
540
+ right: 16px;
541
+ font-family: var(--font-mono);
542
+ background: rgba(0,0,0,0.6);
543
+ padding: 4px 8px;
544
+ border-radius: 4px;
545
+ color: var(--secondary);
546
+ }
547
+
548
+ /* Terminal */
549
+ .terminal-panel {
550
+ grid-column: 2;
551
+ grid-row: 2;
552
+ font-family: var(--font-mono);
553
+ font-size: 0.85rem;
554
+ padding: 0;
555
+ background: #0a0a12;
556
+ }
557
+
558
+ .terminal-header {
559
+ padding: 8px 16px;
560
+ background: rgba(255, 255, 255, 0.05);
561
+ border-bottom: 1px solid var(--border);
562
+ display: flex;
563
+ justify-content: space-between;
564
+ color: var(--text-dim);
565
+ font-size: 0.7rem;
566
+ text-transform: uppercase;
567
+ }
568
+
569
+ .terminal-status {
570
+ color: #00ff88;
571
+ }
572
+
573
+ .terminal-body {
574
+ padding: 16px;
575
+ overflow-y: auto;
576
+ color: #d0d0d0;
577
+ height: 100%;
578
+ display: flex;
579
+ flex-direction: column;
580
+ gap: 4px;
581
+ }
582
+
583
+ .log-line {
584
+ opacity: 0.8;
585
+ border-left: 2px solid transparent;
586
+ padding-left: 8px;
587
+ }
588
+
589
+ .log-line.info { border-color: var(--accent); }
590
+ .log-line.warn { border-color: #ffcc00; color: #ffcc00; }
591
+ .log-line.error { border-color: #ff0055; color: #ff0055; }
592
+ .log-line.success { border-color: #00ff88; color: #00ff88; }
593
+
594
+ .ts {
595
+ color: #666;
596
+ margin-right: 8px;
597
+ }
598
+
599
+ /* Document Styles */
600
+ .document-wrapper {
601
+ max-width: 900px;
602
+ margin: 0 auto;
603
+ line-height: 1.8;
604
+ }
605
+
606
+ .document-wrapper h1 {
607
+ font-size: 2.5rem;
608
+ margin-bottom: 16px;
609
+ background: linear-gradient(90deg, #fff, var(--text-dim));
610
+ -webkit-background-clip: text;
611
+ -webkit-text-fill-color: transparent;
612
+ }
613
+
614
+ .lead {
615
+ font-size: 1.1rem;
616
+ color: var(--text-dim);
617
+ margin-bottom: 32px;
618
+ border-left: 4px solid var(--accent);
619
+ padding-left: 16px;
620
+ }
621
+
622
+ h3 {
623
+ margin: 32px 0 16px;
624
+ color: var(--accent);
625
+ font-family: var(--font-mono);
626
+ text-transform: uppercase;
627
+ font-size: 1rem;
628
+ letter-spacing: 1px;
629
+ }
630
+
631
+ p {
632
+ margin-bottom: 16px;
633
+ color: #c0c0c0;
634
+ }
635
+
636
+ .metric-cards {
637
+ display: grid;
638
+ grid-template-columns: repeat(3, 1fr);
639
+ gap: 16px;
640
+ margin: 32px 0;
641
+ }
642
+
643
+ .card {
644
+ background: rgba(255, 255, 255, 0.05);
645
+ padding: 24px;
646
+ border-radius: 8px;
647
+ text-align: center;
648
+ border: 1px solid var(--border);
649
+ }
650
+
651
+ .metric-val {
652
+ font-size: 2rem;
653
+ font-weight: 800;
654
+ color: #fff;
655
+ margin-bottom: 4px;
656
+ }
657
+
658
+ .metric-label {
659
+ font-size: 0.7rem;
660
+ text-transform: uppercase;
661
+ color: var(--text-dim);
662
+ }
663
+
664
+ .specs-table {
665
+ width: 100%;
666
+ border: 1px solid var(--border);
667
+ border-radius: 8px;
668
+ overflow: hidden;
669
+ margin: 24px 0;
670
+ }
671
+
672
+ .spec-row {
673
+ display: grid;
674
+ grid-template-columns: 1fr 1fr 2fr 1fr;
675
+ padding: 12px 16px;
676
+ border-bottom: 1px solid var(--border);
677
+ }
678
+
679
+ .spec-row:last-child {
680
+ border-bottom: none;
681
+ }
682
+
683
+ .spec-row.header {
684
+ background: rgba(255, 255, 255, 0.05);
685
+ font-weight: bold;
686
+ color: var(--accent);
687
+ font-family: var(--font-mono);
688
+ font-size: 0.8rem;
689
+ }
690
+
691
+ .spec-row span {
692
+ font-size: 0.9rem;
693
+ }
694
+
695
+ /* Timeline */
696
+ .timeline {
697
+ position: relative;
698
+ margin: 32px 0;
699
+ padding-left: 32px;
700
+ }
701
+
702
+ .timeline::before {
703
+ content: '';
704
+ position: absolute;
705
+ left: 0;
706
+ top: 0;
707
+ bottom: 0;
708
+ width: 2px;
709
+ background: var(--border);
710
+ }
711
+
712
+ .timeline-item {
713
+ position: relative;
714
+ margin-bottom: 24px;
715
+ }
716
+
717
+ .timeline-item::before {
718
+ content: '';
719
+ position: absolute;
720
+ left: -37px;
721
+ top: 6px;
722
+ width: 12px;
723
+ height: 12px;
724
+ background: var(--bg-dark);
725
+ border: 2px solid var(--accent);
726
+ border-radius: 50%;
727
+ }
728
+
729
+ .phase {
730
+ font-family: var(--font-mono);
731
+ color: var(--accent);
732
+ font-size: 0.8rem;
733
+ margin-bottom: 4px;
734
+ }
735
+
736
+ /* Architecture specific */
737
+ .architecture-grid {
738
+ display: grid;
739
+ grid-template-columns: 1fr 1fr;
740
+ gap: 24px;
741
+ margin-top: 32px;
742
+ }
743
+
744
+ .arch-card {
745
+ background: rgba(255,255,255,0.03);
746
+ padding: 24px;
747
+ border-radius: 8px;
748
+ border: 1px solid var(--border);
749
+ }
750
+
751
+ .arch-card h3 {
752
+ margin-top: 0;
753
+ font-size: 0.9rem;
754
+ }
755
+
756
+ .arch-card ul {
757
+ list-style: none;
758
+ padding: 0;
759
+ }
760
+
761
+ .arch-card li {
762
+ margin-bottom: 8px;
763
+ font-size: 0.85rem;
764
+ padding-left: 16px;
765
+ position: relative;
766
+ }
767
+
768
+ .arch-card li::before {
769
+ content: '›';
770
+ position: absolute;
771
+ left: 0;
772
+ color: var(--accent);
773
+ }
774
+
775
+ #architecture-canvas {
776
+ height: 300px;
777
+ width: 100%;
778
+ background: #0f111a;
779
+ border-radius: 8px;
780
+ margin: 24px 0;
781
+ }
782
+
783
+ @media (max-width: 1024px) {
784
+ .app-container {
785
+ grid-template-columns: 1fr;
786
+ }
787
+ .side-nav {
788
+ display: none;
789
+ }
790
+ .simulation-grid {
791
+ grid-template-columns: 1fr;
792
+ grid-template-rows: auto auto auto;
793
+ }
794
+ .visualization-panel {
795
+ grid-column: 1;
796
+ grid-row: 2;
797
+ height: 400px;
798
+ }
799
+ .terminal-panel {
800
+ grid-column: 1;
801
+ grid-row: 3;
802
+ }
803
+ }