jhh6576 committed on
Commit
3dd2e50
·
verified ·
1 Parent(s): 942aaf2

Update app_enhanced.py

Browse files
Files changed (1) hide show
  1. app_enhanced.py +79 -88
app_enhanced.py CHANGED
@@ -12,6 +12,10 @@ import json
12
  import shutil
13
  from typing import List
14
  import traceback
 
 
 
 
15
 
16
  # --- ROBUST IMPORTS WITH FALLBACKS ---
17
  # (Assuming these modules have a method to process a single image, e.g., enhance_single)
@@ -310,7 +314,6 @@ class EnhancedComicGenerator:
310
  new_path = os.path.join(self.frames_dir, frame_filename)
311
  cv2.imwrite(new_path, frame)
312
 
313
- # Apply the same enhancements to the new frame to maintain style consistency
314
  print(f"🎨 Applying enhancements to the new frame: {frame_filename}")
315
  self._enhance_all_images(single_image_path=new_path)
316
  self._enhance_quality_colors(single_image_path=new_path)
@@ -347,7 +350,6 @@ class EnhancedComicGenerator:
347
  new_path = os.path.join(self.frames_dir, frame_filename)
348
  cv2.imwrite(new_path, frame)
349
 
350
- # Apply enhancements to the new frame from the specific timestamp
351
  print(f"🎨 Applying enhancements to the new frame from timestamp: {frame_filename}")
352
  self._enhance_all_images(single_image_path=new_path)
353
  self._enhance_quality_colors(single_image_path=new_path)
@@ -366,7 +368,7 @@ class EnhancedComicGenerator:
366
  traceback.print_exc()
367
  return {"success": False, "message": str(e)}
368
 
369
- def generate_keyframes_from_moments(self, video_path, key_moments, max_frames=48):
370
  try:
371
  cap = cv2.VideoCapture(video_path)
372
  if not cap.isOpened(): raise Exception("Cannot open video for keyframe extraction")
@@ -414,14 +416,16 @@ class EnhancedComicGenerator:
414
  with open('test1.srt', 'r', encoding='utf-8') as f:
415
  all_subs = list(srt.parse(f.read()))
416
  key_moments = [{'index': s.index, 'text': s.content, 'start': s.start.total_seconds(), 'end': s.end.total_seconds()} for s in all_subs]
417
- if not self.generate_keyframes_from_moments(self.video_path, key_moments, max_frames=48):
 
 
418
  raise Exception("Keyframe extraction failed.")
419
  update_status("Cropping black bars...", 45)
420
  black_x, black_y, _, _ = black_bar_crop()
421
- update_status("Enhancing images...", 50)
422
  self._enhance_all_images()
423
  self._enhance_quality_colors()
424
- update_status("Placing speech bubbles...", 75)
425
  bubbles = self._create_ai_bubbles_from_moments(black_x, black_y)
426
  update_status("Assembling comic pages...", 90)
427
  pages = self._generate_pages(bubbles)
@@ -437,61 +441,61 @@ class EnhancedComicGenerator:
437
  update_status(f"Error: {e}", -1)
438
  return False
439
 
440
- # <<< MODIFICATION START: Made enhancement functions process single files efficiently >>>
441
  def _enhance_all_images(self, single_image_path=None):
442
- """Enhances images. If single_image_path is provided, only enhances that file."""
443
  try:
444
  enhancer = SimpleColorEnhancer()
445
- if single_image_path and os.path.exists(single_image_path):
446
- # Assumes the enhancer class has a method for single images.
447
- # If it doesn't, this will gracefully fail or you can implement a fallback.
448
  enhancer.enhance_single(single_image_path)
449
- elif not single_image_path:
450
- enhancer.enhance_batch(self.frames_dir)
 
 
451
  except Exception as e:
452
- print(f"❌ Simple enhancement failed: {e}. Falling back to batch processing.")
453
- # Fallback for safety if enhance_single doesn't exist
454
- try:
455
- SimpleColorEnhancer().enhance_batch(self.frames_dir)
456
- except Exception as e2:
457
- print(f"❌ Fallback simple enhancement also failed: {e2}")
458
 
459
  def _enhance_quality_colors(self, single_image_path=None):
460
- """Enhances images with a quality model. If single_image_path is provided, only enhances that file."""
461
  try:
462
  enhancer = QualityColorEnhancer()
463
- if single_image_path and os.path.exists(single_image_path):
464
- # Assumes the enhancer class has a method for single images.
465
  enhancer.enhance_single(single_image_path)
466
- elif not single_image_path:
467
- enhancer.batch_enhance(self.frames_dir)
 
 
468
  except Exception as e:
469
- print(f"⚠️ Quality enhancement failed: {e}. Falling back to batch processing.")
470
- # Fallback for safety if enhance_single doesn't exist
471
- try:
472
- QualityColorEnhancer().batch_enhance(self.frames_dir)
473
- except Exception as e2:
474
- print(f"⚠️ Fallback quality enhancement also failed: {e2}")
475
  # <<< MODIFICATION END >>>
476
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
477
  def _create_ai_bubbles_from_moments(self, black_x, black_y):
478
- bubbles, frame_files = [], sorted([f for f in os.listdir(self.frames_dir) if f.endswith('.png')])
479
  metadata_path = 'frames/frame_metadata.json'
480
- if not os.path.exists(metadata_path): return [bubble(dialog="") for _ in frame_files]
481
- with open(metadata_path, 'r') as f: frame_metadata = json.load(f)
482
- for i, frame_file in enumerate(frame_files):
483
- update_status(f"Placing bubble {i+1}/{len(frame_files)}...", 75 + int(15 * (i / len(frame_files))))
484
- frame_path = os.path.join(self.frames_dir, frame_file)
485
- dialogue = frame_metadata.get(frame_file, {}).get('dialogue', "")
486
- try:
487
- faces = face_detector.detect_faces(frame_path)
488
- lip_x, lip_y = face_detector.get_lip_position(frame_path, faces[0]) if faces else (-1, -1)
489
- bubble_x, bubble_y = ai_bubble_placer.place_bubble_ai(frame_path, (lip_x, lip_y))
490
- bubbles.append(bubble(bubble_offset_x=bubble_x, bubble_offset_y=bubble_y, lip_x=lip_x, lip_y=lip_y, dialog=dialogue, emotion='normal'))
491
- except Exception as e:
492
- print(f"-> Could not place bubble for {frame_file}: {e}. Using default.")
493
- bubbles.append(bubble(bubble_offset_x=50, bubble_offset_y=20, lip_x=-1, lip_y=-1, dialog=dialogue, emotion='normal'))
494
  return bubbles
 
495
 
496
  def _generate_pages(self, bubbles):
497
  try:
@@ -550,40 +554,37 @@ class EnhancedComicGenerator:
550
  .speech-bubble.selected { outline: 2px dashed #4CAF50; }
551
  .speech-bubble textarea { position: absolute; top: 0; left: 0; width: 100%; height: 100%; box-sizing: border-box; border: 1px solid #4CAF50; background: rgba(255,255,255,0.95); font: inherit; text-align: center; resize: none; padding: 8px; z-index: 102; }
552
 
553
- /* <<< MODIFICATION START: New CSS for 'speech' bubble type >>> */
554
  .speech-bubble.speech {
555
- color: #fff;
556
- font-size: 16px;
557
- text-align: center;
558
- padding: 1em;
559
- /* Remove standard border/background to allow gradient to show */
560
- border: none;
561
- background: none;
562
- border-radius: 0;
563
- /* Gradient background applied via border-image */
564
- border-image: fill 0 linear-gradient(30deg, #4ECDC4, #6A4A3C);
565
  }
566
 
567
- /* Generic tail for all bubbles */
568
- .speech-bubble::after, .speech-bubble.idea::after { content: ''; position: absolute; width: 0; height: 0; border-left: 10px solid transparent; border-right: 10px solid transparent; }
569
-
570
- /* Specific tail for the new 'speech' bubble style */
571
  .speech-bubble.speech::after {
572
- border-top: 20px solid #6A4A3C; /* Long tail, color matches one end of the gradient */
 
 
 
 
 
 
 
573
  }
574
 
575
- /* 4-WAY TAIL POSITIONING CLASSES for .speech bubbles */
576
- .speech-bubble.speech.tail-bl::after { bottom: -19px; left: 20px; transform: rotate(0deg); }
577
- .speech-bubble.speech.tail-br::after { bottom: -19px; right: 20px; transform: rotate(0deg); }
578
- .speech-bubble.speech.tail-tr::after { top: -19px; right: 20px; transform: rotate(180deg); }
579
- .speech-bubble.speech.tail-tl::after { top: -19px; left: 20px; transform: rotate(180deg); }
580
  /* <<< MODIFICATION END >>> */
581
 
582
  .speech-bubble.thought { background: white; border: 2px dashed #555; color: #333; border-radius: 50%; }
583
  .speech-bubble.reaction { background: #FFD700; border: 3px solid #E53935; color: #D32F2F; font-weight: 900; text-transform: uppercase; width: 180px; clip-path: polygon(0% 25%, 17% 21%, 17% 0%, 31% 16%, 50% 4%, 69% 16%, 83% 0%, 83% 21%, 100% 25%, 85% 45%, 95% 62%, 82% 79%, 100% 97%, 79% 89%, 60% 98%, 46% 82%, 27% 95%, 15% 78%, 5% 62%, 15% 45%); }
584
  .speech-bubble.narration { background: #FAFAFA; border: 2px solid #BDBDBD; color: #424242; border-radius: 3px; }
585
  .speech-bubble.idea { background: linear-gradient(180deg,#FFFDD0 0%, #FFF8B5 100%); border: 2px solid #FFA500; color: #6a4b00; border-radius: 40% 60% 40% 60% / 60% 40% 60% 40%; }
586
- .speech-bubble.idea::after { border-top: 10px solid #FFA500; bottom: -9px; left: 20px; }
587
 
588
  .speech-bubble.thought::after { display: none; }
589
  .thought-dot { position: absolute; background-color: white; border: 2px solid #555; border-radius: 50%; z-index: -1; }
@@ -731,14 +732,15 @@ class EnhancedComicGenerator:
731
  document.getElementById('bubble-text-color').addEventListener('input', (e) => {
732
  if(currentlySelectedBubble) currentlySelectedBubble.style.color = e.target.value;
733
  });
 
734
  document.getElementById('bubble-fill-color').addEventListener('input', (e) => {
735
  if(currentlySelectedBubble) {
736
- // The new speech bubble uses a gradient, so standard fill color is disabled for it.
737
- if (currentlySelectedBubble.dataset.type !== 'speech') {
738
- currentlySelectedBubble.style.backgroundColor = e.target.value;
739
- }
740
  }
741
  });
 
742
 
743
  document.addEventListener('mousemove', e => { if (isPanning) panImage(e); if (draggedBubble) drag(e); if(isResizing) resizeBubble(e); });
744
  document.addEventListener('mouseup', e => { if (isPanning) stopPan(e); if (draggedBubble) stopDrag(e); if(isResizing) stopResize(e);});
@@ -783,7 +785,6 @@ class EnhancedComicGenerator:
783
 
784
  function applyBubbleType(bubble, type) {
785
  bubble.querySelectorAll('.thought-dot').forEach(el => el.remove());
786
- // Preserve essential classes
787
  let classesToKeep = 'speech-bubble';
788
  if (bubble.classList.contains('selected')) classesToKeep += ' selected';
789
 
@@ -792,7 +793,6 @@ class EnhancedComicGenerator:
792
  bubble.dataset.type = type;
793
 
794
  if (type === 'speech') {
795
- // Set initial tail position for speech bubbles
796
  bubble.classList.add('tail-bl');
797
  bubble.dataset.tailPos = '0';
798
  }
@@ -815,10 +815,8 @@ class EnhancedComicGenerator:
815
  currentlySelectedBubble.style.fontFamily = font;
816
  }
817
 
818
- // <<< MODIFICATION START: Rewritten 4-way tail rotation function >>>
819
  function rotateBubbleTail() {
820
  if (!currentlySelectedBubble) { alert("Please select a bubble first."); return; }
821
- // This function now only works for the 'speech' type which has the new tail logic
822
  if (currentlySelectedBubble.dataset.type !== 'speech') {
823
  alert("Tail rotation is only available for the 'Speech' bubble type.");
824
  return;
@@ -826,18 +824,11 @@ class EnhancedComicGenerator:
826
 
827
  const positions = ['tail-bl', 'tail-br', 'tail-tr', 'tail-tl'];
828
  let currentPos = parseInt(currentlySelectedBubble.dataset.tailPos || 0);
829
-
830
- // Remove current position class
831
  currentlySelectedBubble.classList.remove(positions[currentPos]);
832
-
833
- // Get next position, cycling back to 0
834
  let nextPos = (currentPos + 1) % positions.length;
835
-
836
- // Add new position class and update data attribute
837
  currentlySelectedBubble.classList.add(positions[nextPos]);
838
  currentlySelectedBubble.dataset.tailPos = nextPos;
839
  }
840
- // <<< MODIFICATION END >>>
841
 
842
  function selectPanel(panel) {
843
  document.querySelectorAll('.panel.selected').forEach(p => p.classList.remove('selected'));
@@ -859,13 +850,13 @@ class EnhancedComicGenerator:
859
  const styles = window.getComputedStyle(currentlySelectedBubble);
860
  document.getElementById('bubble-text-color').value = rgbToHex(styles.color);
861
 
 
862
  const fillColorPicker = document.getElementById('bubble-fill-color');
863
- if (currentlySelectedBubble.dataset.type === 'speech') {
864
- fillColorPicker.disabled = true; // Disable fill for gradient bubble
865
- } else {
866
- fillColorPicker.disabled = false;
867
- fillColorPicker.value = rgbToHex(styles.backgroundColor);
868
- }
869
 
870
  document.getElementById('bubble-type-select').value = currentlySelectedBubble.dataset.type || 'speech';
871
  document.getElementById('font-select').value = styles.fontFamily.split(',')[0].replace(/"/g, "").replace(/'/g, "").trim();
 
12
  import shutil
13
  from typing import List
14
  import traceback
15
+ # <<< MODIFICATION START: Import for parallel processing >>>
16
+ from concurrent.futures import ThreadPoolExecutor
17
+ # <<< MODIFICATION END >>>
18
+
19
 
20
  # --- ROBUST IMPORTS WITH FALLBACKS ---
21
  # (Assuming these modules have a method to process a single image, e.g., enhance_single)
 
314
  new_path = os.path.join(self.frames_dir, frame_filename)
315
  cv2.imwrite(new_path, frame)
316
 
 
317
  print(f"🎨 Applying enhancements to the new frame: {frame_filename}")
318
  self._enhance_all_images(single_image_path=new_path)
319
  self._enhance_quality_colors(single_image_path=new_path)
 
350
  new_path = os.path.join(self.frames_dir, frame_filename)
351
  cv2.imwrite(new_path, frame)
352
 
 
353
  print(f"🎨 Applying enhancements to the new frame from timestamp: {frame_filename}")
354
  self._enhance_all_images(single_image_path=new_path)
355
  self._enhance_quality_colors(single_image_path=new_path)
 
368
  traceback.print_exc()
369
  return {"success": False, "message": str(e)}
370
 
371
+ def generate_keyframes_from_moments(self, video_path, key_moments, max_frames=32):
372
  try:
373
  cap = cv2.VideoCapture(video_path)
374
  if not cap.isOpened(): raise Exception("Cannot open video for keyframe extraction")
 
416
  with open('test1.srt', 'r', encoding='utf-8') as f:
417
  all_subs = list(srt.parse(f.read()))
418
  key_moments = [{'index': s.index, 'text': s.content, 'start': s.start.total_seconds(), 'end': s.end.total_seconds()} for s in all_subs]
419
+ # <<< MODIFICATION START: Reduced default max_frames for speed >>>
420
+ if not self.generate_keyframes_from_moments(self.video_path, key_moments, max_frames=32):
421
+ # <<< MODIFICATION END >>>
422
  raise Exception("Keyframe extraction failed.")
423
  update_status("Cropping black bars...", 45)
424
  black_x, black_y, _, _ = black_bar_crop()
425
+ update_status("Enhancing images (in parallel)...", 50)
426
  self._enhance_all_images()
427
  self._enhance_quality_colors()
428
+ update_status("Placing speech bubbles (in parallel)...", 75)
429
  bubbles = self._create_ai_bubbles_from_moments(black_x, black_y)
430
  update_status("Assembling comic pages...", 90)
431
  pages = self._generate_pages(bubbles)
 
441
  update_status(f"Error: {e}", -1)
442
  return False
443
 
444
+ # <<< MODIFICATION START: Parallelized enhancement functions for speed >>>
445
  def _enhance_all_images(self, single_image_path=None):
 
446
  try:
447
  enhancer = SimpleColorEnhancer()
448
+ if single_image_path:
 
 
449
  enhancer.enhance_single(single_image_path)
450
+ else:
451
+ frame_paths = [os.path.join(self.frames_dir, f) for f in os.listdir(self.frames_dir) if f.endswith('.png')]
452
+ with ThreadPoolExecutor() as executor:
453
+ list(executor.map(enhancer.enhance_single, frame_paths))
454
  except Exception as e:
455
+ print(f"❌ Simple enhancement failed: {e}")
 
 
 
 
 
456
 
457
  def _enhance_quality_colors(self, single_image_path=None):
 
458
  try:
459
  enhancer = QualityColorEnhancer()
460
+ if single_image_path:
 
461
  enhancer.enhance_single(single_image_path)
462
+ else:
463
+ frame_paths = [os.path.join(self.frames_dir, f) for f in os.listdir(self.frames_dir) if f.endswith('.png')]
464
+ with ThreadPoolExecutor() as executor:
465
+ list(executor.map(enhancer.enhance_single, frame_paths))
466
  except Exception as e:
467
+ print(f"⚠️ Quality enhancement failed: {e}")
 
 
 
 
 
468
  # <<< MODIFICATION END >>>
469
 
470
+ # <<< MODIFICATION START: Parallelized bubble placement for speed >>>
471
    def _process_bubble_for_frame(self, frame_file):
        """Helper function to process a single frame for bubble placement.

        Reads per-frame dialogue from ``self.frame_metadata`` (which must be
        populated by the caller before this runs) and returns a ``bubble``
        positioned via face/lip detection, or a default-positioned bubble
        when detection or placement fails.
        """
        # NOTE(review): this is dispatched from a ThreadPoolExecutor, so it
        # assumes face_detector and ai_bubble_placer are thread-safe — confirm.
        frame_path = os.path.join(self.frames_dir, frame_file)
        # Missing metadata for a frame degrades to an empty dialogue string.
        dialogue = self.frame_metadata.get(frame_file, {}).get('dialogue', "")
        try:
            faces = face_detector.detect_faces(frame_path)
            # (-1, -1) is the sentinel for "no face detected in this frame".
            lip_x, lip_y = face_detector.get_lip_position(frame_path, faces[0]) if faces else (-1, -1)
            bubble_x, bubble_y = ai_bubble_placer.place_bubble_ai(frame_path, (lip_x, lip_y))
            return bubble(bubble_offset_x=bubble_x, bubble_offset_y=bubble_y, lip_x=lip_x, lip_y=lip_y, dialog=dialogue, emotion='normal')
        except Exception as e:
            # Any detection/placement failure falls back to a fixed offset so
            # the pipeline never drops a frame.
            print(f"-> Could not place bubble for {frame_file}: {e}. Using default.")
            return bubble(bubble_offset_x=50, bubble_offset_y=20, lip_x=-1, lip_y=-1, dialog=dialogue, emotion='normal')
483
+
484
  def _create_ai_bubbles_from_moments(self, black_x, black_y):
485
+ frame_files = sorted([f for f in os.listdir(self.frames_dir) if f.endswith('.png')])
486
  metadata_path = 'frames/frame_metadata.json'
487
+ if not os.path.exists(metadata_path):
488
+ return [bubble(dialog="") for _ in frame_files]
489
+
490
+ with open(metadata_path, 'r') as f:
491
+ self.frame_metadata = json.load(f)
492
+
493
+ with ThreadPoolExecutor() as executor:
494
+ # Map the processing function to each frame file and collect results
495
+ bubbles = list(executor.map(self._process_bubble_for_frame, frame_files))
496
+
 
 
 
 
497
  return bubbles
498
+ # <<< MODIFICATION END >>>
499
 
500
  def _generate_pages(self, bubbles):
501
  try:
 
554
  .speech-bubble.selected { outline: 2px dashed #4CAF50; }
555
  .speech-bubble textarea { position: absolute; top: 0; left: 0; width: 100%; height: 100%; box-sizing: border-box; border: 1px solid #4CAF50; background: rgba(255,255,255,0.95); font: inherit; text-align: center; resize: none; padding: 8px; z-index: 102; }
556
 
557
+ /* <<< MODIFICATION START: New CSS for 'speech' bubble with curvy tail >>> */
558
  .speech-bubble.speech {
559
+ /* Use a CSS variable for dynamic color changes from JS */
560
+ background-color: var(--bubble-fill-color, white);
561
+ border: 2px solid #333;
562
+ color: #333;
563
+ border-radius: 15px;
 
 
 
 
 
564
  }
565
 
 
 
 
 
566
  .speech-bubble.speech::after {
567
+ content: '';
568
+ position: absolute;
569
+ width: 20px;
570
+ height: 20px;
571
+ background: var(--bubble-fill-color, white);
572
+ border-right: 2px solid #333;
573
+ border-bottom: 2px solid #333;
574
+ /* The combination of rotate and border-radius creates the curve */
575
  }
576
 
577
+ /* 4-WAY CURVY TAIL POSITIONING */
578
+ .speech-bubble.speech.tail-bl::after { bottom: -11px; left: 30px; transform: rotate(45deg); border-radius: 0 0 12px 0; border-left: none; border-top: none;}
579
+ .speech-bubble.speech.tail-br::after { bottom: -11px; right: 30px; transform: rotate(135deg); border-radius: 0 0 12px 0; border-left: none; border-top: none;}
580
+ .speech-bubble.speech.tail-tr::after { top: -11px; right: 30px; transform: rotate(225deg); border-radius: 0 0 12px 0; border-left: none; border-top: none;}
581
+ .speech-bubble.speech.tail-tl::after { top: -11px; left: 30px; transform: rotate(315deg); border-radius: 0 0 12px 0; border-left: none; border-top: none;}
582
  /* <<< MODIFICATION END >>> */
583
 
584
  .speech-bubble.thought { background: white; border: 2px dashed #555; color: #333; border-radius: 50%; }
585
  .speech-bubble.reaction { background: #FFD700; border: 3px solid #E53935; color: #D32F2F; font-weight: 900; text-transform: uppercase; width: 180px; clip-path: polygon(0% 25%, 17% 21%, 17% 0%, 31% 16%, 50% 4%, 69% 16%, 83% 0%, 83% 21%, 100% 25%, 85% 45%, 95% 62%, 82% 79%, 100% 97%, 79% 89%, 60% 98%, 46% 82%, 27% 95%, 15% 78%, 5% 62%, 15% 45%); }
586
  .speech-bubble.narration { background: #FAFAFA; border: 2px solid #BDBDBD; color: #424242; border-radius: 3px; }
587
  .speech-bubble.idea { background: linear-gradient(180deg,#FFFDD0 0%, #FFF8B5 100%); border: 2px solid #FFA500; color: #6a4b00; border-radius: 40% 60% 40% 60% / 60% 40% 60% 40%; }
 
588
 
589
  .speech-bubble.thought::after { display: none; }
590
  .thought-dot { position: absolute; background-color: white; border: 2px solid #555; border-radius: 50%; z-index: -1; }
 
732
  document.getElementById('bubble-text-color').addEventListener('input', (e) => {
733
  if(currentlySelectedBubble) currentlySelectedBubble.style.color = e.target.value;
734
  });
735
+ // <<< MODIFICATION START: Re-enabled fill color picker for all bubble types >>>
736
  document.getElementById('bubble-fill-color').addEventListener('input', (e) => {
737
  if(currentlySelectedBubble) {
738
+ // This now works for all bubbles, including the new 'speech' one.
739
+ // We use a CSS variable to color the bubble and its pseudo-element tail.
740
+ currentlySelectedBubble.style.setProperty('--bubble-fill-color', e.target.value);
 
741
  }
742
  });
743
+ // <<< MODIFICATION END >>>
744
 
745
  document.addEventListener('mousemove', e => { if (isPanning) panImage(e); if (draggedBubble) drag(e); if(isResizing) resizeBubble(e); });
746
  document.addEventListener('mouseup', e => { if (isPanning) stopPan(e); if (draggedBubble) stopDrag(e); if(isResizing) stopResize(e);});
 
785
 
786
  function applyBubbleType(bubble, type) {
787
  bubble.querySelectorAll('.thought-dot').forEach(el => el.remove());
 
788
  let classesToKeep = 'speech-bubble';
789
  if (bubble.classList.contains('selected')) classesToKeep += ' selected';
790
 
 
793
  bubble.dataset.type = type;
794
 
795
  if (type === 'speech') {
 
796
  bubble.classList.add('tail-bl');
797
  bubble.dataset.tailPos = '0';
798
  }
 
815
  currentlySelectedBubble.style.fontFamily = font;
816
  }
817
 
 
818
  function rotateBubbleTail() {
819
  if (!currentlySelectedBubble) { alert("Please select a bubble first."); return; }
 
820
  if (currentlySelectedBubble.dataset.type !== 'speech') {
821
  alert("Tail rotation is only available for the 'Speech' bubble type.");
822
  return;
 
824
 
825
  const positions = ['tail-bl', 'tail-br', 'tail-tr', 'tail-tl'];
826
  let currentPos = parseInt(currentlySelectedBubble.dataset.tailPos || 0);
 
 
827
  currentlySelectedBubble.classList.remove(positions[currentPos]);
 
 
828
  let nextPos = (currentPos + 1) % positions.length;
 
 
829
  currentlySelectedBubble.classList.add(positions[nextPos]);
830
  currentlySelectedBubble.dataset.tailPos = nextPos;
831
  }
 
832
 
833
  function selectPanel(panel) {
834
  document.querySelectorAll('.panel.selected').forEach(p => p.classList.remove('selected'));
 
850
  const styles = window.getComputedStyle(currentlySelectedBubble);
851
  document.getElementById('bubble-text-color').value = rgbToHex(styles.color);
852
 
853
+ // <<< MODIFICATION START: Updated color picker logic >>>
854
  const fillColorPicker = document.getElementById('bubble-fill-color');
855
+ fillColorPicker.disabled = false; // Always enabled
856
+ // Read the value from the CSS variable if it exists, otherwise from the computed style
857
+ const currentFill = styles.getPropertyValue('--bubble-fill-color').trim();
858
+ fillColorPicker.value = currentFill ? currentFill : rgbToHex(styles.backgroundColor);
859
+ // <<< MODIFICATION END >>>
 
860
 
861
  document.getElementById('bubble-type-select').value = currentlySelectedBubble.dataset.type || 'speech';
862
  document.getElementById('font-select').value = styles.fontFamily.split(',')[0].replace(/"/g, "").replace(/'/g, "").trim();