ASesYusuf1 committed on
Commit
799e841
·
verified ·
1 Parent(s): 1d35b52

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -5
app.py CHANGED
@@ -476,13 +476,13 @@ def auto_ensemble_process(audio, model_keys, state, seg_size=64, overlap=0.1, ou
476
  "model_outputs": {}
477
  }
478
 
479
- # Yeni audio dosyası kontrolü
480
- if state["current_audio"] != audio or state["current_model_idx"] >= len(model_keys):
481
  state["current_audio"] = audio
482
  state["current_model_idx"] = 0
483
  state["processed_stems"] = []
484
  state["model_outputs"] = {model_key: {"vocals": [], "other": []} for model_key in model_keys}
485
- logger.info("New audio or completed cycle detected, resetting ensemble state.")
486
 
487
  use_tta = use_tta == "True"
488
  base_name = os.path.splitext(os.path.basename(audio))[0]
@@ -499,8 +499,10 @@ def auto_ensemble_process(audio, model_keys, state, seg_size=64, overlap=0.1, ou
499
 
500
  # Şu anki modeli işle
501
  current_idx = state["current_model_idx"]
 
 
 
502
  if current_idx >= len(model_keys):
503
- # Tüm modeller işlendiyse ensemble işlemini yap
504
  logger.info("All models processed, running ensemble...")
505
  progress(0.9, desc="Running ensemble...")
506
 
@@ -646,6 +648,11 @@ def auto_ensemble_process(audio, model_keys, state, seg_size=64, overlap=0.1, ou
646
  elapsed = time.time() - start_time
647
  logger.info(f"Model {model_key} completed in {elapsed:.2f}s")
648
 
 
 
 
 
 
649
  # Çıktılar
650
  file_list = state["processed_stems"]
651
  status = f"Model {model_key} (Model {current_idx + 1}/{len(model_keys)}) completed in {elapsed:.2f}s<br>Click 'Run Ensemble!' to process the next model.<br>Processed stems:<ul>"
@@ -669,7 +676,7 @@ def auto_ensemble_process(audio, model_keys, state, seg_size=64, overlap=0.1, ou
669
  logger.warning(f"Failed to delete temporary file {temp_audio_path}: {e}")
670
  if torch.cuda.is_available():
671
  torch.cuda.empty_cache()
672
- logger.info("GPU memory cleared")
673
 
674
  def update_roformer_models(category):
675
  """Update Roformer model dropdown based on selected category."""
 
476
  "model_outputs": {}
477
  }
478
 
479
+ # Yeni audio dosyası kontrolü - yalnızca audio değiştiğinde sıfırlıyoruz
480
+ if state["current_audio"] != audio:
481
  state["current_audio"] = audio
482
  state["current_model_idx"] = 0
483
  state["processed_stems"] = []
484
  state["model_outputs"] = {model_key: {"vocals": [], "other": []} for model_key in model_keys}
485
+ logger.info("New audio detected, resetting ensemble state.")
486
 
487
  use_tta = use_tta == "True"
488
  base_name = os.path.splitext(os.path.basename(audio))[0]
 
499
 
500
  # Şu anki modeli işle
501
  current_idx = state["current_model_idx"]
502
+ logger.info(f"Current model index: {current_idx}, total models: {len(model_keys)}")
503
+
504
+ # Tüm modeller işlendiyse ensemble işlemini yap
505
  if current_idx >= len(model_keys):
 
506
  logger.info("All models processed, running ensemble...")
507
  progress(0.9, desc="Running ensemble...")
508
 
 
648
  elapsed = time.time() - start_time
649
  logger.info(f"Model {model_key} completed in {elapsed:.2f}s")
650
 
651
+ # Eğer bu son modelse, ensemble işlemini hemen başlat
652
+ if state["current_model_idx"] >= len(model_keys):
653
+ logger.info("Last model processed, running ensemble immediately...")
654
+ return auto_ensemble_process(audio, model_keys, state, seg_size, overlap, out_format, use_tta, model_dir, output_dir, norm_thresh, amp_thresh, batch_size, ensemble_method, exclude_stems, weights_str, progress)
655
+
656
  # Çıktılar
657
  file_list = state["processed_stems"]
658
  status = f"Model {model_key} (Model {current_idx + 1}/{len(model_keys)}) completed in {elapsed:.2f}s<br>Click 'Run Ensemble!' to process the next model.<br>Processed stems:<ul>"
 
676
  logger.warning(f"Failed to delete temporary file {temp_audio_path}: {e}")
677
  if torch.cuda.is_available():
678
  torch.cuda.empty_cache()
679
+ logger.info("GPU memory cleared")
680
 
681
  def update_roformer_models(category):
682
  """Update Roformer model dropdown based on selected category."""