Update app.py
app.py CHANGED
@@ -42,9 +42,16 @@ class AudioSeparatorD:
             "cuda_version": torch.version.cuda if torch.cuda.is_available() else "N/A",
             "mps_available": hasattr(torch.backends, "mps") and torch.backends.mps.is_available(),
             "device": "cuda" if torch.cuda.is_available() else ("mps" if hasattr(torch.backends, "mps") and torch.backends.mps.is_available() else "cpu"),
-            "memory_total": torch.cuda.get_device_properties(0).total_memory if torch.cuda.is_available() else 0,
-            "memory_allocated": torch.cuda.memory_allocated() if torch.cuda.is_available() else 0
         }
+
+        # Only add memory info if CUDA is available
+        if torch.cuda.is_available():
+            info["memory_total"] = torch.cuda.get_device_properties(0).total_memory
+            info["memory_allocated"] = torch.cuda.memory_allocated()
+        else:
+            info["memory_total"] = 0
+            info["memory_allocated"] = 0
+
         return info
 
     def analyze_audio_characteristics(self, audio_file: str) -> Dict:
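This hunk moves the CUDA memory fields out of the dict literal into an explicit branch, so torch.cuda is only queried when a GPU is actually present. Both get_device_properties(0).total_memory and memory_allocated() report bytes, so a caller that wants readable output still has to convert. A minimal sketch of that conversion (format_memory is illustrative, not part of the commit):

    def format_memory(info: dict) -> str:
        """Render the byte counts from the device-info dict as GiB."""
        gib = 1024 ** 3
        used = info["memory_allocated"] / gib
        total = info["memory_total"] / gib
        return f"{used:.2f} GiB allocated of {total:.2f} GiB total"

    # With the CPU-only fallback values set by this hunk:
    print(format_memory({"memory_total": 0, "memory_allocated": 0}))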
@@ -468,7 +475,11 @@ class AudioSeparatorD:
 
         # Set default model if not specified
         if model_name is None:
-
+            models = self.get_available_models()
+            if models:
+                model_name = list(models.keys())[0]  # Use first available model
+            else:
+                return False, "No models available"
 
         # Initialize separator with updated parameters
         self.separator = Separator(
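The new fallback picks the first entry of get_available_models() as the default and returns a (False, "No models available") tuple when the registry is empty, which suggests the enclosing method reports status as a (bool, message) pair rather than raising. A self-contained sketch of that calling convention (the stub class and load_model name are assumptions; the diff does not show the enclosing signature):

    class _Stub:  # stand-in for AudioSeparatorD, only to show the return shape
        def load_model(self, model_name=None):
            return False, "No models available"

    ok, error = _Stub().load_model(model_name=None)
    if not ok:
        print(f"Separation unavailable: {error}")

Note that list(models.keys())[0] relies on dict insertion order for "first available"; next(iter(models)) is an equivalent, slightly cheaper spelling.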
@@ -601,7 +612,7 @@ class AudioSeparatorD:
 
         return custom_params
 
-    def (self):
+    def get_phistory(self):
         """Get enhanced processing history with analytics"""
         if not self.processing_history:
             return "No processing history available"
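Before this change the line read def (self):, a nameless definition that is a SyntaxError in Python; the hunk names the method get_phistory. Given the early return, it reports history as a plain string, so a hypothetical call (the instance name is assumed) is simply:

    summary = separator_app.get_phistory()
    print(summary)  # "No processing history available" until something is processed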
@@ -688,7 +699,7 @@ def create_interface():
         # Model dropdown with enhanced display
         model_dropdown = gr.Dropdown(
             choices=list(model_list.keys()) if model_list else [],
-            value=
+            value=list(model_list.keys())[0] if model_list else None,
             label="🤖 AI Model Selection",
             info="Choose an AI model or use auto-selection",
             elem_id="model_dropdown"
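The added value= keeps the dropdown's default consistent with its choices: the first model name when any are known, otherwise None so Gradio renders an empty selection instead of failing on a missing default. The same pattern in isolation (the model names here are placeholders, not from the diff):

    import gradio as gr

    model_list = {"model-a": "...", "model-b": "..."}  # placeholder registry

    model_dropdown = gr.Dropdown(
        choices=list(model_list.keys()) if model_list else [],
        value=list(model_list.keys())[0] if model_list else None,
        label="Model",
    )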
@@ -1186,7 +1197,7 @@ if __name__ == "__main__":
|
|
| 1186 |
interface = create_interface()
|
| 1187 |
interface.launch(
|
| 1188 |
server_port=7860,
|
| 1189 |
-
theme="
|
| 1190 |
share=True,
|
| 1191 |
debug=True
|
| 1192 |
)
|
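One caveat with this last hunk: in current Gradio releases, theme is a parameter of the gr.Blocks / gr.Interface constructor, not of launch(), so launch(theme=...) is likely to raise a TypeError at startup. Assuming create_interface() builds a gr.Blocks, the conventional placement would be (a sketch, not the repo's actual code):

    import gradio as gr

    # Theme belongs on the constructor, not on launch().
    with gr.Blocks(theme="NeoPy/Soft") as interface:
        ...  # UI components

    interface.launch(
        server_port=7860,
        share=True,
        debug=True,
    )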