peterlllmm committed on
Commit
2492250
·
verified ·
1 Parent(s): 79a2ae9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -11
app.py CHANGED
@@ -7,9 +7,18 @@ import spaces
7
  import re
8
  from typing import List, Tuple
9
 
10
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
11
  print(f"🚀 Running on device: {DEVICE}")
12
 
 
 
 
 
 
 
 
 
13
  # --- Global Model Initialization ---
14
  MODEL = None
15
 
@@ -20,12 +29,43 @@ def get_or_load_model():
20
  if MODEL is None:
21
  print("Model not loaded, initializing...")
22
  try:
23
- MODEL = ChatterboxTTS.from_pretrained(DEVICE)
24
- if hasattr(MODEL, 'to') and str(MODEL.device) != DEVICE:
25
- MODEL.to(DEVICE)
26
- print(f"Model loaded successfully. Internal device: {getattr(MODEL, 'device', 'N/A')}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  except Exception as e:
28
- print(f"Error loading model: {e}")
29
  raise
30
  return MODEL
31
 
@@ -38,9 +78,7 @@ except Exception as e:
38
  def set_seed(seed: int):
39
  """Sets the random seed for reproducibility across torch, numpy, and random."""
40
  torch.manual_seed(seed)
41
- if DEVICE == "cuda":
42
- torch.cuda.manual_seed(seed)
43
- torch.cuda.manual_seed_all(seed)
44
  random.seed(seed)
45
  np.random.seed(seed)
46
 
@@ -155,7 +193,7 @@ def concatenate_audio_chunks(audio_chunks: List[Tuple[int, np.ndarray]],
155
 
156
  return sample_rate, np.concatenate(combined_audio)
157
 
158
- @spaces.GPU
159
  def generate_tts_audio_chunked(
160
  text_input: str,
161
  audio_prompt_path_input: str = None,
@@ -307,4 +345,4 @@ with gr.Blocks() as demo:
307
  outputs=[audio_output],
308
  )
309
 
310
- demo.launch(mcp_server=True)
 
7
  import re
8
  from typing import List, Tuple
9
 
10
# Force CPU usage: this Space has no GPU, so every tensor must live on the CPU.
DEVICE = "cpu"
print(f"🚀 Running on device: {DEVICE}")

# Patch torch.load so checkpoints that were serialized with CUDA tensors can
# still be deserialized on a CPU-only machine: without a map_location,
# torch.load tries to restore storages onto the (absent) CUDA device and fails.
original_load = torch.load


def patched_load(f, map_location=None, **kwargs):
    """Drop-in replacement for ``torch.load`` that defaults ``map_location`` to CPU.

    Callers that pass an explicit ``map_location`` keep full control; only the
    omitted/``None`` case is rewritten to ``'cpu'``. All other positional and
    keyword arguments are forwarded unchanged to the original ``torch.load``.
    """
    if map_location is None:
        map_location = 'cpu'  # Always map to CPU
    return original_load(f, map_location=map_location, **kwargs)


# Tag the wrapper and guard the assignment so re-executing this module
# (e.g. a Gradio hot-reload) does not stack wrappers on top of each other,
# which would otherwise grow an unbounded chain of patched loaders.
patched_load._cpu_patch = True
if not getattr(torch.load, "_cpu_patch", False):
    torch.load = patched_load
21
+
22
  # --- Global Model Initialization ---
23
  MODEL = None
24
 
 
29
  if MODEL is None:
30
  print("Model not loaded, initializing...")
31
  try:
32
+ # Try multiple loading strategies for CPU
33
+ print("Attempting to load model on CPU...")
34
+
35
+ # Strategy 1: Direct CPU loading
36
+ try:
37
+ MODEL = ChatterboxTTS.from_pretrained("cpu")
38
+ print("✅ Model loaded successfully with direct CPU method")
39
+ except Exception as e1:
40
+ print(f"Direct CPU loading failed: {e1}")
41
+
42
+ # Strategy 2: Try with explicit map_location if supported
43
+ try:
44
+ MODEL = ChatterboxTTS.from_pretrained(DEVICE, map_location='cpu')
45
+ print("✅ Model loaded successfully with map_location method")
46
+ except Exception as e2:
47
+ print(f"map_location method failed: {e2}")
48
+
49
+ # Strategy 3: Load with default then move to CPU
50
+ try:
51
+ MODEL = ChatterboxTTS.from_pretrained()
52
+ if hasattr(MODEL, 'to'):
53
+ MODEL = MODEL.to('cpu')
54
+ print("✅ Model loaded successfully with default then CPU move")
55
+ except Exception as e3:
56
+ print(f"All loading strategies failed. Last error: {e3}")
57
+ raise e3
58
+
59
+ # Ensure model is on CPU
60
+ if hasattr(MODEL, 'to'):
61
+ MODEL = MODEL.to('cpu')
62
+ if hasattr(MODEL, 'device'):
63
+ print(f"Model device: {MODEL.device}")
64
+
65
+ print(f"Model loaded successfully on CPU")
66
+
67
  except Exception as e:
68
+ print(f"CRITICAL: All model loading attempts failed: {e}")
69
  raise
70
  return MODEL
71
 
 
78
def set_seed(seed: int):
    """Sets the random seed for reproducibility across torch, numpy, and random."""
    # Seed every RNG the app draws from; CUDA seeding is intentionally
    # omitted because this deployment runs on CPU only.
    for seeder in (torch.manual_seed, random.seed, np.random.seed):
        seeder(seed)
84
 
 
193
 
194
  return sample_rate, np.concatenate(combined_audio)
195
 
196
+ @spaces.GPU # This decorator might not work on CPU, but keeping it for compatibility
197
  def generate_tts_audio_chunked(
198
  text_input: str,
199
  audio_prompt_path_input: str = None,
 
345
  outputs=[audio_output],
346
  )
347
 
348
+ demo.launch(share=True, mcp_server=True)