peoplepilot committed on
Commit
33cec68
·
1 Parent(s): b0890ab

use CPU instead of GPU for testing

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -42,9 +42,9 @@ EXAMPLES_DIR = "audio"
42
  EXAMPLE_FILE = os.path.join(EXAMPLES_DIR, "PromoterClipMono.wav")
43
 
44
  # Chunk processing settings
45
- DEFAULT_CHUNK_DURATION = 30 # seconds per chunk
46
- OVERLAP_DURATION = 2 # seconds of overlap between chunks
47
- MAX_DURATION_WITHOUT_CHUNKING = 60 # auto-chunk if longer than this
48
 
49
  # Global model cache
50
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -153,7 +153,7 @@ def save_audio(tensor, sample_rate):
153
  return tmp.name
154
 
155
 
156
- @spaces.GPU(duration=300)
157
  def separate_audio(model_name, file_path, text_prompt, chunk_duration=DEFAULT_CHUNK_DURATION, progress=gr.Progress()):
158
  global model, processor
159
 
 
42
  EXAMPLE_FILE = os.path.join(EXAMPLES_DIR, "PromoterClipMono.wav")
43
 
44
  # Chunk processing settings
45
+ DEFAULT_CHUNK_DURATION = 5 # seconds per chunk
46
+ OVERLAP_DURATION = 1 # seconds of overlap between chunks
47
+ MAX_DURATION_WITHOUT_CHUNKING = 10 # auto-chunk if longer than this
48
 
49
  # Global model cache
50
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
153
  return tmp.name
154
 
155
 
156
+ #@spaces.GPU(duration=300)
157
  def separate_audio(model_name, file_path, text_prompt, chunk_duration=DEFAULT_CHUNK_DURATION, progress=gr.Progress()):
158
  global model, processor
159