Spaces:
Running
on
Zero
Update uvr5/multiprocess_cuda_infer.py
Browse files
uvr5/multiprocess_cuda_infer.py
CHANGED
|
@@ -222,9 +222,9 @@ class Inference():
|
|
| 222 |
return x.reshape([-1,2,self.chunk_size])
|
| 223 |
|
| 224 |
|
| 225 |
-
def load_model(self, model_path, threads):
|
| 226 |
model = onnx.load_model(model_path)
|
| 227 |
-
if torch.cuda.is_available():
|
| 228 |
providers = [("CUDAExecutionProvider", {"device_id": torch.cuda.current_device(),
|
| 229 |
"user_compute_stream": str(torch.cuda.current_stream().cuda_stream)})]
|
| 230 |
else:
|
|
|
|
| 222 |
return x.reshape([-1,2,self.chunk_size])
|
| 223 |
|
| 224 |
|
| 225 |
+
def load_model(self, model_path, threads, device='cpu'):
|
| 226 |
model = onnx.load_model(model_path)
|
| 227 |
+
if torch.cuda.is_available() and device != 'cpu':
|
| 228 |
providers = [("CUDAExecutionProvider", {"device_id": torch.cuda.current_device(),
|
| 229 |
"user_compute_stream": str(torch.cuda.current_stream().cuda_stream)})]
|
| 230 |
else:
|