Spaces:
Sleeping
Sleeping
alrichardbollans
committed on
Commit
·
ef8459c
1
Parent(s):
3e5ea4d
Limit processing
Browse files
python_utils/running_model.py
CHANGED
|
@@ -121,7 +121,8 @@ def run_predictions(files):
|
|
| 121 |
results = []
|
| 122 |
|
| 123 |
## When using GPU, run single instance
|
| 124 |
-
if
|
|
|
|
| 125 |
for file in files:
|
| 126 |
# Run prediction with original BGR image
|
| 127 |
predictor_ = load_model()
|
|
@@ -129,14 +130,15 @@ def run_predictions(files):
|
|
| 129 |
|
| 130 |
results.append(prediction_output)
|
| 131 |
else:
|
| 132 |
-
## Else use multiprocessing to run in parallel
|
| 133 |
-
|
|
|
|
| 134 |
# Set up to load one model per worker process
|
| 135 |
def init_worker():
|
| 136 |
global predictor
|
| 137 |
predictor = load_model() # Load once per worker process
|
| 138 |
|
| 139 |
-
with multiprocessing.Pool(initializer=init_worker) as pool:
|
| 140 |
results = pool.map(process_file, files)
|
| 141 |
return results
|
| 142 |
|
|
|
|
| 121 |
results = []
|
| 122 |
|
| 123 |
## When using GPU, run single instance
|
| 124 |
+
## Or if not checking many files, as loading multiple models isn't worthwhile
|
| 125 |
+
if torch.cuda.is_available() or len(files) < 4:
|
| 126 |
for file in files:
|
| 127 |
# Run prediction with original BGR image
|
| 128 |
predictor_ = load_model()
|
|
|
|
| 130 |
|
| 131 |
results.append(prediction_output)
|
| 132 |
else:
|
| 133 |
+
## Else use multiprocessing to run in parallel with 2 processes
|
| 134 |
+
|
| 135 |
+
print(f'Using {multiprocessing.cpu_count()} cpus apparently')
|
| 136 |
# Set up to load one model per worker process
|
| 137 |
def init_worker():
|
| 138 |
global predictor
|
| 139 |
predictor = load_model() # Load once per worker process
|
| 140 |
|
| 141 |
+
with multiprocessing.Pool(initializer=init_worker, processes=2) as pool:
|
| 142 |
results = pool.map(process_file, files)
|
| 143 |
return results
|
| 144 |
|