raylim committed on
Commit
658b7b2
·
unverified ·
1 Parent(s): c6bd865

Drastically reduce chunk sizes for ZeroGPU reliability

Browse files

- CTransPath: 15,000 → 2,000 tiles per chunk (87% reduction)
- Optimus: 10,000 → 1,500 tiles per chunk (85% reduction)
- Even 1881 tiles caused token expiry, so being very conservative
- Each chunk now takes ~7-10s processing, leaves huge buffer
- Should prevent any token expiry issues

Files changed (1) hide show
  1. src/mosaic/analysis.py +5 -4
src/mosaic/analysis.py CHANGED
@@ -81,8 +81,8 @@ def _extract_ctranspath_features(coords, slide_path, attrs, num_workers):
81
  if IS_ZEROGPU:
82
  num_workers = 0
83
  logger.info("Running CTransPath on ZeroGPU: setting num_workers=0")
84
- # Split into chunks to stay within GPU time limits
85
- chunk_size = 15000
86
  total_tiles = len(coords)
87
  logger.info(f"Processing {total_tiles} tiles in chunks of {chunk_size}")
88
  else:
@@ -176,8 +176,9 @@ def _extract_optimus_features(filtered_coords, slide_path, attrs, num_workers):
176
  if IS_ZEROGPU:
177
  num_workers = 0
178
  logger.info("Running Optimus on ZeroGPU: setting num_workers=0")
179
- # Split into chunks to stay within GPU time limits
180
- chunk_size = 10000
 
181
  total_tiles = len(filtered_coords)
182
  logger.info(f"Processing {total_tiles} tiles in chunks of {chunk_size}")
183
  else:
 
81
  if IS_ZEROGPU:
82
  num_workers = 0
83
  logger.info("Running CTransPath on ZeroGPU: setting num_workers=0")
84
+ # Split into smaller chunks to stay within GPU time limits
85
+ chunk_size = 2000
86
  total_tiles = len(coords)
87
  logger.info(f"Processing {total_tiles} tiles in chunks of {chunk_size}")
88
  else:
 
176
  if IS_ZEROGPU:
177
  num_workers = 0
178
  logger.info("Running Optimus on ZeroGPU: setting num_workers=0")
179
+ # Split into very small chunks to stay within GPU time limits
180
+ # Even 1881 tiles caused expiry, so use 1500 tiles per chunk
181
+ chunk_size = 1500
182
  total_tiles = len(filtered_coords)
183
  logger.info(f"Processing {total_tiles} tiles in chunks of {chunk_size}")
184
  else: