samwell committed on
Commit
a91472c
·
verified ·
1 Parent(s): 1116cdd

Upload train_medsiglip.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. train_medsiglip.py +70 -1
train_medsiglip.py CHANGED
@@ -424,6 +424,75 @@ def validate(model, loader, device):
424
  return total_loss / num_batches, total_iou / num_batches
425
 
426
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
427
  def download_dataset(config):
428
  """Download and extract dataset"""
429
  config.data_dir.mkdir(parents=True, exist_ok=True)
@@ -431,7 +500,7 @@ def download_dataset(config):
431
 
432
  if not (config.data_dir / "train").exists():
433
  print(f"Downloading dataset from {config.data_url}...")
434
- urllib.request.urlretrieve(config.data_url, zip_path)
435
 
436
  print("Extracting...")
437
  with zipfile.ZipFile(zip_path, 'r') as z:
 
424
  return total_loss / num_batches, total_iou / num_batches
425
 
426
 
427
def download_with_retry(url, dest_path, max_retries=3):
    """Download *url* to *dest_path*, trying wget, then curl, then urllib.

    External tools are preferred for large files because they can resume a
    partial transfer across retries (``wget -c``, ``curl -C -``); urllib is
    the pure-Python last resort with chunked reads and a progress line.

    Args:
        url: Source URL to fetch.
        dest_path: Destination file path (``pathlib.Path`` or ``str``).
        max_retries: Attempts per downloader before falling through to the
            next one (default 3).

    Returns:
        True once a non-empty file has been written to ``dest_path``.

    Raises:
        RuntimeError: if every downloader exhausts its retries.
    """
    import shutil
    import subprocess
    import time
    import urllib.request
    from pathlib import Path

    dest_path = Path(dest_path)  # accept str callers too; .exists()/.stat() below need Path

    def _tool_download(cmd, label):
        """Retry loop for one external downloader; True iff a non-empty file lands."""
        for attempt in range(max_retries):
            try:
                print(f"Download attempt {attempt + 1}/{max_retries} with {label}...")
                subprocess.run(cmd, check=True, capture_output=True, text=True)
                # A zero-byte file counts as failure (e.g. server returned nothing).
                if dest_path.exists() and dest_path.stat().st_size > 0:
                    return True
            except subprocess.CalledProcessError as e:
                print(f"{label} failed: {e}")
                if attempt < max_retries - 1:
                    time.sleep(5)  # brief back-off before retrying
        return False

    # Try wget first (more robust for large files; -c resumes partial downloads).
    if shutil.which("wget"):
        if _tool_download(["wget", "-c", "-O", str(dest_path), url], "wget"):
            return True

    # Fallback to curl (-L follows redirects, "-C -" resumes).
    if shutil.which("curl"):
        if _tool_download(["curl", "-L", "-C", "-", "-o", str(dest_path), url], "curl"):
            return True

    # Last resort: urllib with chunked download
    print("Falling back to urllib chunked download...")
    for attempt in range(max_retries):
        try:
            with urllib.request.urlopen(url, timeout=300) as response:
                total_size = int(response.headers.get('content-length', 0))
                downloaded = 0
                chunk_size = 8192 * 16  # 128KB chunks
                with open(dest_path, 'wb') as f:
                    while True:
                        chunk = response.read(chunk_size)
                        if not chunk:
                            break
                        f.write(chunk)
                        downloaded += len(chunk)
                        if total_size > 0:  # progress only when server sent a length
                            pct = (downloaded / total_size) * 100
                            print(f"\rDownloaded {downloaded / 1e6:.1f}/{total_size / 1e6:.1f} MB ({pct:.1f}%)", end="", flush=True)
                print()
                return True
        except Exception as e:  # network errors vary widely (URLError, timeout, OSError)
            print(f"urllib attempt {attempt + 1} failed: {e}")
            if attempt < max_retries - 1:
                time.sleep(5)

    # RuntimeError is still caught by callers expecting Exception.
    raise RuntimeError(f"Failed to download {url} after {max_retries} attempts")
494
+
495
+
496
  def download_dataset(config):
497
  """Download and extract dataset"""
498
  config.data_dir.mkdir(parents=True, exist_ok=True)
 
500
 
501
  if not (config.data_dir / "train").exists():
502
  print(f"Downloading dataset from {config.data_url}...")
503
+ download_with_retry(config.data_url, zip_path)
504
 
505
  print("Extracting...")
506
  with zipfile.ZipFile(zip_path, 'r') as z: