"""
Download datasets from EMBL's BioImage Archive and extract their metadata.
"""

import re
import time
from pathlib import Path
from urllib.parse import urljoin

import requests
import yaml
from bs4 import BeautifulSoup


class BioImageArchiveDownloader:
    """Download images and metadata from BioImage Archive study pages."""

    def __init__(self, base_data_folder=""):
        self.base_data_folder = Path(base_data_folder)
        self.base_data_folder.mkdir(parents=True, exist_ok=True)
        self.session = requests.Session()
        # Some EBI endpoints reject requests without a browser-like User-Agent
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })

    def get_next_dataset_number(self):
        """Get the next available dataset number (zero-padded, e.g. "001")."""
        numbers = []
        for folder in self.base_data_folder.iterdir():
            if folder.is_dir():
                match = re.match(r'dataset_(\d+)', folder.name)
                if match:
                    numbers.append(int(match.group(1)))

        next_num = max(numbers) + 1 if numbers else 1
        return f"{next_num:03d}"

    def extract_accession_from_url(self, url):
        """Extract the dataset accession (e.g. S-BIAD7) from a page URL."""
        patterns = [
            r'/galleries/(S-[A-Z]+[0-9]+)',
            r'/pages/(S-[A-Z]+[0-9]+)',
        ]
        for pattern in patterns:
            match = re.search(pattern, url)
            if match:
                return match.group(1)
        return None
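
    # Hypothetical REPL check:
    #   >>> BioImageArchiveDownloader().extract_accession_from_url(
    #   ...     "https://www.ebi.ac.uk/bioimage-archive/galleries/S-BIAD7.html")
    #   'S-BIAD7'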

    def parse_dataset_page(self, url):
        """Parse a dataset page and extract metadata."""
        print(f"Fetching dataset page: {url}")
        response = self.session.get(url, timeout=60)  # avoid hanging indefinitely
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        # Basic provenance
        metadata = {
            'source_url': url,
            'download_timestamp': time.strftime('%Y-%m-%d %H:%M:%S'),
        }

        # Representative preview image, if the page exposes one
        large_preview_url = self._find_large_preview_image(soup, url)
        if large_preview_url:
            metadata['large_preview_url'] = large_preview_url
            print(f"Found large preview image: {large_preview_url}")
        else:
            print("No large preview image found on the page")

        # Study title
        title_elem = soup.find('h1')
        if title_elem:
            metadata['study_title'] = title_elem.get_text(strip=True)

        # Labelled fields are rendered as a text label followed by a value <div>
        study_info = {}
        labelled_fields = [
            (r'Organism', 'organism'),
            (r'Imaging type', 'imaging_type'),
            (r'License', 'license'),
            (r'By|Author', 'author'),
            (r'Released', 'release_date'),
        ]
        for label_pattern, key in labelled_fields:
            label_elem = soup.find(string=re.compile(label_pattern, re.I))
            if label_elem:
                value_elem = label_elem.find_next('div')
                if value_elem:
                    study_info[key] = value_elem.get_text(strip=True)

        metadata['study_info'] = study_info

        # Content summary ("N images", "M other files")
        content_info = {}
        content_elem = soup.find(string=re.compile(r'Content', re.I))
        if content_elem:
            content_text = content_elem.get_text(strip=True)

            images_match = re.search(r'(\d+)\s+images?', content_text)
            if images_match:
                content_info['total_images'] = int(images_match.group(1))

            files_match = re.search(r'(\d+)\s+other\s+files?', content_text)
            if files_match:
                content_info['other_files'] = int(files_match.group(1))

        metadata['content_info'] = content_info

        # Image tables: the first table lists images with previews,
        # the second (when present) lists all images
        images = []
        tables = soup.find_all('table')

        if len(tables) > 0:
            viewable_table = tables[0]
            print("Parsing viewable images table (with previews)...")
            rows = viewable_table.find_all('tr')[1:]  # skip the header row
            for row in rows:
                cells = row.find_all('td')
                if len(cells) >= 6:
                    image_info = self._parse_image_row(cells, url, has_preview=True)
                    if image_info:
                        images.append(image_info)

        if len(tables) > 1:
            all_images_table = tables[1]
            print("Parsing all images table (complete list)...")
            rows = all_images_table.find_all('tr')[1:]
            print(f"Found {len(rows)} rows in all images table")

            if len(rows) == 0:
                print("All images table appears to be empty (likely loaded dynamically)")
                print("Only images with previews are available for download")
            else:
                for row in rows:
                    cells = row.find_all('td')
                    if len(cells) >= 4:
                        image_id_text = cells[0].get_text(strip=True)

                        # Skip images already collected from the previews table
                        existing_image = next(
                            (img for img in images if img.get('image_id') == image_id_text),
                            None)
                        if not existing_image:
                            image_info = self._parse_image_row(cells, url, has_preview=False)
                            if image_info:
                                images.append(image_info)
                        else:
                            print(f"Image {image_id_text} already has a preview entry, "
                                  f"skipping duplicate from all images table")

        # Sort numerically by image ID ("IM12" -> 12); unparsable IDs sort last
        def image_sort_key(img):
            num = img.get('image_id', '').replace('IM', '')
            return int(num) if num.isdigit() else 999

        images.sort(key=image_sort_key)

        metadata['images'] = images
        print(f"Found {len(images)} images in the dataset")

        return metadata

    def _parse_image_row(self, cells, url, has_preview=False):
        """Parse a table row into an image-info dict."""
        image_info = {}

        # Image ID is always the first column
        image_id_text = cells[0].get_text(strip=True)
        if image_id_text:
            image_info['image_id'] = image_id_text

        # The filename column shifts by one when a preview column is present
        filename_col = 2 if has_preview else 1
        if len(cells) > filename_col:
            filename_text = cells[filename_col].get_text(strip=True)
            if filename_text:
                image_info['filename'] = filename_text

        # Dimensions, e.g. "(1, 2, 50, 512, 512)" in TCZYX order
        if has_preview and len(cells) > 3:
            dimensions_text = cells[3].get_text(strip=True)
            if dimensions_text and dimensions_text != 'Unavailable':
                dims_match = re.search(r'\(([^)]+)\)', dimensions_text)
                if dims_match:
                    try:
                        dims = [int(x.strip()) for x in dims_match.group(1).split(',')]
                    except ValueError:
                        dims = []
                    if dims:
                        image_info['dimensions'] = {
                            'T': dims[0] if len(dims) > 0 else 1,
                            'C': dims[1] if len(dims) > 1 else 1,
                            'Z': dims[2] if len(dims) > 2 else 1,
                            'Y': dims[3] if len(dims) > 3 else 1,
                            'X': dims[4] if len(dims) > 4 else 1,
                        }

        # Download link lives in the actions column
        actions_col = 5 if has_preview else 3
        if len(cells) > actions_col:
            actions_cell = cells[actions_col]
            for link in actions_cell.find_all('a', href=True):
                href = link.get('href')
                if href and ('download' in href.lower() or 'files' in href.lower()):
                    # Resolve relative links so they can be fetched directly
                    image_info['download_url'] = self._absolute_url(href, url)
                    break

        # Thumbnail preview, when the row has one
        if has_preview and len(cells) > 1:
            preview_img = cells[1].find('img')
            if preview_img:
                preview_src = preview_img.get('src')
                if preview_src:
                    image_info['preview_url'] = self._absolute_url(preview_src, url)

        return image_info if image_info else None
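
    # For a typical previewed row the returned dict looks roughly like this
    # (values are illustrative, not taken from a real dataset):
    #   {'image_id': 'IM1', 'filename': 'sample.tif',
    #    'dimensions': {'T': 1, 'C': 2, 'Z': 50, 'Y': 512, 'X': 512},
    #    'download_url': 'https://...', 'preview_url': 'https://...'}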

    def _absolute_url(self, src, base_url):
        """Resolve a possibly site-relative URL against the page URL."""
        if src.startswith('/'):
            return 'https://www.ebi.ac.uk' + src
        return urljoin(base_url, src)

    def _find_large_preview_image(self, soup, base_url):
        """Find the larger representative preview image on the page.

        Tries heuristics in decreasing order of confidence: explicitly
        "representative" images, large-size name hints, preview-like names,
        and finally dimensions embedded in the filename.
        """
        img_tags = soup.find_all('img')
        print(f"Found {len(img_tags)} images on the page")

        # 1. Images explicitly named representative/overview/sample,
        #    preferring ones whose name hints at a large size
        for img in img_tags:
            src = img.get('src', '')
            if not src:
                continue
            full_url = self._absolute_url(src, base_url)
            if any(pattern in src.lower() for pattern in [
                'representative', 'overview', 'sample'
            ]):
                if any(size in src for size in ['512', '1024', '2048', 'large', 'big']):
                    print(f"Found representative image: {src}")
                    return full_url

        # 2. Any non-thumbnail image whose name hints at a large size
        large_images = []
        for img in img_tags:
            src = img.get('src', '')
            if not src:
                continue
            full_url = self._absolute_url(src, base_url)
            if any(size in src for size in ['512', '1024', '2048']) and 'thumb' not in src.lower():
                print(f"Found potential large image: {src}")
                large_images.append(full_url)
        if large_images:
            return large_images[0]

        # 3. Preview-like names, skipping obvious thumbnails
        for img in img_tags:
            src = img.get('src', '')
            if not src:
                continue
            full_url = self._absolute_url(src, base_url)
            if 'thumb' in src.lower() or '128' in src:
                continue
            if any(pattern in src.lower() for pattern in [
                'preview', 'view', 'display', 'show'
            ]):
                print(f"Found potential preview image: {src}")
                return full_url

        # 4. Dimensions embedded in the filename (e.g. "640x480"); pick the
        #    largest image of at least 256x256
        dimension_images = []
        for img in img_tags:
            src = img.get('src', '')
            if not src:
                continue
            full_url = self._absolute_url(src, base_url)
            dim_match = re.search(r'(\d+)[x\-](\d+)', src)
            if dim_match:
                width, height = int(dim_match.group(1)), int(dim_match.group(2))
                if width >= 256 and height >= 256:
                    dimension_images.append((full_url, width * height))
                    print(f"Found dimensioned image: {src} ({width}x{height})")
        if dimension_images:
            largest = max(dimension_images, key=lambda x: x[1])
            print(f"Selected largest image: {largest[0]} (area: {largest[1]})")
            return largest[0]

        print("No suitable large preview image found")
        return None

    def _find_best_individual_preview(self, images_to_download):
        """Pick the largest available individual preview thumbnail."""
        if not images_to_download:
            return None

        preview_candidates = []
        for image_info in images_to_download:
            preview_url = image_info.get('preview_url')
            if not preview_url:
                continue
            image_id = image_info.get('image_id', 'Unknown')

            # Rank by pixel area when dimensions appear in the URL
            dim_match = re.search(r'(\d+)[x\-](\d+)', preview_url)
            if dim_match:
                width, height = int(dim_match.group(1)), int(dim_match.group(2))
                area = width * height
                preview_candidates.append((preview_url, area, image_id))
                print(f"Found preview candidate: {image_id} - {width}x{height} (area: {area})")
            else:
                # Assume a standard 128x128 thumbnail when no dimensions are given
                preview_candidates.append((preview_url, 128 * 128, image_id))
                print(f"Found preview candidate: {image_id} - standard thumbnail")

        if not preview_candidates:
            return None

        preview_candidates.sort(key=lambda x: x[1], reverse=True)
        best_url, best_area, best_id = preview_candidates[0]
        print(f"Selected best individual preview: {best_id} (area: {best_area})")
        return best_url

    def download_image(self, image_url, local_path):
        """Download a single file to local_path, streaming in chunks."""
        print(f"Downloading: {image_url}")
        response = self.session.get(image_url, stream=True, timeout=60)
        response.raise_for_status()

        with open(local_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)

        print(f"Downloaded: {local_path}")
        return local_path
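
    # A minimal retry wrapper (sketch, not called elsewhere in this script;
    # the attempt count and back-off constants are illustrative assumptions):
    def download_image_with_retries(self, image_url, local_path, attempts=3):
        """Retry download_image on transient network errors."""
        for attempt in range(1, attempts + 1):
            try:
                return self.download_image(image_url, local_path)
            except requests.RequestException as e:
                print(f"Attempt {attempt}/{attempts} failed: {e}")
                if attempt == attempts:
                    raise
                time.sleep(2 * attempt)  # simple linear back-off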

    def download_dataset(self, dataset_url, image_id=None, download_files=True):
        """Download a dataset (or a single image of it) from BioImage Archive."""
        dataset_num = self.get_next_dataset_number()
        dataset_folder = self.base_data_folder / f"dataset_{dataset_num}"
        dataset_folder.mkdir(parents=True, exist_ok=True)

        print(f"Creating dataset folder: {dataset_folder}")

        accession = self.extract_accession_from_url(dataset_url)
        if not accession:
            raise ValueError("Could not extract accession from URL")

        metadata = self.parse_dataset_page(dataset_url)
        metadata['accession'] = accession
        metadata['dataset_number'] = dataset_num
        metadata['download_files'] = download_files

        downloaded_files = []
        images_to_download = metadata['images']

        # Optionally restrict to a single image ID
        if image_id:
            images_to_download = [img for img in images_to_download
                                  if img.get('image_id') == image_id]
            if not images_to_download:
                available_ids = [img.get('image_id') for img in metadata['images']]
                print(f"Available Image IDs: {available_ids}")
                print("Note: Only images with preview thumbnails are available for download.")
                print("The full image list is loaded dynamically and not accessible "
                      "via simple HTTP requests.")
                raise ValueError(f"Image ID '{image_id}' not found. Available IDs: {available_ids}")
            print(f"Filtering to Image ID: {image_id}")
        else:
            images_to_download = images_to_download[:1]
            print("No Image ID specified, downloading first image")

        # Save a dataset preview: first the large representative image,
        # then the best individual thumbnail as a fallback
        preview_files = []
        preview_downloaded = False

        if metadata.get('large_preview_url'):
            large_preview_filename = "dataset_preview.png"
            large_preview_path = dataset_folder / large_preview_filename
            try:
                print("Downloading large representative preview image...")
                self.download_image(metadata['large_preview_url'], large_preview_path)
                preview_files.append({
                    'filename': large_preview_filename,
                    'local_path': str(large_preview_path),
                    'image_id': 'representative',
                    'preview_url': metadata['large_preview_url'],
                    'type': 'large_representative'
                })
                print(f"Successfully downloaded large preview: {large_preview_filename}")
                preview_downloaded = True
            except Exception as e:
                print(f"Failed to download large preview image: {e}")

        if not preview_downloaded:
            best_preview_url = self._find_best_individual_preview(images_to_download)
            if best_preview_url:
                preview_filename = "dataset_preview.png"
                preview_path = dataset_folder / preview_filename
                try:
                    print("Downloading best available individual preview image...")
                    self.download_image(best_preview_url, preview_path)
                    preview_files.append({
                        'filename': preview_filename,
                        'local_path': str(preview_path),
                        'image_id': 'best_available',
                        'preview_url': best_preview_url,
                        'type': 'best_individual'
                    })
                    print(f"Successfully downloaded best preview: {preview_filename}")
                    preview_downloaded = True
                except Exception as e:
                    print(f"Failed to download best individual preview: {e}")

        if not preview_downloaded:
            print("No suitable preview image found, skipping preview download")

        if download_files:
            print("Downloading image files...")
            for image_info in images_to_download:
                if 'download_url' in image_info:
                    filename = image_info.get('filename')
                    if not filename:
                        print(f"No filename for {image_info.get('image_id', 'unknown image')}, skipping")
                        continue
                    local_path = dataset_folder / filename
                    local_path.parent.mkdir(parents=True, exist_ok=True)
                    try:
                        self.download_image(image_info['download_url'], local_path)
                        downloaded_files.append({
                            'filename': filename,
                            'local_path': str(local_path),
                            'image_info': image_info
                        })
                    except Exception as e:
                        print(f"Failed to download {filename}: {e}")
        else:
            print("Skipping file downloads - metadata only mode")
            for image_info in images_to_download:
                if 'download_url' in image_info:
                    downloaded_files.append({
                        'filename': image_info.get('filename'),
                        'local_path': 'not_downloaded',
                        'image_info': image_info
                    })

        metadata['downloaded_files'] = downloaded_files
        metadata['preview_files'] = preview_files

        # Persist everything we learned alongside the files
        metadata_file = dataset_folder / f"dataset_{dataset_num}.yaml"
        with open(metadata_file, 'w', encoding='utf-8') as f:
            yaml.dump(metadata, f, default_flow_style=False, allow_unicode=True)

        print(f"Metadata saved to: {metadata_file}")
        print(f"Dataset {dataset_num} completed successfully!")

        return dataset_folder, metadata_file
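
    # Typical calls (sketch; the URLs are the example pages used in main() below):
    #   folder, meta = downloader.download_dataset(url, image_id="IM5")
    #   folder, meta = downloader.download_dataset(url, download_files=False)  # metadata only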

    def list_available_images(self, dataset_url):
        """List all available image IDs for a dataset."""
        print(f"Fetching available images from: {dataset_url}")
        metadata = self.parse_dataset_page(dataset_url)
        # parse_dataset_page does not set the accession, so derive it here
        accession = self.extract_accession_from_url(dataset_url) or 'Unknown'

        print(f"\nAvailable Image IDs for {accession}:")
        print("-" * 50)

        for i, image in enumerate(metadata['images'], 1):
            image_id = image.get('image_id', 'Unknown')
            filename = image.get('filename', 'Unknown')
            dimensions = image.get('dimensions', {})

            print(f"{i:2d}. {image_id:8s} - {filename}")
            if dimensions:
                dims_str = (f"({dimensions.get('T', 1)}, {dimensions.get('C', 1)}, "
                            f"{dimensions.get('Z', 1)}, {dimensions.get('Y', 1)}, "
                            f"{dimensions.get('X', 1)})")
                print(f"    Dimensions: {dims_str}")

        return [img.get('image_id') for img in metadata['images']]

    def anonymize_dataset(self, dataset_folder, metadata_file):
        """Anonymize a dataset by renaming its files to generic names in place."""
        print(f"Anonymizing dataset: {dataset_folder}")

        dataset_num = dataset_folder.name.split('_')[1]
        anonymized_files = []

        # Flatten every file (nested or top-level) to dataset_NNN<suffix>,
        # leaving the metadata YAML itself untouched
        for item in dataset_folder.rglob('*'):
            if item.is_file() and item.name != f"dataset_{dataset_num}.yaml":
                rel_path = item.relative_to(dataset_folder)

                anonymized_filename = f"dataset_{dataset_num}{item.suffix}"
                anonymized_path = dataset_folder / anonymized_filename

                # Avoid silently overwriting when two files share a suffix
                counter = 1
                while anonymized_path.exists() and anonymized_path != item:
                    anonymized_filename = f"dataset_{dataset_num}_{counter}{item.suffix}"
                    anonymized_path = dataset_folder / anonymized_filename
                    counter += 1

                item.rename(anonymized_path)

                anonymized_files.append({
                    'original_path': str(rel_path),
                    'anonymized_path': str(anonymized_path.relative_to(self.base_data_folder)),
                    'anonymized_filename': anonymized_filename
                })
                print(f"Renamed: {rel_path} -> {anonymized_filename}")

        # Remove now-empty subfolders, deepest first so parents empty out too
        subfolders = [p for p in dataset_folder.rglob('*') if p.is_dir()]
        for item in sorted(subfolders, key=lambda p: len(p.parts), reverse=True):
            try:
                item.rmdir()
                print(f"Removed empty folder: {item.relative_to(dataset_folder)}")
            except OSError:
                pass  # not empty; keep it

        # Record the renaming in the metadata file
        with open(metadata_file, 'r', encoding='utf-8') as f:
            metadata = yaml.safe_load(f)

        metadata['anonymized'] = True
        metadata['anonymized_files'] = anonymized_files
        # Anonymization happens in place, so both folder entries are the same
        metadata['original_dataset_folder'] = str(dataset_folder.relative_to(self.base_data_folder))
        metadata['anonymized_dataset_folder'] = str(dataset_folder.relative_to(self.base_data_folder))

        with open(metadata_file, 'w', encoding='utf-8') as f:
            yaml.dump(metadata, f, default_flow_style=False, allow_unicode=True)

        print(f"Anonymized dataset: {dataset_folder}")
        print(f"Anonymized metadata: {metadata_file}")

        return dataset_folder, metadata_file
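
    # Reading the mapping back later (sketch):
    #   with open(metadata_file, encoding='utf-8') as f:
    #       meta = yaml.safe_load(f)
    #   for entry in meta['anonymized_files']:
    #       print(entry['original_path'], '->', entry['anonymized_filename'])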


def main():
    """Exercise the downloader on two example gallery pages."""
    downloader = BioImageArchiveDownloader()

    test_urls = [
        "https://www.ebi.ac.uk/bioimage-archive/galleries/S-BIAD7.html",
        "https://uk1s3.embassy.ebi.ac.uk/bia-integrator-data/pages/S-BIAD573.html",
    ]

    for i, url in enumerate(test_urls, 1):
        try:
            dataset_folder, metadata_file = downloader.download_dataset(
                url,
                image_id="IM1",
                download_files=True,
            )
            print(f"\nSuccess! Dataset processed: {dataset_folder}")
            print(f"Metadata saved to: {metadata_file}")

            print("\n" + "=" * 50)
            print("ANONYMIZING DATASET")
            print("=" * 50)
            anonymized_folder, anonymized_metadata = downloader.anonymize_dataset(
                dataset_folder, metadata_file)
            print(f"\nAnonymized dataset: {anonymized_folder}")
            print(f"Anonymized metadata: {anonymized_metadata}")
        except Exception as e:
            print(f"Error processing dataset {i}: {e}")


if __name__ == "__main__":
    main()