Tasks: Image Segmentation
Modalities: Image
Languages: English
Tags: Cloud Detection, Cloud Segmentation, Remote Sensing Images, Satellite Images, HRC-WHU, CloudSEN12-High, GF12MS-WHU, L8-Biome
License: cc-by-nc-4.0
Commit adca92d (verified)
Parent(s): Duplicate from XavierJiezou/cloud-adapter-datasets
Co-authored-by: XavierJiezou <XavierJiezou@users.noreply.huggingface.co>
- .gitattributes +58 -0
- README.md +163 -0
- cloudsen12_high_l1c.zip +3 -0
- cloudsen12_high_l2a.zip +3 -0
- gf12ms_whu_gf1.zip +3 -0
- gf12ms_whu_gf2.zip +3 -0
- give_colors_to_mask.py +102 -0
- hrc_whu.zip +3 -0
- l8_biome.zip +3 -0
- upload_zip_to_hub.py +69 -0
.gitattributes
ADDED
@@ -0,0 +1,58 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.lz4 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
# Audio files - uncompressed
*.pcm filter=lfs diff=lfs merge=lfs -text
*.sam filter=lfs diff=lfs merge=lfs -text
*.raw filter=lfs diff=lfs merge=lfs -text
# Audio files - compressed
*.aac filter=lfs diff=lfs merge=lfs -text
*.flac filter=lfs diff=lfs merge=lfs -text
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.ogg filter=lfs diff=lfs merge=lfs -text
*.wav filter=lfs diff=lfs merge=lfs -text
# Image files - uncompressed
*.bmp filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.tiff filter=lfs diff=lfs merge=lfs -text
# Image files - compressed
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
# Video files - compressed
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.webm filter=lfs diff=lfs merge=lfs -text
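Rules of this form are what `git lfs track` writes into .gitattributes. For example, adding a rule for an extension the list above does not cover (say, raw `*.tif` GeoTIFFs) is a one-liner; a sketch, assuming Git LFS is installed:

```bash
# Append an LFS rule for *.tif to .gitattributes and stage the change.
git lfs track "*.tif"
git add .gitattributes
```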
README.md
ADDED
@@ -0,0 +1,163 @@
---
license: cc-by-nc-4.0
task_categories:
- image-segmentation
language:
- en
tags:
- Cloud Detection
- Cloud Segmentation
- Remote Sensing Images
- Satellite Images
- HRC-WHU
- CloudSEN12-High
- GF12MS-WHU
- L8-Biome
---

# Cloud-Adapter-Datasets

This dataset card describes the datasets used in [Cloud-Adapter](https://github.com/XavierJiezou/cloud-adapter): a collection of high-resolution satellite images and semantic segmentation masks for cloud detection and related tasks.

## Install

```bash
pip install huggingface-hub
```
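The `huggingface-cli` entry point is installed together with the package; a quick way to confirm the install worked (a trivial check, not part of the original card):

```bash
huggingface-cli --help
```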
## Usage

```bash
# Step 1: Download datasets into the local "data" directory
huggingface-cli download --repo-type dataset XavierJiezou/cloud-adapter-datasets --local-dir data --include hrc_whu.zip
huggingface-cli download --repo-type dataset XavierJiezou/cloud-adapter-datasets --local-dir data --include gf12ms_whu_gf1.zip
huggingface-cli download --repo-type dataset XavierJiezou/cloud-adapter-datasets --local-dir data --include gf12ms_whu_gf2.zip
huggingface-cli download --repo-type dataset XavierJiezou/cloud-adapter-datasets --local-dir data --include cloudsen12_high_l1c.zip
huggingface-cli download --repo-type dataset XavierJiezou/cloud-adapter-datasets --local-dir data --include cloudsen12_high_l2a.zip
huggingface-cli download --repo-type dataset XavierJiezou/cloud-adapter-datasets --local-dir data --include l8_biome.zip

# Step 2: Extract datasets (Step 1 placed the archives in "data")
cd data
unzip hrc_whu.zip -d hrc_whu
unzip gf12ms_whu_gf1.zip -d gf12ms_whu_gf1
unzip gf12ms_whu_gf2.zip -d gf12ms_whu_gf2
unzip cloudsen12_high_l1c.zip -d cloudsen12_high_l1c
unzip cloudsen12_high_l2a.zip -d cloudsen12_high_l2a
unzip l8_biome.zip -d l8_biome
```
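The same downloads can be scripted in Python with `snapshot_download` from `huggingface_hub`; a minimal sketch, where `allow_patterns` restricts the download to the listed archives:

```python
from huggingface_hub import snapshot_download

# Download only the selected zip archives from the dataset repo into ./data.
snapshot_download(
    repo_id="XavierJiezou/cloud-adapter-datasets",
    repo_type="dataset",
    local_dir="data",
    allow_patterns=["hrc_whu.zip", "l8_biome.zip"],
)
```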
## Example

```python
import os
import zipfile

from huggingface_hub import hf_hub_download
from PIL import Image

# Define the dataset repository (the same repo used in the CLI commands above)
repo_id = "XavierJiezou/cloud-adapter-datasets"

# Select the zip files of the datasets to download
zip_files = [
    "hrc_whu.zip",
    # "gf12ms_whu_gf1.zip",
    # "gf12ms_whu_gf2.zip",
    # "cloudsen12_high_l1c.zip",
    # "cloudsen12_high_l2a.zip",
    # "l8_biome.zip",
]

# Define a directory to extract the datasets
output_dir = "cloud_adapter_paper_data"

# Ensure the output directory exists
os.makedirs(output_dir, exist_ok=True)

# Step 1: Download and extract each ZIP file
for zip_file in zip_files:
    print(f"Downloading {zip_file}...")
    # Download the ZIP file from the Hugging Face Hub
    zip_path = hf_hub_download(repo_id=repo_id, filename=zip_file, repo_type="dataset")

    # Extract the ZIP file
    extract_path = os.path.join(output_dir, zip_file.replace(".zip", ""))
    with zipfile.ZipFile(zip_path, "r") as zip_ref:
        print(f"Extracting {zip_file} to {extract_path}...")
        zip_ref.extractall(extract_path)

# Step 2: Explore the extracted datasets
# Example: list the contents of the "hrc_whu" dataset
dataset_path = os.path.join(output_dir, "hrc_whu")
train_images_path = os.path.join(dataset_path, "img_dir", "train")
train_annotations_path = os.path.join(dataset_path, "ann_dir", "train")

# Display some files in the training set
print("Training Images:", os.listdir(train_images_path)[:5])
print("Training Annotations:", os.listdir(train_annotations_path)[:5])

# Example: load and display an image and its annotation
image_path = os.path.join(train_images_path, os.listdir(train_images_path)[0])
annotation_path = os.path.join(train_annotations_path, os.listdir(train_annotations_path)[0])

# Open and display the image and its annotation
image = Image.open(image_path)
annotation = Image.open(annotation_path)

print("Displaying the image...")
image.show()

print("Displaying the annotation...")
annotation.show()
```
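`Image.show()` opens an external viewer; in a notebook it can be more convenient to render the pair side by side with matplotlib. A sketch, assuming matplotlib is installed and `image`/`annotation` come from the example above:

```python
import matplotlib.pyplot as plt

# Render the image and its mask side by side instead of opening external viewers.
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(image)
axes[0].set_title("Image")
axes[1].imshow(annotation)
axes[1].set_title("Annotation")
for ax in axes:
    ax.axis("off")
plt.show()
```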
## Source Data

- hrc_whu: https://github.com/dr-lizhiwei/HRC_WHU
- gf12ms_whu: https://github.com/whu-ZSC/GF1-GF2MS-WHU
- cloudsen12_high: https://huggingface.co/datasets/csaybar/CloudSEN12-high
- l8_biome: https://landsat.usgs.gov/landsat-8-cloud-cover-assessment-validation-data

## Citation

```bibtex
@article{hrc_whu,
  title   = {Deep learning based cloud detection for medium and high resolution remote sensing images of different sensors},
  author  = {Zhiwei Li and Huanfeng Shen and Qing Cheng and Yuhao Liu and Shucheng You and Zongyi He},
  journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
  volume  = {150},
  pages   = {197-212},
  year    = {2019}
}

@article{gf12ms_whu,
  title   = {Transferring Deep Models for Cloud Detection in Multisensor Images via Weakly Supervised Learning},
  author  = {Zhu, Shaocong and Li, Zhiwei and Shen, Huanfeng},
  journal = {IEEE Transactions on Geoscience and Remote Sensing},
  volume  = {62},
  pages   = {1-18},
  year    = {2024}
}

@article{cloudsen12_high,
  title   = {CloudSEN12, a global dataset for semantic understanding of cloud and cloud shadow in Sentinel-2},
  author  = {Aybar, Cesar and Ysuhuaylas, Luis and Loja, Jhomira and Gonzales, Karen and Herrera, Fernando and Bautista, Lesly and Yali, Roy and Flores, Angie and Diaz, Lissette and Cuenca, Nicole and others},
  journal = {Scientific Data},
  volume  = {9},
  number  = {1},
  pages   = {782},
  year    = {2022}
}

@article{l8_biome,
  title   = {Cloud detection algorithm comparison and validation for operational Landsat data products},
  author  = {Steve Foga and Pat L. Scaramuzza and Song Guo and Zhe Zhu and Ronald D. Dilley and Tim Beckmann and Gail L. Schmidt and John L. Dwyer and M. {Joseph Hughes} and Brady Laue},
  journal = {Remote Sensing of Environment},
  volume  = {194},
  pages   = {379-390},
  year    = {2017}
}
```

## Contact

For questions, please contact Xavier Jiezou at xuechaozou (at) foxmail (dot) com.
cloudsen12_high_l1c.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a019db9779eda7080b60f2220c696747dbc26c469896e0fc4865d22b72c248de
size 3360887512
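The three lines above are a Git LFS pointer, not the archive itself: `oid` is the SHA-256 of the real file and `size` its byte count. A downloaded archive can therefore be checked against the pointer; a sketch for Linux, assuming `sha256sum` is available:

```bash
# The printed digest should match the oid in the pointer file.
sha256sum data/cloudsen12_high_l1c.zip
```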
cloudsen12_high_l2a.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8428b9b8cafa34f7fb12e7980700c256a6e21ae1037131c0e9173e73bccad7c5
size 3571709018
gf12ms_whu_gf1.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e791926f22a71a093c51513f134faa6c760b34c3f330c7822af5f2447bc6ce28
size 942211975
gf12ms_whu_gf2.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a7859aeb2d2ce2fd6605ecf286a0a865147fc791d80db8304ce0303d0cb135c4
size 1744108191
give_colors_to_mask.py
ADDED
@@ -0,0 +1,102 @@
import os
from concurrent.futures import ThreadPoolExecutor

import numpy as np
from PIL import Image
from tqdm import tqdm

# Retrieve the flat [R, G, B, R, G, B, ...] color palette for a given dataset
def get_palette(dataset_name: str):
    if dataset_name in ["cloudsen12_high_l1c", "cloudsen12_high_l2a"]:
        return [79, 253, 199, 77, 2, 115, 251, 255, 41, 221, 53, 223]
    if dataset_name == "l8_biome":
        return [79, 253, 199, 221, 53, 223, 251, 255, 41, 77, 2, 115]
    if dataset_name in ["gf12ms_whu_gf1", "gf12ms_whu_gf2", "hrc_whu"]:
        return [79, 253, 199, 77, 2, 115]
    raise ValueError(f"dataset_name not supported: {dataset_name}")

# Function to apply the color palette to a mask
def give_colors_to_mask(mask: np.ndarray, colors=None) -> Image.Image:
    """Convert a mask to a colorized version using the specified palette."""
    im = Image.fromarray(mask.astype(np.uint8)).convert("P")
    im.putpalette(colors)
    return im

# Function to process a single file
def process_file(file_path, palette):
    try:
        # Load the mask
        mask = np.array(Image.open(file_path))

        # Apply the color palette
        colored_mask = give_colors_to_mask(mask, palette)

        # Save the colored mask, overwriting the original file
        colored_mask.save(file_path)
        return True
    except Exception as e:
        print(f"Error processing {file_path}: {e}")
        return False

# Main processing function for a dataset
def process_dataset(dataset_name, base_root, progress_bar):
    ann_dir = os.path.join(base_root, dataset_name, "ann_dir")
    if not os.path.exists(ann_dir):
        print(f"Annotation directory does not exist for {dataset_name}: {ann_dir}")
        return

    # Get the color palette for this dataset
    palette = get_palette(dataset_name)

    # Gather all files to process
    files_to_process = []
    for split in ["train", "val", "test"]:
        split_dir = os.path.join(ann_dir, split)
        if not os.path.exists(split_dir):
            print(f"Split directory does not exist for {dataset_name}: {split_dir}")
            continue

        # Add all png files in the directory to the list
        for file_name in os.listdir(split_dir):
            if file_name.endswith(".png"):
                files_to_process.append(os.path.join(split_dir, file_name))

    # Multi-threaded processing
    with ThreadPoolExecutor() as executor:
        results = list(tqdm(
            executor.map(lambda f: process_file(f, palette), files_to_process),
            total=len(files_to_process),
            desc=f"Processing {dataset_name}",
            leave=False
        ))

    # Update the overall progress bar
    progress_bar.update(len(files_to_process))

    print(f"{dataset_name}: Processed {sum(results)} files out of {len(files_to_process)}.")

# Define the root directory and datasets
base_root = "data"  # Replace with your datasets' root directory
dataset_names = [
    "cloudsen12_high_l1c",
    "cloudsen12_high_l2a",
    "gf12ms_whu_gf1",
    "gf12ms_whu_gf2",
    "hrc_whu",
    "l8_biome",
]

# Main script
if __name__ == "__main__":
    # Calculate the total number of files across all datasets
    total_files = 0
    for dataset_name in dataset_names:
        ann_dir = os.path.join(base_root, dataset_name, "ann_dir")
        for split in ["train", "val", "test"]:
            split_dir = os.path.join(ann_dir, split)
            if os.path.exists(split_dir):
                total_files += len([f for f in os.listdir(split_dir) if f.endswith(".png")])

    # Create a progress bar
    with tqdm(total=total_files, desc="Overall Progress") as progress_bar:
        for dataset_name in dataset_names:
            process_dataset(dataset_name, base_root, progress_bar)
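The palettes in `get_palette` are flat `[R, G, B, R, G, B, ...]` lists, so class index `i` in a mask is rendered with the `i`-th RGB triple. A minimal sketch of the same `putpalette` mechanism on a hypothetical two-class mask:

```python
import numpy as np
from PIL import Image

# Hypothetical 2x2 mask with class indices 0 and 1.
mask = np.array([[0, 1], [1, 0]], dtype=np.uint8)
im = Image.fromarray(mask).convert("P")
# Index 0 -> (79, 253, 199), index 1 -> (77, 2, 115).
im.putpalette([79, 253, 199, 77, 2, 115])
im.save("colored_mask.png")
```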
hrc_whu.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:48f2b26d1edda33133a77f7a3d49804750c8119b56ab24faa3c67f3c55e34517
size 115524658
l8_biome.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bc94fe050613b9ef3da84df32e8a7d18cc20071982ff52dc5ce248fba66c99ea
size 2771988653
upload_zip_to_hub.py
ADDED
@@ -0,0 +1,69 @@
import os
import zipfile

from huggingface_hub import HfApi, HfFolder

# Define the root directory containing all datasets
base_root = "data"  # Replace with the directory containing all datasets
dataset_repo = "XavierJiezou/Cloud-Adapter"  # Hugging Face repository name
dataset_names = [
    "hrc_whu",
    "gf12ms_whu_gf1",
    "gf12ms_whu_gf2",
    "cloudsen12_high_l1c",
    "cloudsen12_high_l2a",
    "l8_biome",
]

# Function to create a ZIP file for a dataset directory
def create_zip(dataset_path, output_path):
    """
    Compress a dataset directory into a ZIP file.

    Args:
        dataset_path (str): Path to the dataset directory.
        output_path (str): Path to save the ZIP file.
    """
    with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for root, _, files in os.walk(dataset_path):
            for file in files:
                file_path = os.path.join(root, file)
                # Store paths relative to the dataset root inside the archive
                arcname = os.path.relpath(file_path, dataset_path)
                zipf.write(file_path, arcname)
    print(f"Compressed {dataset_path} into {output_path}")

# Function to upload ZIP files to the Hugging Face Hub
def upload_zip_to_hub(dataset_name, zip_path, repo_name):
    """
    Upload a ZIP file to a Hugging Face repository.

    Args:
        dataset_name (str): Name of the dataset (used as a file identifier).
        zip_path (str): Path to the ZIP file.
        repo_name (str): Hugging Face repository name.
    """
    api = HfApi()
    # Read the locally cached access token
    token = HfFolder.get_token()
    file_name = f"{dataset_name}.zip"
    api.upload_file(
        path_or_fileobj=zip_path,
        path_in_repo=file_name,
        repo_id=repo_name,
        repo_type="dataset",
        token=token,
    )
    print(f"Uploaded {file_name} to {repo_name}")

# Main script
if __name__ == "__main__":
    for dataset_name in dataset_names:
        dataset_path = os.path.join(base_root, dataset_name)
        if not os.path.exists(dataset_path):
            print(f"Dataset directory does not exist: {dataset_path}")
            continue

        # Create the ZIP file
        zip_path = f"{dataset_name}.zip"
        create_zip(dataset_path, zip_path)

        # Upload the ZIP file to the Hugging Face Hub
        upload_zip_to_hub(dataset_name, zip_path, dataset_repo)
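After the loop finishes, a quick sanity check is to list the repository contents and confirm every archive landed. A sketch using `HfApi.list_repo_files` from `huggingface_hub`, with the repo name and dataset list taken from the script above:

```python
from huggingface_hub import HfApi

# Values from the upload script above.
dataset_repo = "XavierJiezou/Cloud-Adapter"
expected = ["hrc_whu.zip", "gf12ms_whu_gf1.zip", "gf12ms_whu_gf2.zip",
            "cloudsen12_high_l1c.zip", "cloudsen12_high_l2a.zip", "l8_biome.zip"]

# Confirm that every expected archive is present in the dataset repo.
files = set(HfApi().list_repo_files(dataset_repo, repo_type="dataset"))
missing = [name for name in expected if name not in files]
print("Missing uploads:", missing or "none")
```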