Datasets:
Add files using the upload-large-folder tool
Browse files — README.md +22 -3
- publaynet_mini.py +11 -0
README.md
CHANGED
|
@@ -25,7 +25,7 @@ The dataset contains annotations for 5 categories of document layout elements:
|
|
| 25 |
|
| 26 |
Each sample contains:
|
| 27 |
- `id`: Unique document identifier
|
| 28 |
-
- `image`: Document image (PIL Image)
|
| 29 |
- `annotations`: List of layout element annotations with:
|
| 30 |
- `category_id`: Element type (1-5)
|
| 31 |
- `bbox`: Bounding box coordinates [x, y, width, height]
|
|
@@ -35,6 +35,13 @@ Each sample contains:
|
|
| 35 |
- `image_id`: Reference to the document image
|
| 36 |
- `segmentation`: Polygon segmentation mask
|
| 37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
## Category Distribution
|
| 39 |
|
| 40 |
This subset maintains a diverse representation across categories:
|
|
@@ -57,8 +64,9 @@ for sample in dataset['train']:
|
|
| 57 |
print(f"Document ID: {sample['id']}")
|
| 58 |
print(f"Number of layout elements: {len(sample['annotations'])}")
|
| 59 |
|
| 60 |
-
# Access the image
|
| 61 |
-
image = sample['image'] # PIL Image
|
|
|
|
| 62 |
|
| 63 |
# Access annotations
|
| 64 |
for ann in sample['annotations']:
|
|
@@ -75,11 +83,22 @@ You can also load the data directly from the parquet file:
|
|
| 75 |
```python
|
| 76 |
import pyarrow.parquet as pq
|
| 77 |
import pandas as pd
|
|
|
|
|
|
|
| 78 |
|
| 79 |
# Read parquet file
|
| 80 |
table = pq.read_table("publaynet_mini.parquet")
|
| 81 |
df = table.to_pandas()
|
| 82 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
# Access data
|
| 84 |
for idx, row in df.iterrows():
|
| 85 |
image = row['image'] # PIL Image
|
|
|
|
| 25 |
|
| 26 |
Each sample contains:
|
| 27 |
- `id`: Unique document identifier
|
| 28 |
+
- `image`: Document image (PIL Image) — automatically loaded from embedded bytes
|
| 29 |
- `annotations`: List of layout element annotations with:
|
| 30 |
- `category_id`: Element type (1-5)
|
| 31 |
- `bbox`: Bounding box coordinates [x, y, width, height]
|
|
|
|
| 35 |
- `image_id`: Reference to the document image
|
| 36 |
- `segmentation`: Polygon segmentation mask
|
| 37 |
|
| 38 |
+
## Data Storage
|
| 39 |
+
|
| 40 |
+
Images are stored as embedded bytes in the parquet file and automatically converted to PIL Images when loaded. This ensures:
|
| 41 |
+
- Self-contained dataset (no external image dependencies)
|
| 42 |
+
- Fast loading and processing
|
| 43 |
+
- Compatibility with HuggingFace datasets library
|
| 44 |
+
|
| 45 |
## Category Distribution
|
| 46 |
|
| 47 |
This subset maintains a diverse representation across categories:
|
|
|
|
| 64 |
print(f"Document ID: {sample['id']}")
|
| 65 |
print(f"Number of layout elements: {len(sample['annotations'])}")
|
| 66 |
|
| 67 |
+
# Access the image (automatically converted to PIL Image)
|
| 68 |
+
image = sample['image'] # PIL Image object
|
| 69 |
+
print(f"Image size: {image.size}")
|
| 70 |
|
| 71 |
# Access annotations
|
| 72 |
for ann in sample['annotations']:
|
|
|
|
| 83 |
```python
|
| 84 |
import pyarrow.parquet as pq
|
| 85 |
import pandas as pd
|
| 86 |
+
from PIL import Image as PILImage
|
| 87 |
+
import io
|
| 88 |
|
| 89 |
# Read parquet file
|
| 90 |
table = pq.read_table("publaynet_mini.parquet")
|
| 91 |
df = table.to_pandas()
|
| 92 |
|
| 93 |
+
# Convert images from bytes to PIL Images
|
| 94 |
+
def convert_image(img_data):
|
| 95 |
+
if isinstance(img_data, dict) and 'bytes' in img_data:
|
| 96 |
+
img_bytes = img_data['bytes']
|
| 97 |
+
return PILImage.open(io.BytesIO(img_bytes))
|
| 98 |
+
return img_data
|
| 99 |
+
|
| 100 |
+
df['image'] = df['image'].apply(convert_image)
|
| 101 |
+
|
| 102 |
# Access data
|
| 103 |
for idx, row in df.iterrows():
|
| 104 |
image = row['image'] # PIL Image
|
publaynet_mini.py
CHANGED
|
@@ -5,6 +5,8 @@ from datasets import Dataset, DatasetDict, Features, Image, Value, Sequence
|
|
| 5 |
import pyarrow.parquet as pq
|
| 6 |
import pandas as pd
|
| 7 |
import numpy as np
|
|
|
|
|
|
|
| 8 |
|
| 9 |
def _load_publaynet_mini():
|
| 10 |
"""Load PubLayNet_mini dataset from parquet file."""
|
|
@@ -27,6 +29,15 @@ def _load_publaynet_mini():
|
|
| 27 |
|
| 28 |
df['annotations'] = df['annotations'].apply(convert_annotations)
|
| 29 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
# Define dataset features
|
| 31 |
features = Features({
|
| 32 |
"id": Value("string"),
|
|
|
|
| 5 |
import pyarrow.parquet as pq
|
| 6 |
import pandas as pd
|
| 7 |
import numpy as np
|
| 8 |
+
from PIL import Image as PILImage
|
| 9 |
+
import io
|
| 10 |
|
| 11 |
def _load_publaynet_mini():
|
| 12 |
"""Load PubLayNet_mini dataset from parquet file."""
|
|
|
|
| 29 |
|
| 30 |
df['annotations'] = df['annotations'].apply(convert_annotations)
|
| 31 |
|
| 32 |
+
# Convert image bytes to PIL Images
|
| 33 |
+
def convert_image(img_data):
|
| 34 |
+
if isinstance(img_data, dict) and 'bytes' in img_data:
|
| 35 |
+
img_bytes = img_data['bytes']
|
| 36 |
+
return PILImage.open(io.BytesIO(img_bytes))
|
| 37 |
+
return img_data
|
| 38 |
+
|
| 39 |
+
df['image'] = df['image'].apply(convert_image)
|
| 40 |
+
|
| 41 |
# Define dataset features
|
| 42 |
features = Features({
|
| 43 |
"id": Value("string"),
|