# publaynet-mini/publaynet_mini.py
import io
from pathlib import Path

import numpy as np
import pandas as pd  # not referenced by name, but required by pyarrow's to_pandas()
import pyarrow.parquet as pq
from PIL import Image as PILImage

from datasets import Dataset, DatasetDict, Features, Image, Sequence, Value


def _load_publaynet_mini():
    """Load the PubLayNet_mini dataset from the bundled parquet file."""
    data_dir = Path(__file__).parent
    parquet_file = data_dir / "publaynet_mini.parquet"

    # Read the parquet file
    table = pq.read_table(parquet_file)
    df = table.to_pandas()

    # Convert annotations from numpy arrays to plain lists for JSON serialization
    def convert_annotations(annotations):
        if isinstance(annotations, np.ndarray):
            return [ann.item() if hasattr(ann, 'item') else ann for ann in annotations]
        elif isinstance(annotations, list):
            return annotations
        else:
            return []

    df['annotations'] = df['annotations'].apply(convert_annotations)

    # Convert raw image bytes to PIL Images
    def convert_image(img_data):
        if isinstance(img_data, dict) and 'bytes' in img_data:
            img_bytes = img_data['bytes']
            return PILImage.open(io.BytesIO(img_bytes))
        return img_data

    df['image'] = df['image'].apply(convert_image)
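
    # Note: PIL decodes lazily from the BytesIO buffer; Dataset.from_pandas below
    # re-encodes these PIL images through the Image() feature.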

    # Define dataset features
    features = Features({
        "id": Value("string"),
        "image": Image(),
        "annotations": Sequence({
            "category_id": Value("int64"),
            "bbox": Sequence(Value("float32"), length=4),
            "area": Value("float32"),
            "iscrowd": Value("int64"),
            "id": Value("int64"),
            "image_id": Value("int64"),
            "segmentation": Sequence(Sequence(Value("float32"))),
        }),
    })
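    # In `datasets`, a Sequence wrapped around a dict of features is stored
    # column-wise, so each example's "annotations" reads back as a dict of
    # lists (e.g. annotations["bbox"]), not as a list of per-object dicts.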

    # Create the dataset from the pandas DataFrame
    dataset = Dataset.from_pandas(df, features=features)
    return DatasetDict({
        "train": dataset
    })


def load_dataset(*args, **kwargs):
    """Load the PubLayNet_mini dataset.

    Positional and keyword arguments are accepted for signature compatibility
    with `datasets.load_dataset` but are currently ignored.
    """
    return _load_publaynet_mini()
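

# Minimal usage sketch: run this file directly to sanity-check the loader.
# It assumes publaynet_mini.parquet sits next to this script; the printed
# fields follow the Features schema defined above.
if __name__ == "__main__":
    ds = load_dataset()
    print(ds)
    sample = ds["train"][0]
    print("image size:", sample["image"].size)  # PIL image after Image() decoding
    print("num annotations:", len(sample["annotations"]["category_id"]))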