{
  "@context": {
    "@language": "en",
    "@vocab": "https://schema.org/",
    "citeAs": "cr:citeAs",
    "column": "cr:column",
    "conformsTo": "dct:conformsTo",
    "cr": "http://mlcommons.org/croissant/",
    "rai": "http://mlcommons.org/croissant/RAI/",
    "dct": "http://purl.org/dc/terms/",
    "data": {
      "@id": "cr:data",
      "@type": "@json"
    },
    "dataType": {
      "@id": "cr:dataType",
      "@type": "@vocab"
    },
    "examples": {
      "@id": "cr:examples",
      "@type": "@json"
    },
    "extract": "cr:extract",
    "field": "cr:field",
    "fileProperty": "cr:fileProperty",
    "fileObject": "cr:fileObject",
    "fileSet": "cr:fileSet",
    "format": "cr:format",
    "includes": "cr:includes",
    "isLiveDataset": "cr:isLiveDataset",
    "jsonPath": "cr:jsonPath",
    "key": "cr:key",
    "md5": "cr:md5",
    "parentField": "cr:parentField",
    "path": "cr:path",
    "recordSet": "cr:recordSet",
    "references": "cr:references",
    "regex": "cr:regex",
    "repeated": "cr:repeated",
    "replace": "cr:replace",
    "sc": "https://schema.org/",
    "separator": "cr:separator",
    "source": "cr:source",
    "subField": "cr:subField",
    "transform": "cr:transform"
  },
  "@type": "sc:Dataset",
  "conformsTo": "http://mlcommons.org/croissant/1.0",
  "name": "Fidel: A Large-Scale Sentence Level Amharic OCR Dataset",
  "description": "Fidel is a comprehensive dataset for Amharic Optical Character Recognition (OCR) at the sentence level. It contains a diverse collection of Amharic text images from handwritten, typed, and synthetic sources, organized into train and test splits. This dataset aims to advance language technology for Amharic, serving critical applications such as digital ID initiatives, document digitization, and automated form processing in Ethiopia.",
  "url": "https://huggingface.co/datasets/upanzi/fidel-dataset",
  "version": "1.0.0",
  "license": "https://creativecommons.org/licenses/by/4.0/",
  "creator": [
    {
      "@type": "sc:Person",
      "name": "Tunga Tessema"
    },
    {
      "@type": "sc:Person",
      "name": "Bereket Retta"
    },
    {
      "@type": "sc:Person",
      "name": "Blessed Guda"
    },
    {
      "@type": "sc:Person",
      "name": "Gabrial Zencha"
    },
    {
      "@type": "sc:Person",
      "name": "Carmel Sagbo"
    }
  ],
  "citeAs": "@inproceedings{fidel2025,\n  title={Fidel: A Large-Scale Sentence Level Amharic OCR Dataset},\n  author={Tessema, Tunga and Retta, Bereket and Guda, Blessed and Zencha, Gabrial and Sagbo, Carmel},\n  booktitle={39th Conference on Neural Information Processing Systems (NeurIPS 2025)},\n  year={2025}\n}",
  "datePublished": "2025-01-01",
  "inLanguage": "am",
  "distribution": [
    {
      "@type": "cr:FileObject",
      "@id": "hf-repository",
      "name": "hf-repository",
      "description": "Fidel Dataset repository on HuggingFace.",
      "contentUrl": "https://huggingface.co/datasets/upanzi/fidel-dataset",
      "encodingFormat": "git+https"
    },
    {
      "@type": "cr:FileSet",
      "@id": "train",
      "name": "train",
      "description": "Train data in Fidel Dataset",
      "containedIn": {
        "@id": "hf-repository"
      },
      "encodingFormat": "text/csv",
      "includes": "train_labels.csv"
    },
    {
      "@type": "cr:FileSet",
      "@id": "test",
      "name": "test",
      "description": "Test data in Fidel Dataset",
      "containedIn": {
        "@id": "hf-repository"
      },
      "encodingFormat": "text/csv",
      "includes": "test_labels.csv"
    }
  ],
  "recordSet": [
    {
      "@id": "train_data",
      "name": "train_data",
      "@type": "cr:RecordSet",
      "key": "image_filename",
      "field": [
        {
          "@id": "train_data/image_filename",
          "@type": "cr:Field",
          "name": "image_filename",
          "dataType": "sc:Text",
          "description": "Filename of the Amharic text image",
          "source": {
            "fileSet": {
              "@id": "train"
            },
            "extract": {
              "column": "image_filename"
            }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "train_data/line_text",
          "name": "line_text",
          "description": "Amharic text label for the image",
          "dataType": "sc:Text",
          "source": {
            "fileSet": {
              "@id": "train"
            },
            "extract": {
              "column": "line_text"
            }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "train_data/type",
          "name": "type",
          "description": "Type of the image (handwritten, typed, synthetic)",
          "dataType": "sc:Text",
          "source": {
            "fileSet": {
              "@id": "train"
            },
            "extract": {
              "column": "type"
            }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "train_data/writer",
          "name": "writer",
          "description": "Writer number for the handwritten images",
          "dataType": "sc:Text",
          "source": {
            "fileSet": {
              "@id": "train"
            },
            "extract": {
              "column": "writer"
            }
          }
        }
      ]
    },
    {
      "@id": "test_data",
      "name": "test_data",
      "@type": "cr:RecordSet",
      "key": "image_filename",
      "field": [
        {
          "@id": "test_data/image_filename",
          "@type": "cr:Field",
          "name": "image_filename",
          "dataType": "sc:Text",
          "description": "Filename of the Amharic text image",
          "source": {
            "fileSet": {
              "@id": "test"
            },
            "extract": {
              "column": "image_filename"
            }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "test_data/line_text",
          "name": "line_text",
          "description": "Amharic text label for the image",
          "dataType": "sc:Text",
          "source": {
            "fileSet": {
              "@id": "test"
            },
            "extract": {
              "column": "line_text"
            }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "test_data/type",
          "name": "type",
          "description": "Type of the image (handwritten, typed, synthetic)",
          "dataType": "sc:Text",
          "source": {
            "fileSet": {
              "@id": "test"
            },
            "extract": {
              "column": "type"
            }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "test_data/writer",
          "name": "writer",
          "description": "Writer number for the handwritten images",
          "dataType": "sc:Text",
          "source": {
            "fileSet": {
              "@id": "test"
            },
            "extract": {
              "column": "writer"
            }
          }
        }
      ]
    }
  ],
  "rai:dataCollection": "The dataset was collected through contributions from native Amharic writers for the handwritten part, compiled from existing digital documents for the typed part, and generated using data augmentation for the synthetic part.",
  "rai:dataCollectionTimeframe": "2024-2025",
  "rai:dataAnnotationPlatform": "Custom annotation platform for validating transcriptions",
  "rai:annotatorDemographics": "Contributors for handwritten samples included diverse age groups (18-65), balanced gender representation, and participants from different regions of Ethiopia.",
  "rai:personalSensitiveInformation": "The dataset does not contain any personally identifiable information of the contributors."
}