Commit ·
a9a230b
0
Parent(s):
Duplicate from ai-forever/Peter
Browse filesCo-authored-by: ai-forever <ai-forever@users.noreply.huggingface.co>
- .gitattributes +52 -0
- Peter.py +63 -0
- README.md +50 -0
- annotations_test.json +3 -0
- annotations_train.json +3 -0
- annotations_val.json +3 -0
- dataset_infos.json +3 -0
- images.zip +3 -0
.gitattributes
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
# Audio files - uncompressed
|
| 34 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
# Audio files - compressed
|
| 38 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
# Image files - uncompressed
|
| 44 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
# Image files - compressed
|
| 49 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 50 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
*.json filter=lfs diff=lfs merge=lfs -text
|
Peter.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import json
import datasets


class Peter(datasets.GeneratorBasedBuilder):
    """Builder for the Digital Peter dataset: full-page images of manuscripts
    written by Peter the Great, with one COCO-style annotation file per split.
    """

    def _info(self):
        """Return dataset metadata; each example exposes only the page image."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                }
            )
        )

    def _split_generators(self, dl_manager):
        """Download/extract the image archive and the per-split annotations.

        Returns one SplitGenerator per split (train/test/validation); each
        shares the same extracted image directory but uses its own
        annotation file to select which images belong to the split.
        """
        urls = {
            "images": "images.zip",
            "train_data": "annotations_train.json",
            "test_data": "annotations_test.json",
            "val_data": "annotations_val.json",
        }
        data_files = dl_manager.download_and_extract(urls)

        split_to_annotation_key = {
            datasets.Split.TRAIN: "train_data",
            datasets.Split.TEST: "test_data",
            datasets.Split.VALIDATION: "val_data",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    # A fresh iterator per split: iter_files() yields a
                    # one-shot iterable, so it must not be shared.
                    "image_paths": dl_manager.iter_files(data_files["images"]),
                    "annotation_path": data_files[annotation_key],
                },
            )
            for split, annotation_key in split_to_annotation_key.items()
        ]

    def _generate_examples(self, image_paths, annotation_path):
        """Generate (index, example) pairs for this split.

        Only images whose basename appears in the split's COCO annotation
        file (`data["images"][*]["file_name"]`) are emitted.
        """
        # Explicit encoding: JSON is UTF-8 by spec; without it, decoding
        # would depend on the platform's locale default.
        with open(annotation_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        image_names = {image_data["file_name"] for image_data in data["images"]}

        for idx, image_path in enumerate(image_paths):
            if os.path.basename(image_path) in image_names:
                yield idx, {"image": image_path}
README.md
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
language:
|
| 3 |
+
- ru
|
| 4 |
+
license:
|
| 5 |
+
- mit
|
| 6 |
+
source_datasets:
|
| 7 |
+
- original
|
| 8 |
+
task_categories:
|
| 9 |
+
- image-segmentation
|
| 10 |
+
- object-detection
|
| 11 |
+
task_ids: []
|
| 12 |
+
tags:
|
| 13 |
+
- optical-character-recognition
|
| 14 |
+
- text-detection
|
| 15 |
+
- ocr
|
| 16 |
+
---
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Digital Peter
|
| 20 |
+
|
| 21 |
+
The Peter dataset can be used for reading texts from the manuscripts written by Peter the Great. The dataset annotation contains end-to-end markup for training detection and OCR models, as well as an end-to-end model for reading text from pages.
|
| 22 |
+
|
| 23 |
+
Paper is available at http://arxiv.org/abs/2103.09354
|
| 24 |
+
|
| 25 |
+
## Description
|
| 26 |
+
|
| 27 |
+
Digital Peter is an educational task with a historical slant created on the basis of several AI technologies (Computer Vision, NLP, and knowledge graphs). The task was prepared jointly with the Saint Petersburg Institute of History (N.P.Lihachov mansion) of Russian Academy of Sciences, Federal Archival Agency of Russia and Russian State Archive of Ancient Acts.
|
| 28 |
+
|
| 29 |
+
A detailed description of the problem (with an immersion in the problem) can be found in [detailed_description_of_the_task_en.pdf](https://github.com/sberbank-ai/digital_peter_aij2020/blob/master/desc/detailed_description_of_the_task_en.pdf)
|
| 30 |
+
|
| 31 |
+
The dataset consists of 662 full page images and 9696 annotated text files. There are 265788 symbols and approximately 50998 words.
|
| 32 |
+
|
| 33 |
+
## Annotation format
|
| 34 |
+
|
| 35 |
+
The annotation is in COCO format. The `annotation.json` should have the following dictionaries:
|
| 36 |
+
|
| 37 |
+
- `annotation["categories"]` - a list of dicts with category info (category names and indexes).
|
| 38 |
+
- `annotation["images"]` - a list of dictionaries with a description of images, each dictionary must contain fields:
|
| 39 |
+
- `file_name` - name of the image file.
|
| 40 |
+
- `id` for image id.
|
| 41 |
+
- `annotation["annotations"]` - a list of dictionaries with markup information. Each dictionary stores a description for one polygon from the dataset, and must contain the following fields:
|
| 42 |
+
- `image_id` - the index of the image on which the polygon is located.
|
| 43 |
+
- `category_id` - the polygon’s category index.
|
| 44 |
+
- `attributes` - dict with some additional annotation information. In the `translation` subdict you can find the text translation for the line.
|
| 45 |
+
- `segmentation` - the coordinates of the polygon, a list of numbers - which are coordinate pairs x and y.
|
| 46 |
+
|
| 47 |
+
## Competition
|
| 48 |
+
|
| 49 |
+
We held a competition based on Digital Peter dataset.
|
| 50 |
+
Here is github [link](https://github.com/sberbank-ai/digital_peter_aij2020). Here is competition [page](https://ods.ai/tracks/aij2020) (need to register).
|
annotations_test.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d2604bcfb80a006ab82e9902f7b2afbc099e52619efbcaad296384b455c773ee
|
| 3 |
+
size 980971
|
annotations_train.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1f8c046828890c99c04f8b1730528a5f67d4e0ab3b91928df5ad45892f28dae2
|
| 3 |
+
size 12430428
|
annotations_val.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e6b0e41a47642207b0fac522d62f095e0d9730f3db50813a98d20175301d2523
|
| 3 |
+
size 1077715
|
dataset_infos.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:87e84c0a19fc7cc74231712b80ac40546d2420879e9d5d7d591dda993a3b4e84
|
| 3 |
+
size 1309
|
images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bd186a0e321669ccd14e1d43df0027d4b4eb4a448f0a9e0cac50de0175d54054
|
| 3 |
+
size 982693094
|